diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -7064,6 +7064,23 @@
   }
 }
+static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) {
+  // The maximum representable value of a half is 65504. For floats the maximum
+  // value is 3.4e38 which requires roughly 129 bits.
+  unsigned BitWidth = I->getType()->getScalarSizeInBits();
+  if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
+    return;
+  if (isa<FPToSIInst>(I) && BitWidth >= 17) {
+    Lower = APInt(BitWidth, -65504);
+    Upper = APInt(BitWidth, 65505);
+  }
+
+  if (isa<FPToUIInst>(I) && BitWidth >= 16) {
+    // For a fptoui the lower limit is left as 0.
+    Upper = APInt(BitWidth, 65505);
+  }
+}
+
 ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
                                          AssumptionCache *AC,
                                          const Instruction *CtxI,
@@ -7088,6 +7105,8 @@
     setLimitsForIntrinsic(*II, Lower, Upper);
   else if (auto *SI = dyn_cast<SelectInst>(V))
     setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
+  else if (isa<FPToSIInst>(V) || isa<FPToUIInst>(V))
+    setLimitForFPToI(cast<Instruction>(V), Lower, Upper);
 
   ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
diff --git a/llvm/test/CodeGen/Thumb2/mve-fpclamptosat_vec.ll b/llvm/test/CodeGen/Thumb2/mve-fpclamptosat_vec.ll
--- a/llvm/test/CodeGen/Thumb2/mve-fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-fpclamptosat_vec.ll
@@ -544,137 +544,26 @@
 define arm_aapcs_vfpcc <4 x i32> @stest_f16i32(<4 x half> %x) {
 ; CHECK-LABEL: stest_f16i32:
 ; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; CHECK-NEXT: .pad #4
-; CHECK-NEXT: sub sp, #4
-; CHECK-NEXT: .vsave {d8, d9}
-; CHECK-NEXT: vpush {d8, d9}
-; CHECK-NEXT: vmov.u16 r0, q0[0]
+; CHECK-NEXT: .save {r4, r5, r7, lr}
+; CHECK-NEXT: push {r4, r5, r7, lr}
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEXT: vmov.u16 r0, q0[3]
 ; CHECK-NEXT: vmov q4, q0
 ; CHECK-NEXT: bl __fixhfdi
-; CHECK-NEXT: mov r9, r0
-; CHECK-NEXT: mvn r0, #-2147483648
-; CHECK-NEXT: subs.w r0, r9, r0
-; CHECK-NEXT: mov r8, r1
-; CHECK-NEXT: sbcs r0, r1, #0
-; CHECK-NEXT: mvn r4, #-2147483648
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #1
-; CHECK-NEXT: cmp r0, #0
-; CHECK-NEXT: vmov.u16 r0, q4[1]
-; CHECK-NEXT: csetm r11, ne
-; CHECK-NEXT: bl __fixhfdi
-; CHECK-NEXT: mov r7, r0
-; CHECK-NEXT: subs r0, r0, r4
-; CHECK-NEXT: sbcs r0, r1, #0
-; CHECK-NEXT: mov r6, r1
-; CHECK-NEXT: mov.w r0, #0
-; CHECK-NEXT: it lt
-; CHECK-NEXT: movlt r0, #1
-; CHECK-NEXT: cmp r0, #0
-; CHECK-NEXT: vmov.u16 r0, q4[3]
-; CHECK-NEXT: csetm r10, ne
-; CHECK-NEXT: bl __fixhfdi
 ; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: vmov.u16 r0, q4[0]
+; CHECK-NEXT: bl __fixhfdi
+; CHECK-NEXT: mov r5, r0
 ; CHECK-NEXT: vmov.u16 r0, q4[2]
-; CHECK-NEXT: mov r5, r1
 ; CHECK-NEXT: bl __fixhfdi
-; CHECK-NEXT: adr r2, .LCPI6_0
-; CHECK-NEXT: vmov q1[2], q1[0], r9, r7
-; CHECK-NEXT: vldrw.u32 q0, [r2]
-; CHECK-NEXT: adr r2, .LCPI6_1
-; CHECK-NEXT: vmov q4[2], q4[0], r11, r10
-; CHECK-NEXT: vldrw.u32 q2, [r2]
-; CHECK-NEXT: vmov q1[3], q1[1], r8, r6
-; CHECK-NEXT: vmov q4[3], q4[1], r11, r10
-; CHECK-NEXT: vand q1, q1, q4
-; CHECK-NEXT: vbic q4, q2, q4
-; CHECK-NEXT: mvn r12, #-2147483648
-; CHECK-NEXT: subs.w r2, r4, r12
-; CHECK-NEXT: vorr q1, q1, q4
-; CHECK-NEXT: vmov q3[2], q3[0], r0, r4
-; CHECK-NEXT: vmov r2, r3, d2
-; CHECK-NEXT: sbcs r7, r5, #0
-; CHECK-NEXT: vmov q3[3], q3[1], r1, 
r5 -; CHECK-NEXT: vmov r6, r5, d3 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csetm r7, ne -; CHECK-NEXT: subs.w r0, r0, r12 -; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: mov.w r0, #-1 -; CHECK-NEXT: vmov q4[2], q4[0], r1, r7 -; CHECK-NEXT: vmov q4[3], q4[1], r1, r7 -; CHECK-NEXT: vand q3, q3, q4 -; CHECK-NEXT: vbic q2, q2, q4 -; CHECK-NEXT: vorr q2, q3, q2 -; CHECK-NEXT: vmov r1, r7, d5 -; CHECK-NEXT: rsbs.w r2, r2, #-2147483648 -; CHECK-NEXT: sbcs.w r2, r0, r3 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r2, #1 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: rsbs.w r3, r6, #-2147483648 -; CHECK-NEXT: sbcs.w r3, r0, r5 -; CHECK-NEXT: vmov r6, r5, d4 -; CHECK-NEXT: mov.w r3, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne -; CHECK-NEXT: rsbs.w r1, r1, #-2147483648 -; CHECK-NEXT: sbcs.w r1, r0, r7 -; CHECK-NEXT: mov.w r1, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: rsbs.w r7, r6, #-2147483648 -; CHECK-NEXT: sbcs r0, r5 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: vmov.32 q3[1], r0 -; CHECK-NEXT: vmov q3[2], q3[0], r0, r1 -; CHECK-NEXT: vbic q4, q0, q3 -; CHECK-NEXT: vand q2, q2, q3 -; CHECK-NEXT: vmov.32 q3[1], r2 -; CHECK-NEXT: vorr q2, q2, q4 -; CHECK-NEXT: vmov q3[2], q3[0], r2, r3 -; CHECK-NEXT: vbic q0, q0, q3 -; CHECK-NEXT: vand q1, q1, q3 -; CHECK-NEXT: vorr q0, q1, q0 -; CHECK-NEXT: vmov.f32 s1, s2 -; CHECK-NEXT: vmov.f32 s2, s8 -; CHECK-NEXT: vmov.f32 s3, s10 -; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: add sp, #4 -; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} -; CHECK-NEXT: .p2align 4 -; CHECK-NEXT: @ %bb.1: -; CHECK-NEXT: .LCPI6_0: -; CHECK-NEXT: .long 2147483648 @ 0x80000000 -; CHECK-NEXT: .long 4294967295 @ 0xffffffff -; CHECK-NEXT: .long 2147483648 @ 0x80000000 -; CHECK-NEXT: .long 4294967295 @ 0xffffffff -; CHECK-NEXT: .LCPI6_1: -; CHECK-NEXT: .long 2147483647 @ 0x7fffffff -; CHECK-NEXT: .long 0 @ 0x0 -; CHECK-NEXT: .long 2147483647 @ 0x7fffffff -; CHECK-NEXT: .long 0 @ 0x0 +; CHECK-NEXT: vmov q5[2], q5[0], r5, r0 +; CHECK-NEXT: vmov.u16 r0, q4[1] +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: vmov q5[3], q5[1], r0, r4 +; CHECK-NEXT: vmov q0, q5 +; CHECK-NEXT: vpop {d8, d9, d10, d11} +; CHECK-NEXT: pop {r4, r5, r7, pc} entry: %conv = fptosi <4 x half> %x to <4 x i64> %0 = icmp slt <4 x i64> %conv, @@ -688,71 +577,26 @@ define arm_aapcs_vfpcc <4 x i32> @utesth_f16i32(<4 x half> %x) { ; CHECK-LABEL: utesth_f16i32: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} -; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} -; CHECK-NEXT: .pad #4 -; CHECK-NEXT: sub sp, #4 -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: vmov.u16 r0, q0[2] +; CHECK-NEXT: .save {r4, r5, r7, lr} +; CHECK-NEXT: push {r4, r5, r7, lr} +; CHECK-NEXT: .vsave {d8, d9, d10, d11} +; CHECK-NEXT: vpush {d8, d9, d10, d11} +; CHECK-NEXT: vmov.u16 r0, q0[3] ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: bl __fixunshfdi ; CHECK-NEXT: mov r4, r0 -; CHECK-NEXT: vmov.u16 r0, q4[3] -; CHECK-NEXT: mov r8, r1 -; CHECK-NEXT: bl __fixunshfdi -; CHECK-NEXT: mov r6, r0 -; CHECK-NEXT: vmov.u16 r0, 
q4[1] -; CHECK-NEXT: mov r9, r1 +; CHECK-NEXT: vmov.u16 r0, q4[0] ; CHECK-NEXT: bl __fixunshfdi ; CHECK-NEXT: mov r5, r0 -; CHECK-NEXT: vmov.u16 r0, q4[0] -; CHECK-NEXT: mov r7, r1 +; CHECK-NEXT: vmov.u16 r0, q4[2] ; CHECK-NEXT: bl __fixunshfdi -; CHECK-NEXT: subs.w r3, r5, #-1 -; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 -; CHECK-NEXT: sbcs r3, r7, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: mov.w r3, #0 -; CHECK-NEXT: vmov q1[2], q1[0], r4, r6 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r3, #1 -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne -; CHECK-NEXT: subs.w r0, r0, #-1 -; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: subs.w r1, r6, #-1 -; CHECK-NEXT: sbcs r1, r9, #0 -; CHECK-NEXT: mov.w r1, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r1, #1 -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: subs.w r7, r4, #-1 -; CHECK-NEXT: sbcs r7, r8, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r2, #1 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: vmov.32 q2[1], r2 -; CHECK-NEXT: vmov q2[2], q2[0], r2, r1 -; CHECK-NEXT: vand q1, q1, q2 -; CHECK-NEXT: vorn q1, q1, q2 -; CHECK-NEXT: vmov.32 q2[1], r0 -; CHECK-NEXT: vmov q2[2], q2[0], r0, r3 -; CHECK-NEXT: vand q0, q0, q2 -; CHECK-NEXT: vorn q0, q0, q2 -; CHECK-NEXT: vmov.f32 s1, s2 -; CHECK-NEXT: vmov.f32 s2, s4 -; CHECK-NEXT: vmov.f32 s3, s6 -; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: add sp, #4 -; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +; CHECK-NEXT: vmov q5[2], q5[0], r5, r0 +; CHECK-NEXT: vmov.u16 r0, q4[1] +; CHECK-NEXT: bl __fixunshfdi +; CHECK-NEXT: vmov q5[3], q5[1], r0, r4 +; CHECK-NEXT: vmov q0, q5 +; CHECK-NEXT: vpop {d8, d9, d10, d11} +; CHECK-NEXT: pop {r4, r5, r7, pc} entry: %conv = fptoui <4 x half> %x to <4 x i64> %0 = icmp ult <4 x i64> %conv, @@ -764,107 +608,67 @@ define arm_aapcs_vfpcc <4 x i32> @ustest_f16i32(<4 x half> %x) { ; CHECK-LABEL: ustest_f16i32: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr} -; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr} -; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13} -; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13} -; CHECK-NEXT: vmov.u16 r0, q0[3] +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov.u16 r0, q0[2] ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: bl __fixhfdi ; CHECK-NEXT: mov r4, r0 -; CHECK-NEXT: vmov.u16 r0, q4[2] -; CHECK-NEXT: mov r5, r1 -; CHECK-NEXT: bl __fixhfdi -; CHECK-NEXT: subs.w r2, r4, #-1 -; CHECK-NEXT: vmov q0[2], q0[0], r0, r4 -; CHECK-NEXT: sbcs r2, r5, #0 -; CHECK-NEXT: vmov.i64 q6, #0xffffffff -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r5 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r2, #1 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: subs.w r0, r0, #-1 -; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: mov.w r6, #0 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: vmov q1[2], q1[0], r0, r2 -; CHECK-NEXT: vmov q1[3], q1[1], r0, r2 ; CHECK-NEXT: vmov.u16 r0, q4[1] -; CHECK-NEXT: vand q0, q0, q1 -; CHECK-NEXT: vbic q1, q6, q1 -; CHECK-NEXT: vorr q5, q0, q1 -; CHECK-NEXT: vmov r7, r8, d11 +; CHECK-NEXT: mov r8, r1 ; CHECK-NEXT: bl __fixhfdi -; CHECK-NEXT: mov r4, r0 
+; CHECK-NEXT: mov r6, r0 +; CHECK-NEXT: vmov.u16 r0, q4[3] +; CHECK-NEXT: mov r9, r1 +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: mov r5, r0 ; CHECK-NEXT: vmov.u16 r0, q4[0] -; CHECK-NEXT: mov r5, r1 +; CHECK-NEXT: mov r7, r1 ; CHECK-NEXT: bl __fixhfdi -; CHECK-NEXT: subs.w r2, r4, #-1 -; CHECK-NEXT: vmov q0[2], q0[0], r0, r4 -; CHECK-NEXT: sbcs r2, r5, #0 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r5 +; CHECK-NEXT: rsbs r3, r5, #0 ; CHECK-NEXT: mov.w r2, #0 +; CHECK-NEXT: sbcs.w r3, r2, r7 +; CHECK-NEXT: vmov q0[2], q0[0], r0, r6 +; CHECK-NEXT: mov.w r3, #0 +; CHECK-NEXT: vmov q1[2], q1[0], r4, r5 ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r2, #1 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: subs.w r0, r0, #-1 -; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: vmov r1, r3, d10 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: rsbs r7, r7, #0 -; CHECK-NEXT: vmov q1[2], q1[0], r0, r2 -; CHECK-NEXT: sbcs.w r7, r6, r8 -; CHECK-NEXT: vmov q1[3], q1[1], r0, r2 +; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: rsbs r7, r4, #0 +; CHECK-NEXT: sbcs.w r7, r2, r8 ; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: vand q0, q0, q1 -; CHECK-NEXT: vbic q1, q6, q1 -; CHECK-NEXT: vorr q0, q0, q1 ; CHECK-NEXT: it lt ; CHECK-NEXT: movlt r7, #1 -; CHECK-NEXT: vmov r0, r2, d1 ; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: vmov r5, r4, d0 ; CHECK-NEXT: csetm r7, ne -; CHECK-NEXT: rsbs r1, r1, #0 -; CHECK-NEXT: sbcs.w r1, r6, r3 -; CHECK-NEXT: mov.w r1, #0 +; CHECK-NEXT: rsbs r6, r6, #0 +; CHECK-NEXT: sbcs.w r6, r2, r9 +; CHECK-NEXT: vmov q3[2], q3[0], r7, r3 +; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: vand q1, q1, q3 ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: vmov q2[2], q2[0], r1, r7 -; CHECK-NEXT: vand q2, q5, q2 +; CHECK-NEXT: movlt r6, #1 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csetm r6, ne ; CHECK-NEXT: rsbs r0, r0, #0 -; CHECK-NEXT: sbcs.w r0, r6, r2 -; CHECK-NEXT: mov.w r0, #0 +; CHECK-NEXT: sbcs.w r0, r2, r1 ; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: movlt r2, #1 +; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: rsbs r2, r5, #0 -; CHECK-NEXT: sbcs.w r2, r6, r4 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 -; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: vmov q1[2], q1[0], r2, r0 -; CHECK-NEXT: vand q0, q0, q1 +; CHECK-NEXT: vmov q2[2], q2[0], r0, r6 +; CHECK-NEXT: vand q0, q0, q2 ; CHECK-NEXT: vmov.f32 s1, s2 -; CHECK-NEXT: vmov.f32 s2, s8 -; CHECK-NEXT: vmov.f32 s3, s10 -; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13} -; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc} +; CHECK-NEXT: vmov.f32 s2, s4 +; CHECK-NEXT: vmov.f32 s3, s6 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} entry: %conv = fptosi <4 x half> %x to <4 x i64> %0 = icmp slt <4 x i64> %conv, @@ -1190,44 +994,34 @@ define arm_aapcs_vfpcc <8 x i16> @utesth_f16i16(<8 x half> %x) { ; CHECK-LABEL: utesth_f16i16: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: .pad #16 -; CHECK-NEXT: sub sp, #16 -; CHECK-NEXT: vmovx.f16 s12, s2 -; CHECK-NEXT: vmovx.f16 s10, s3 -; CHECK-NEXT: vcvt.u32.f16 s14, s3 -; CHECK-NEXT: vcvt.u32.f16 s2, s2 -; CHECK-NEXT: vcvt.u32.f16 s10, s10 -; CHECK-NEXT: vcvt.u32.f16 s12, s12 -; CHECK-NEXT: vmov r1, s14 -; CHECK-NEXT: 
vmovx.f16 s8, s1 -; CHECK-NEXT: vmov r2, s2 -; CHECK-NEXT: vcvt.u32.f16 s6, s0 -; CHECK-NEXT: vmovx.f16 s0, s0 -; CHECK-NEXT: vmov q4[2], q4[0], r2, r1 -; CHECK-NEXT: vcvt.u32.f16 s4, s1 -; CHECK-NEXT: vmov r1, s10 -; CHECK-NEXT: vmov r2, s12 -; CHECK-NEXT: vcvt.u32.f16 s8, s8 -; CHECK-NEXT: vmov q4[3], q4[1], r2, r1 +; CHECK-NEXT: vmovx.f16 s6, s2 +; CHECK-NEXT: vcvt.u32.f16 s12, s2 +; CHECK-NEXT: vmovx.f16 s2, s0 ; CHECK-NEXT: vcvt.u32.f16 s0, s0 -; CHECK-NEXT: vmov r1, s4 -; CHECK-NEXT: vqmovnb.u32 q3, q4 -; CHECK-NEXT: vmov r2, s6 -; CHECK-NEXT: mov r0, sp -; CHECK-NEXT: vmov q1[2], q1[0], r2, r1 -; CHECK-NEXT: vmov r1, s8 -; CHECK-NEXT: vmov r2, s0 -; CHECK-NEXT: vmovlb.u16 q3, q3 -; CHECK-NEXT: vmov q1[3], q1[1], r2, r1 -; CHECK-NEXT: vstrh.32 q3, [r0, #8] -; CHECK-NEXT: vqmovnb.u32 q0, q1 -; CHECK-NEXT: vmovlb.u16 q0, q0 -; CHECK-NEXT: vstrh.32 q0, [r0] -; CHECK-NEXT: vldrw.u32 q0, [r0] -; CHECK-NEXT: add sp, #16 -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: vcvt.u32.f16 s14, s2 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: vmovx.f16 s4, s3 +; CHECK-NEXT: vmovx.f16 s10, s1 +; CHECK-NEXT: vcvt.u32.f16 s8, s3 +; CHECK-NEXT: vcvt.u32.f16 s5, s1 +; CHECK-NEXT: vmov.16 q0[0], r0 +; CHECK-NEXT: vmov r0, s14 +; CHECK-NEXT: vmov.16 q0[1], r0 +; CHECK-NEXT: vmov r0, s5 +; CHECK-NEXT: vcvt.u32.f16 s10, s10 +; CHECK-NEXT: vmov.16 q0[2], r0 +; CHECK-NEXT: vmov r0, s10 +; CHECK-NEXT: vcvt.u32.f16 s6, s6 +; CHECK-NEXT: vmov.16 q0[3], r0 +; CHECK-NEXT: vmov r0, s12 +; CHECK-NEXT: vmov.16 q0[4], r0 +; CHECK-NEXT: vmov r0, s6 +; CHECK-NEXT: vmov.16 q0[5], r0 +; CHECK-NEXT: vmov r0, s8 +; CHECK-NEXT: vcvt.u32.f16 s4, s4 +; CHECK-NEXT: vmov.16 q0[6], r0 +; CHECK-NEXT: vmov r0, s4 +; CHECK-NEXT: vmov.16 q0[7], r0 ; CHECK-NEXT: bx lr entry: %conv = fptoui <8 x half> %x to <8 x i32> @@ -1244,38 +1038,35 @@ ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: .pad #16 ; CHECK-NEXT: sub sp, #16 +; CHECK-NEXT: vmovx.f16 s6, s0 ; CHECK-NEXT: vcvt.s32.f16 s10, s0 -; CHECK-NEXT: vmovx.f16 s0, s0 -; CHECK-NEXT: vcvt.s32.f16 s14, s0 ; CHECK-NEXT: vmovx.f16 s0, s3 -; CHECK-NEXT: vcvt.s32.f16 s9, s0 +; CHECK-NEXT: vcvt.s32.f16 s5, s3 +; CHECK-NEXT: vcvt.s32.f16 s12, s0 ; CHECK-NEXT: vmovx.f16 s0, s2 -; CHECK-NEXT: vcvt.s32.f16 s13, s3 -; CHECK-NEXT: vcvt.s32.f16 s15, s2 -; CHECK-NEXT: vcvt.s32.f16 s11, s0 -; CHECK-NEXT: vmov r1, s13 -; CHECK-NEXT: vmov r2, s15 +; CHECK-NEXT: vcvt.s32.f16 s7, s2 +; CHECK-NEXT: vcvt.s32.f16 s14, s0 +; CHECK-NEXT: vmov r1, s5 ; CHECK-NEXT: vmovx.f16 s4, s1 -; CHECK-NEXT: vmov q4[2], q4[0], r2, r1 +; CHECK-NEXT: vmov r2, s7 ; CHECK-NEXT: vcvt.s32.f16 s8, s1 -; CHECK-NEXT: vmov r1, s9 -; CHECK-NEXT: vcvt.s32.f16 s12, s4 -; CHECK-NEXT: vmov r2, s11 -; CHECK-NEXT: vmov.i32 q0, #0xffff +; CHECK-NEXT: vmov q4[2], q4[0], r2, r1 +; CHECK-NEXT: vmov r1, s12 +; CHECK-NEXT: vmov r2, s14 +; CHECK-NEXT: vcvt.s32.f16 s4, s4 ; CHECK-NEXT: vmov q4[3], q4[1], r2, r1 +; CHECK-NEXT: vcvt.s32.f16 s6, s6 ; CHECK-NEXT: vmov r1, s8 +; CHECK-NEXT: vmov.i32 q0, #0x0 ; CHECK-NEXT: vmov r2, s10 -; CHECK-NEXT: vmov.i32 q1, #0x0 +; CHECK-NEXT: mov r0, sp ; CHECK-NEXT: vmov q2[2], q2[0], r2, r1 -; CHECK-NEXT: vmov r1, s12 -; CHECK-NEXT: vmov r2, s14 -; CHECK-NEXT: vmin.s32 q4, q4, q0 +; CHECK-NEXT: vmov r1, s4 +; CHECK-NEXT: vmov r2, s6 +; CHECK-NEXT: vmax.s32 q3, q4, q0 ; CHECK-NEXT: vmov q2[3], q2[1], r2, r1 -; CHECK-NEXT: mov r0, sp -; CHECK-NEXT: vmin.s32 q0, q2, q0 -; CHECK-NEXT: vmax.s32 q4, q4, q1 -; CHECK-NEXT: vmax.s32 q0, q0, q1 -; CHECK-NEXT: vstrh.32 q4, [r0, #8] +; CHECK-NEXT: vstrh.32 q3, [r0, #8] +; CHECK-NEXT: 
vmax.s32 q0, q2, q0 ; CHECK-NEXT: vstrh.32 q0, [r0] ; CHECK-NEXT: vldrw.u32 q0, [r0] ; CHECK-NEXT: add sp, #16 @@ -1753,41 +1544,21 @@ define arm_aapcs_vfpcc <2 x i64> @utesth_f16i64(<2 x half> %x) { ; CHECK-LABEL: utesth_f16i64: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, r7, lr} -; CHECK-NEXT: push {r4, r5, r6, r7, lr} -; CHECK-NEXT: .pad #4 -; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .save {r4, r5, r7, lr} +; CHECK-NEXT: push {r4, r5, r7, lr} ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: vmov.u16 r0, q0[1] ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: bl __fixunshfti -; CHECK-NEXT: mov r4, r1 -; CHECK-NEXT: subs r1, r2, #1 -; CHECK-NEXT: sbcs r1, r3, #0 -; CHECK-NEXT: mov.w r6, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r6, #1 -; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r7, r0, r6, ne +; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: vmov.u16 r0, q4[0] -; CHECK-NEXT: movs r5, #0 +; CHECK-NEXT: mov r5, r1 ; CHECK-NEXT: bl __fixunshfti -; CHECK-NEXT: subs r2, #1 -; CHECK-NEXT: sbcs r2, r3, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r5, #1 -; CHECK-NEXT: cmp r5, #0 -; CHECK-NEXT: csel r0, r0, r5, ne -; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r2, r4, r6, ne -; CHECK-NEXT: cmp r5, #0 -; CHECK-NEXT: csel r1, r1, r5, ne -; CHECK-NEXT: vmov q0[2], q0[0], r0, r7 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: vmov q0[2], q0[0], r0, r4 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r5 ; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: add sp, #4 -; CHECK-NEXT: pop {r4, r5, r6, r7, pc} +; CHECK-NEXT: pop {r4, r5, r7, pc} entry: %conv = fptoui <2 x half> %x to <2 x i128> %0 = icmp ult <2 x i128> %conv, @@ -2251,198 +2022,107 @@ ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: cset r1, gt ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r1, r2, r1, ne -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: vmov r0, r5, d8 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csel r8, r2, r1, ne -; CHECK-NEXT: bl __aeabi_f2lz -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r2, mi -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r9, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csel r0, r0, r2, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r1, r1, r7, mi -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r2, gt -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r2, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r6, r0, r2, ne -; CHECK-NEXT: mov r0, r4 -; CHECK-NEXT: bl __aeabi_f2lz -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r2, mi -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r9, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csel r0, r0, r2, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r1, r1, r7, mi -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r2, gt -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r2, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r4, r0, r2, ne -; CHECK-NEXT: mov r0, r5 -; CHECK-NEXT: bl __aeabi_f2lz -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: vmov q0[2], q0[0], r6, r8 -; CHECK-NEXT: cset r2, mi -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r9, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csel r0, r0, r2, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r1, r1, r7, mi -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r2, gt -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r2, ne -; 
CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r0, r0, r2, ne -; CHECK-NEXT: vmov q0[3], q0[1], r0, r4 -; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: add sp, #4 -; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} -entry: - %conv = fptosi <4 x float> %x to <4 x i64> - %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> ) - %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> zeroinitializer) - %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> - ret <4 x i32> %conv6 -} - -define arm_aapcs_vfpcc <4 x i32> @stest_f16i32_mm(<4 x half> %x) { -; CHECK-LABEL: stest_f16i32_mm: -; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} -; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} -; CHECK-NEXT: .pad #4 -; CHECK-NEXT: sub sp, #4 -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: vmov.u16 r0, q0[2] -; CHECK-NEXT: vmov q4, q0 -; CHECK-NEXT: bl __fixhfdi -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: mvn r4, #-2147483648 -; CHECK-NEXT: cset r2, mi -; CHECK-NEXT: mov.w r9, #0 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: mov.w r5, #-2147483648 -; CHECK-NEXT: csel r2, r0, r4, ne -; CHECK-NEXT: cmp r0, r4 -; CHECK-NEXT: csel r0, r0, r4, lo -; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csel r1, r2, r1, ne +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: vmov r0, r5, d8 ; CHECK-NEXT: cset r3, eq ; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csel r0, r0, r2, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r1, r1, r9, mi -; CHECK-NEXT: cmp.w r1, #-1 -; CHECK-NEXT: cset r2, gt -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r5, ne -; CHECK-NEXT: cmp.w r0, #-2147483648 -; CHECK-NEXT: csel r0, r0, r5, hi -; CHECK-NEXT: adds r1, #1 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r8, r0, r2, ne -; CHECK-NEXT: vmov.u16 r0, q4[0] -; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: csel r8, r2, r1, ne +; CHECK-NEXT: bl __aeabi_f2lz ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r2, mi ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r4, ne -; CHECK-NEXT: cmp r0, r4 -; CHECK-NEXT: csel r0, r0, r4, lo +; CHECK-NEXT: csel r2, r0, r9, ne ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r3, eq ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csel r0, r0, r2, ne ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r1, r1, r9, mi -; CHECK-NEXT: cmp.w r1, #-1 +; CHECK-NEXT: csel r1, r1, r7, mi +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r2, gt ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r5, ne -; CHECK-NEXT: cmp.w r0, #-2147483648 -; CHECK-NEXT: csel r0, r0, r5, hi -; CHECK-NEXT: adds r1, #1 +; CHECK-NEXT: csel r2, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r1, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r7, r0, r2, ne -; CHECK-NEXT: vmov.u16 r0, q4[3] -; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: csel r6, r0, r2, ne +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: bl __aeabi_f2lz ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r2, mi ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r4, ne -; CHECK-NEXT: cmp r0, r4 -; CHECK-NEXT: csel r0, r0, r4, lo +; CHECK-NEXT: csel r2, r0, r9, ne ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r3, eq ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csel r0, r0, r2, ne ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r1, r1, r9, mi -; CHECK-NEXT: cmp.w r1, #-1 +; CHECK-NEXT: csel r1, r1, r7, mi +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r2, gt ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r5, ne -; CHECK-NEXT: cmp.w r0, #-2147483648 -; 
CHECK-NEXT: csel r0, r0, r5, hi -; CHECK-NEXT: adds r1, #1 +; CHECK-NEXT: csel r2, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r1, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r6, r0, r2, ne -; CHECK-NEXT: vmov.u16 r0, q4[1] -; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: csel r4, r0, r2, ne +; CHECK-NEXT: mov r0, r5 +; CHECK-NEXT: bl __aeabi_f2lz ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: vmov q0[2], q0[0], r7, r8 +; CHECK-NEXT: vmov q0[2], q0[0], r6, r8 ; CHECK-NEXT: cset r2, mi ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r4, ne -; CHECK-NEXT: cmp r0, r4 -; CHECK-NEXT: csel r0, r0, r4, lo +; CHECK-NEXT: csel r2, r0, r9, ne ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r3, eq ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csel r0, r0, r2, ne ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r1, r1, r9, mi -; CHECK-NEXT: cmp.w r1, #-1 +; CHECK-NEXT: csel r1, r1, r7, mi +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r2, gt ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r5, ne -; CHECK-NEXT: cmp.w r0, #-2147483648 -; CHECK-NEXT: csel r0, r0, r5, hi -; CHECK-NEXT: adds r1, #1 +; CHECK-NEXT: csel r2, r0, r2, ne +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r1, eq ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csel r0, r0, r2, ne -; CHECK-NEXT: vmov q0[3], q0[1], r0, r6 +; CHECK-NEXT: vmov q0[3], q0[1], r0, r4 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +entry: + %conv = fptosi <4 x float> %x to <4 x i64> + %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> ) + %spec.store.select7 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %spec.store.select, <4 x i64> zeroinitializer) + %conv6 = trunc <4 x i64> %spec.store.select7 to <4 x i32> + ret <4 x i32> %conv6 +} + +define arm_aapcs_vfpcc <4 x i32> @stest_f16i32_mm(<4 x half> %x) { +; CHECK-LABEL: stest_f16i32_mm: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r7, lr} +; CHECK-NEXT: push {r4, r5, r7, lr} +; CHECK-NEXT: .vsave {d8, d9, d10, d11} +; CHECK-NEXT: vpush {d8, d9, d10, d11} +; CHECK-NEXT: vmov.u16 r0, q0[3] +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: vmov.u16 r0, q4[0] +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: mov r5, r0 +; CHECK-NEXT: vmov.u16 r0, q4[2] +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: vmov q5[2], q5[0], r5, r0 +; CHECK-NEXT: vmov.u16 r0, q4[1] +; CHECK-NEXT: bl __fixhfdi +; CHECK-NEXT: vmov q5[3], q5[1], r0, r4 +; CHECK-NEXT: vmov q0, q5 +; CHECK-NEXT: vpop {d8, d9, d10, d11} +; CHECK-NEXT: pop {r4, r5, r7, pc} entry: %conv = fptosi <4 x half> %x to <4 x i64> %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> ) @@ -2454,43 +2134,26 @@ define arm_aapcs_vfpcc <4 x i32> @utesth_f16i32_mm(<4 x half> %x) { ; CHECK-LABEL: utesth_f16i32_mm: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, r7, lr} -; CHECK-NEXT: push {r4, r5, r6, r7, lr} -; CHECK-NEXT: .pad #4 -; CHECK-NEXT: sub sp, #4 -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: vmov.u16 r0, q0[2] +; CHECK-NEXT: .save {r4, r5, r7, lr} +; CHECK-NEXT: push {r4, r5, r7, lr} +; CHECK-NEXT: .vsave {d8, d9, d10, d11} +; CHECK-NEXT: vpush {d8, d9, d10, d11} +; CHECK-NEXT: vmov.u16 r0, q0[3] ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: bl __fixunshfdi -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: mov.w r4, #-1 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r5, r0, r4, ne +; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: vmov.u16 r0, q4[0] ; CHECK-NEXT: bl __fixunshfdi -; 
CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r6, r0, r4, ne -; CHECK-NEXT: vmov.u16 r0, q4[3] +; CHECK-NEXT: mov r5, r0 +; CHECK-NEXT: vmov.u16 r0, q4[2] ; CHECK-NEXT: bl __fixunshfdi -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r7, r0, r4, ne +; CHECK-NEXT: vmov q5[2], q5[0], r5, r0 ; CHECK-NEXT: vmov.u16 r0, q4[1] ; CHECK-NEXT: bl __fixunshfdi -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: vmov q0[2], q0[0], r6, r5 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r0, r0, r4, ne -; CHECK-NEXT: vmov q0[3], q0[1], r0, r7 -; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: add sp, #4 -; CHECK-NEXT: pop {r4, r5, r6, r7, pc} +; CHECK-NEXT: vmov q5[3], q5[1], r0, r4 +; CHECK-NEXT: vmov q0, q5 +; CHECK-NEXT: vpop {d8, d9, d10, d11} +; CHECK-NEXT: pop {r4, r5, r7, pc} entry: %conv = fptoui <4 x half> %x to <4 x i64> %spec.store.select = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %conv, <4 x i64> ) @@ -2501,66 +2164,34 @@ define arm_aapcs_vfpcc <4 x i32> @ustest_f16i32_mm(<4 x half> %x) { ; CHECK-LABEL: ustest_f16i32_mm: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr} -; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: .save {r4, r5, r6, lr} +; CHECK-NEXT: push {r4, r5, r6, lr} ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: vmov.u16 r0, q0[2] ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: bl __fixhfdi ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: mov.w r4, #-1 -; CHECK-NEXT: cset r2, mi -; CHECK-NEXT: movs r5, #0 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r4, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csel r0, r0, r2, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r1, r1, r5, mi -; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r2, gt ; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: csel r2, r0, r2, ne ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r1, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r8, r0, r2, ne +; CHECK-NEXT: csel r4, r0, r2, ne ; CHECK-NEXT: vmov.u16 r0, q4[0] ; CHECK-NEXT: bl __fixhfdi ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r2, mi -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r4, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csel r0, r0, r2, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r1, r1, r5, mi -; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r2, gt ; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: csel r2, r0, r2, ne ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r1, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r7, r0, r2, ne +; CHECK-NEXT: csel r5, r0, r2, ne ; CHECK-NEXT: vmov.u16 r0, q4[3] ; CHECK-NEXT: bl __fixhfdi ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r2, mi -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r4, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csel r0, r0, r2, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r1, r1, r5, mi -; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: cset r2, gt ; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: csel r2, r0, r2, ne @@ -2571,17 +2202,7 @@ ; CHECK-NEXT: vmov.u16 r0, q4[1] ; CHECK-NEXT: bl __fixhfdi ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: vmov q0[2], q0[0], r7, r8 -; CHECK-NEXT: cset r2, mi -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csel r2, r0, r4, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csel r0, r0, r2, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r1, r1, 
r5, mi -; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: vmov q0[2], q0[0], r5, r4 ; CHECK-NEXT: cset r2, gt ; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: csel r2, r0, r2, ne @@ -2591,7 +2212,7 @@ ; CHECK-NEXT: csel r0, r0, r2, ne ; CHECK-NEXT: vmov q0[3], q0[1], r0, r6 ; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc} +; CHECK-NEXT: pop {r4, r5, r6, pc} entry: %conv = fptosi <4 x half> %x to <4 x i64> %spec.store.select = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %conv, <4 x i64> ) @@ -2899,44 +2520,34 @@ define arm_aapcs_vfpcc <8 x i16> @utesth_f16i16_mm(<8 x half> %x) { ; CHECK-LABEL: utesth_f16i16_mm: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: .pad #16 -; CHECK-NEXT: sub sp, #16 -; CHECK-NEXT: vmovx.f16 s12, s2 -; CHECK-NEXT: vmovx.f16 s10, s3 -; CHECK-NEXT: vcvt.u32.f16 s14, s3 -; CHECK-NEXT: vcvt.u32.f16 s2, s2 -; CHECK-NEXT: vcvt.u32.f16 s10, s10 -; CHECK-NEXT: vcvt.u32.f16 s12, s12 -; CHECK-NEXT: vmov r1, s14 -; CHECK-NEXT: vmovx.f16 s8, s1 -; CHECK-NEXT: vmov r2, s2 -; CHECK-NEXT: vcvt.u32.f16 s6, s0 -; CHECK-NEXT: vmovx.f16 s0, s0 -; CHECK-NEXT: vmov q4[2], q4[0], r2, r1 -; CHECK-NEXT: vcvt.u32.f16 s4, s1 -; CHECK-NEXT: vmov r1, s10 -; CHECK-NEXT: vmov r2, s12 -; CHECK-NEXT: vcvt.u32.f16 s8, s8 -; CHECK-NEXT: vmov q4[3], q4[1], r2, r1 +; CHECK-NEXT: vmovx.f16 s6, s2 +; CHECK-NEXT: vcvt.u32.f16 s12, s2 +; CHECK-NEXT: vmovx.f16 s2, s0 ; CHECK-NEXT: vcvt.u32.f16 s0, s0 -; CHECK-NEXT: vmov r1, s4 -; CHECK-NEXT: vqmovnb.u32 q3, q4 -; CHECK-NEXT: vmov r2, s6 -; CHECK-NEXT: mov r0, sp -; CHECK-NEXT: vmov q1[2], q1[0], r2, r1 -; CHECK-NEXT: vmov r1, s8 -; CHECK-NEXT: vmov r2, s0 -; CHECK-NEXT: vmovlb.u16 q3, q3 -; CHECK-NEXT: vmov q1[3], q1[1], r2, r1 -; CHECK-NEXT: vstrh.32 q3, [r0, #8] -; CHECK-NEXT: vqmovnb.u32 q0, q1 -; CHECK-NEXT: vmovlb.u16 q0, q0 -; CHECK-NEXT: vstrh.32 q0, [r0] -; CHECK-NEXT: vldrw.u32 q0, [r0] -; CHECK-NEXT: add sp, #16 -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: vcvt.u32.f16 s14, s2 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: vmovx.f16 s4, s3 +; CHECK-NEXT: vmovx.f16 s10, s1 +; CHECK-NEXT: vcvt.u32.f16 s8, s3 +; CHECK-NEXT: vcvt.u32.f16 s5, s1 +; CHECK-NEXT: vmov.16 q0[0], r0 +; CHECK-NEXT: vmov r0, s14 +; CHECK-NEXT: vmov.16 q0[1], r0 +; CHECK-NEXT: vmov r0, s5 +; CHECK-NEXT: vcvt.u32.f16 s10, s10 +; CHECK-NEXT: vmov.16 q0[2], r0 +; CHECK-NEXT: vmov r0, s10 +; CHECK-NEXT: vcvt.u32.f16 s6, s6 +; CHECK-NEXT: vmov.16 q0[3], r0 +; CHECK-NEXT: vmov r0, s12 +; CHECK-NEXT: vmov.16 q0[4], r0 +; CHECK-NEXT: vmov r0, s6 +; CHECK-NEXT: vmov.16 q0[5], r0 +; CHECK-NEXT: vmov r0, s8 +; CHECK-NEXT: vcvt.u32.f16 s4, s4 +; CHECK-NEXT: vmov.16 q0[6], r0 +; CHECK-NEXT: vmov r0, s4 +; CHECK-NEXT: vmov.16 q0[7], r0 ; CHECK-NEXT: bx lr entry: %conv = fptoui <8 x half> %x to <8 x i32> @@ -2952,38 +2563,35 @@ ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: .pad #16 ; CHECK-NEXT: sub sp, #16 +; CHECK-NEXT: vmovx.f16 s6, s0 ; CHECK-NEXT: vcvt.s32.f16 s10, s0 -; CHECK-NEXT: vmovx.f16 s0, s0 -; CHECK-NEXT: vcvt.s32.f16 s14, s0 ; CHECK-NEXT: vmovx.f16 s0, s3 -; CHECK-NEXT: vcvt.s32.f16 s9, s0 +; CHECK-NEXT: vcvt.s32.f16 s5, s3 +; CHECK-NEXT: vcvt.s32.f16 s12, s0 ; CHECK-NEXT: vmovx.f16 s0, s2 -; CHECK-NEXT: vcvt.s32.f16 s13, s3 -; CHECK-NEXT: vcvt.s32.f16 s15, s2 -; CHECK-NEXT: vcvt.s32.f16 s11, s0 -; CHECK-NEXT: vmov r1, s13 -; CHECK-NEXT: vmov r2, s15 +; CHECK-NEXT: vcvt.s32.f16 s7, s2 +; CHECK-NEXT: vcvt.s32.f16 s14, s0 +; CHECK-NEXT: vmov r1, s5 ; CHECK-NEXT: vmovx.f16 s4, s1 -; CHECK-NEXT: vmov q4[2], q4[0], r2, r1 +; 
CHECK-NEXT: vmov r2, s7 ; CHECK-NEXT: vcvt.s32.f16 s8, s1 -; CHECK-NEXT: vmov r1, s9 -; CHECK-NEXT: vcvt.s32.f16 s12, s4 -; CHECK-NEXT: vmov r2, s11 -; CHECK-NEXT: vmov.i32 q0, #0xffff +; CHECK-NEXT: vmov q4[2], q4[0], r2, r1 +; CHECK-NEXT: vmov r1, s12 +; CHECK-NEXT: vmov r2, s14 +; CHECK-NEXT: vcvt.s32.f16 s4, s4 ; CHECK-NEXT: vmov q4[3], q4[1], r2, r1 +; CHECK-NEXT: vcvt.s32.f16 s6, s6 ; CHECK-NEXT: vmov r1, s8 +; CHECK-NEXT: vmov.i32 q0, #0x0 ; CHECK-NEXT: vmov r2, s10 -; CHECK-NEXT: vmov.i32 q1, #0x0 +; CHECK-NEXT: mov r0, sp ; CHECK-NEXT: vmov q2[2], q2[0], r2, r1 -; CHECK-NEXT: vmov r1, s12 -; CHECK-NEXT: vmov r2, s14 -; CHECK-NEXT: vmin.s32 q4, q4, q0 +; CHECK-NEXT: vmov r1, s4 +; CHECK-NEXT: vmov r2, s6 +; CHECK-NEXT: vmax.s32 q3, q4, q0 ; CHECK-NEXT: vmov q2[3], q2[1], r2, r1 -; CHECK-NEXT: mov r0, sp -; CHECK-NEXT: vmin.s32 q0, q2, q0 -; CHECK-NEXT: vmax.s32 q4, q4, q1 -; CHECK-NEXT: vmax.s32 q0, q0, q1 -; CHECK-NEXT: vstrh.32 q4, [r0, #8] +; CHECK-NEXT: vstrh.32 q3, [r0, #8] +; CHECK-NEXT: vmax.s32 q0, q2, q0 ; CHECK-NEXT: vstrh.32 q0, [r0] ; CHECK-NEXT: vldrw.u32 q0, [r0] ; CHECK-NEXT: add sp, #16 @@ -3671,53 +3279,21 @@ define arm_aapcs_vfpcc <2 x i64> @utesth_f16i64_mm(<2 x half> %x) { ; CHECK-LABEL: utesth_f16i64_mm: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} -; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} -; CHECK-NEXT: .pad #4 -; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .save {r4, r5, r7, lr} +; CHECK-NEXT: push {r4, r5, r7, lr} ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: vmov.u16 r0, q0[1] ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: bl __fixunshfti -; CHECK-NEXT: mov r8, r1 -; CHECK-NEXT: eor r1, r2, #1 -; CHECK-NEXT: orr.w r6, r1, r3 -; CHECK-NEXT: subs r1, r2, #1 -; CHECK-NEXT: sbcs r1, r3, #0 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r7, #1 -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r0, r0, r7, ne -; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r9, r0, r6, ne +; CHECK-NEXT: mov r4, r0 ; CHECK-NEXT: vmov.u16 r0, q4[0] -; CHECK-NEXT: movs r5, #0 +; CHECK-NEXT: mov r5, r1 ; CHECK-NEXT: bl __fixunshfti -; CHECK-NEXT: eor r4, r2, #1 -; CHECK-NEXT: subs r2, #1 -; CHECK-NEXT: sbcs r2, r3, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r5, #1 -; CHECK-NEXT: cmp r5, #0 -; CHECK-NEXT: orr.w r4, r4, r3 -; CHECK-NEXT: csel r0, r0, r5, ne -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r0, r0, r4, ne -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r2, r8, r7, ne -; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r2, r2, r6, ne -; CHECK-NEXT: cmp r5, #0 -; CHECK-NEXT: csel r1, r1, r5, ne -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r1, r1, r4, ne -; CHECK-NEXT: vmov q0[2], q0[0], r0, r9 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: vmov q0[2], q0[0], r0, r4 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r5 ; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: add sp, #4 -; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +; CHECK-NEXT: pop {r4, r5, r7, pc} entry: %conv = fptoui <2 x half> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> ) diff --git a/llvm/test/Transforms/InstSimplify/fptoi-range.ll b/llvm/test/Transforms/InstSimplify/fptoi-range.ll --- a/llvm/test/Transforms/InstSimplify/fptoi-range.ll +++ b/llvm/test/Transforms/InstSimplify/fptoi-range.ll @@ -14,9 +14,7 @@ define i1 @f16_si_max2(half %f) { ; CHECK-LABEL: @f16_si_max2( -; CHECK-NEXT: [[I:%.*]] = fptosi half [[F:%.*]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp sgt i32 [[I]], 65504 -; CHECK-NEXT: ret i1 
[[C]] +; CHECK-NEXT: ret i1 false ; %i = fptosi half %f to i32 %c = icmp sgt i32 %i, 65504 @@ -36,9 +34,7 @@ define i1 @f16_si_min1(half %f) { ; CHECK-LABEL: @f16_si_min1( -; CHECK-NEXT: [[I:%.*]] = fptosi half [[F:%.*]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp sge i32 [[I]], -65504 -; CHECK-NEXT: ret i1 [[C]] +; CHECK-NEXT: ret i1 true ; %i = fptosi half %f to i32 %c = icmp sge i32 %i, -65504 @@ -80,9 +76,7 @@ define i1 @f16_ui_max2(half %f) { ; CHECK-LABEL: @f16_ui_max2( -; CHECK-NEXT: [[I:%.*]] = fptoui half [[F:%.*]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp sgt i32 [[I]], 65504 -; CHECK-NEXT: ret i1 [[C]] +; CHECK-NEXT: ret i1 false ; %i = fptoui half %f to i32 %c = icmp sgt i32 %i, 65504 @@ -102,9 +96,7 @@ define i1 @f16_ui16_max3(half %f) { ; CHECK-LABEL: @f16_ui16_max3( -; CHECK-NEXT: [[I:%.*]] = fptoui half [[F:%.*]] to i16 -; CHECK-NEXT: [[C:%.*]] = icmp ule i16 [[I]], -32 -; CHECK-NEXT: ret i1 [[C]] +; CHECK-NEXT: ret i1 true ; %i = fptoui half %f to i16 %c = icmp ule i16 %i, 65504 @@ -113,9 +105,7 @@ define i1 @f16_ui_min1(half %f) { ; CHECK-LABEL: @f16_ui_min1( -; CHECK-NEXT: [[I:%.*]] = fptoui half [[F:%.*]] to i32 -; CHECK-NEXT: [[C:%.*]] = icmp sge i32 [[I]], 0 -; CHECK-NEXT: ret i1 [[C]] +; CHECK-NEXT: ret i1 true ; %i = fptoui half %f to i32 %c = icmp sge i32 %i, 0 @@ -159,9 +149,7 @@ define <2 x i1> @v2f16_si_max2(<2 x half> %f) { ; CHECK-LABEL: @v2f16_si_max2( -; CHECK-NEXT: [[I:%.*]] = fptosi <2 x half> [[F:%.*]] to <2 x i32> -; CHECK-NEXT: [[C:%.*]] = icmp sgt <2 x i32> [[I]], -; CHECK-NEXT: ret <2 x i1> [[C]] +; CHECK-NEXT: ret <2 x i1> zeroinitializer ; %i = fptosi <2 x half> %f to <2 x i32> %c = icmp sgt <2 x i32> %i, @@ -181,9 +169,7 @@ define <2 x i1> @v2f16_si_min1(<2 x half> %f) { ; CHECK-LABEL: @v2f16_si_min1( -; CHECK-NEXT: [[I:%.*]] = fptosi <2 x half> [[F:%.*]] to <2 x i32> -; CHECK-NEXT: [[C:%.*]] = icmp sge <2 x i32> [[I]], -; CHECK-NEXT: ret <2 x i1> [[C]] +; CHECK-NEXT: ret <2 x i1> ; %i = fptosi <2 x half> %f to <2 x i32> %c = icmp sge <2 x i32> %i, @@ -225,9 +211,7 @@ define <2 x i1> @v2f16_ui_max2(<2 x half> %f) { ; CHECK-LABEL: @v2f16_ui_max2( -; CHECK-NEXT: [[I:%.*]] = fptoui <2 x half> [[F:%.*]] to <2 x i32> -; CHECK-NEXT: [[C:%.*]] = icmp sgt <2 x i32> [[I]], -; CHECK-NEXT: ret <2 x i1> [[C]] +; CHECK-NEXT: ret <2 x i1> zeroinitializer ; %i = fptoui <2 x half> %f to <2 x i32> %c = icmp sgt <2 x i32> %i, @@ -247,9 +231,7 @@ define <2 x i1> @v2f16_ui16_max3(<2 x half> %f) { ; CHECK-LABEL: @v2f16_ui16_max3( -; CHECK-NEXT: [[I:%.*]] = fptoui <2 x half> [[F:%.*]] to <2 x i16> -; CHECK-NEXT: [[C:%.*]] = icmp ule <2 x i16> [[I]], -; CHECK-NEXT: ret <2 x i1> [[C]] +; CHECK-NEXT: ret <2 x i1> ; %i = fptoui <2 x half> %f to <2 x i16> %c = icmp ule <2 x i16> %i, @@ -258,9 +240,7 @@ define <2 x i1> @v2f16_ui_min1(<2 x half> %f) { ; CHECK-LABEL: @v2f16_ui_min1( -; CHECK-NEXT: [[I:%.*]] = fptoui <2 x half> [[F:%.*]] to <2 x i32> -; CHECK-NEXT: [[C:%.*]] = icmp sge <2 x i32> [[I]], zeroinitializer -; CHECK-NEXT: ret <2 x i1> [[C]] +; CHECK-NEXT: ret <2 x i1> ; %i = fptoui <2 x half> %f to <2 x i32> %c = icmp sge <2 x i32> %i,