diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -387,6 +387,7 @@
   bool simplifyCallSite(Function *F, CallBase &Call);
   template <typename Callable>
   bool simplifyInstruction(Instruction &I, Callable Evaluate);
+  bool simplifyIntrinsicCallIsConstant(CallBase &CB);
   ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
 
   /// Return true if the given argument to the function being considered for
@@ -1531,6 +1532,27 @@
   return true;
 }
 
+/// Try to simplify a call to llvm.is.constant.
+///
+/// Duplicate the argument checking from CallAnalyzer::simplifyCallSite since
+/// we expect calls of this specific intrinsic to be infrequent.
+///
+/// FIXME: If we knew CB's parent's caller, we might be able to determine
+/// whether inlining CB's parent into that caller would change how this call
+/// to llvm.is.constant would evaluate. Note that CB's parent's caller is the
+/// caller of CallAnalyzer's member CandidateCall.
+bool CallAnalyzer::simplifyIntrinsicCallIsConstant(CallBase &CB) {
+  Value *Arg = CB.getArgOperand(0);
+  Constant *C = dyn_cast<Constant>(Arg);
+
+  if (!C)
+    C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(Arg));
+
+  Type *RT = CB.getFunctionType()->getReturnType();
+  SimplifiedValues[&CB] = ConstantInt::get(RT, C ? 1 : 0);
+  return true;
+}
+
 bool CallAnalyzer::visitBitCast(BitCastInst &I) {
   // Propagate constants through bitcasts.
   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
@@ -2154,6 +2176,8 @@
       if (auto *SROAArg = getSROAArgForValueOrNull(II->getOperand(0)))
         SROAArgValues[II] = SROAArg;
       return true;
+    case Intrinsic::is_constant:
+      return simplifyIntrinsicCallIsConstant(Call);
     }
   }
 
diff --git a/llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll b/llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/Inline/call-intrinsic-is-constant.ll
@@ -0,0 +1,1133 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt %s -passes=inline -S | FileCheck %s
+
+@hweight_long_w = external dso_local global i64, align 8
+
+define internal i32 @__nodes_weight() {
+entry:
+  %__trans_tmp_1 = alloca i32, align 4
+  %call = call i32 @hweight_long()
+  store i32 %call, i32* %__trans_tmp_1, align 4
+  %0 = load i32, i32* %__trans_tmp_1, align 4
+  ret i32 %0
+}
+
+define dso_local i32 @amd_numa_init() {
+; CHECK-LABEL: @amd_numa_init(
+; CHECK-NEXT: [[__TRANS_TMP_1_I:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[__TRANS_TMP_1_I]] to i8*
+; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP1]])
+; CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = call i1 @llvm.is.constant.i64(i64 [[TMP2]])
+; CHECK-NEXT: br i1 [[TMP3]], label [[COND_TRUE_I_I:%.*]], label [[COND_FALSE_I_I:%.*]]
+; CHECK: cond.true.i.i:
+; CHECK-NEXT: [[TMP4:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[AND_I_I:%.*]] = and i64 [[TMP4]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[ADD_I_I:%.*]] = add nsw i64 [[AND_I_I]], [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[AND1_I_I:%.*]] = and i64 [[TMP6]], 2
+; CHECK-NEXT: [[TOBOOL_I_I:%.*]] = icmp ne i64 [[AND1_I_I]], 0
+; CHECK-NEXT: [[LNOT_I_I:%.*]] = xor i1 [[TOBOOL_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT_I_I:%.*]] = zext i1 [[LNOT_I_I]] to i32
+; CHECK-NEXT: [[CONV_I_I:%.*]] = sext i32 [[LNOT_EXT_I_I]] to i64
+; CHECK-NEXT: [[ADD2_I_I:%.*]] = add nsw i64 [[ADD_I_I]], [[CONV_I_I]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[AND3_I_I:%.*]] = and i64 [[TMP7]], 3
+; CHECK-NEXT: [[TOBOOL4_I_I:%.*]] = icmp ne i64 [[AND3_I_I]], 0
+; CHECK-NEXT: [[LNOT5_I_I:%.*]] = xor i1 [[TOBOOL4_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT6_I_I:%.*]] = zext i1 [[LNOT5_I_I]] to i32
+; CHECK-NEXT: [[CONV7_I_I:%.*]] = sext i32 [[LNOT_EXT6_I_I]] to i64
+; CHECK-NEXT: [[ADD8_I_I:%.*]] = add nsw i64 [[ADD2_I_I]], [[CONV7_I_I]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[AND9_I_I:%.*]] = and i64 [[TMP8]], 4
+; CHECK-NEXT: [[TOBOOL10_I_I:%.*]] = icmp ne i64 [[AND9_I_I]], 0
+; CHECK-NEXT: [[LNOT11_I_I:%.*]] = xor i1 [[TOBOOL10_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT14_I_I:%.*]] = zext i1 [[TOBOOL10_I_I]] to i32
+; CHECK-NEXT: [[CONV15_I_I:%.*]] = sext i32 [[LNOT_EXT14_I_I]] to i64
+; CHECK-NEXT: [[ADD16_I_I:%.*]] = add nsw i64 [[ADD8_I_I]], [[CONV15_I_I]]
+; CHECK-NEXT: [[TMP9:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[TOBOOL17_I_I:%.*]] = icmp ne i64 [[TMP9]], 0
+; CHECK-NEXT: [[LNOT18_I_I:%.*]] = xor i1 [[TOBOOL17_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT21_I_I:%.*]] = zext i1 [[TOBOOL17_I_I]] to i32
+; CHECK-NEXT: [[CONV22_I_I:%.*]] = sext i32 [[LNOT_EXT21_I_I]] to i64
+; CHECK-NEXT: [[ADD23_I_I:%.*]] = add nsw i64 [[ADD16_I_I]], [[CONV22_I_I]]
+; CHECK-NEXT: [[TMP10:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHL_I_I:%.*]] = shl i64 [[TMP10]], 6
+; CHECK-NEXT: [[TOBOOL24_I_I:%.*]] = icmp ne i64 [[SHL_I_I]], 0
+; CHECK-NEXT: [[LNOT25_I_I:%.*]] = xor i1 [[TOBOOL24_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT26_I_I:%.*]] = zext i1 [[LNOT25_I_I]] to i32
+; CHECK-NEXT: [[TMP11:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[ADD27_I_I:%.*]] = add nsw i64 7, [[TMP12]]
+; CHECK-NEXT: [[SH_PROM_I_I:%.*]] = trunc i64 [[ADD27_I_I]] to i32
+; CHECK-NEXT: [[SHL28_I_I:%.*]] = shl i32 1, [[SH_PROM_I_I]]
+; CHECK-NEXT: [[CONV29_I_I:%.*]] = sext i32 [[SHL28_I_I]] to i64
+; CHECK-NEXT: [[AND30_I_I:%.*]] = and i64 [[TMP11]], [[CONV29_I_I]]
+; CHECK-NEXT: [[TOBOOL31_I_I:%.*]] = icmp ne i64 [[AND30_I_I]], 0
+; CHECK-NEXT: [[LNOT32_I_I:%.*]] = xor i1 [[TOBOOL31_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT33_I_I:%.*]] = zext i1 [[LNOT32_I_I]] to i32
+; CHECK-NEXT: [[ADD34_I_I:%.*]] = add nsw i32 [[LNOT_EXT26_I_I]], [[LNOT_EXT33_I_I]]
+; CHECK-NEXT: [[TMP13:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR_I_I:%.*]] = ashr i64 [[TMP13]], 1
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHL35_I_I:%.*]] = shl i64 [[SHR_I_I]], [[TMP14]]
+; CHECK-NEXT: [[SHR36_I_I:%.*]] = ashr i64 [[SHL35_I_I]], 8
+; CHECK-NEXT: [[SHL37_I_I:%.*]] = shl i64 [[SHR36_I_I]], 2
+; CHECK-NEXT: [[TOBOOL38_I_I:%.*]] = icmp ne i64 [[SHL37_I_I]], 0
+; CHECK-NEXT: [[LNOT39_I_I:%.*]] = xor i1 [[TOBOOL38_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT40_I_I:%.*]] = zext i1 [[LNOT39_I_I]] to i32
+; CHECK-NEXT: [[ADD41_I_I:%.*]] = add nsw i32 [[ADD34_I_I]], [[LNOT_EXT40_I_I]]
+; CHECK-NEXT: [[TMP15:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR42_I_I:%.*]] = ashr i64 [[TMP15]], 3
+; CHECK-NEXT: [[TOBOOL43_I_I:%.*]] = icmp ne i64 [[SHR42_I_I]], 0
+; CHECK-NEXT: [[LNOT44_I_I:%.*]] = xor i1 [[TOBOOL43_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT45_I_I:%.*]] = zext i1 [[LNOT44_I_I]] to i32
+; CHECK-NEXT: [[ADD46_I_I:%.*]] = add nsw i32 [[ADD41_I_I]], [[LNOT_EXT45_I_I]]
+; CHECK-NEXT: [[TMP16:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR47_I_I:%.*]] = ashr i64 [[TMP16]], 4
+; CHECK-NEXT: [[TOBOOL48_I_I:%.*]] = icmp ne i64 [[SHR47_I_I]], 0
+; CHECK-NEXT: [[LNOT49_I_I:%.*]] = xor i1 [[TOBOOL48_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT50_I_I:%.*]] = zext i1 [[LNOT49_I_I]] to i32
+; CHECK-NEXT: [[ADD51_I_I:%.*]] = add nsw i32 [[ADD46_I_I]], [[LNOT_EXT50_I_I]]
+; CHECK-NEXT: [[ADD52_I_I:%.*]] = add nsw i32 [[ADD51_I_I]], 5
+; CHECK-NEXT: [[TMP17:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR53_I_I:%.*]] = ashr i64 [[TMP17]], 6
+; CHECK-NEXT: [[TOBOOL54_I_I:%.*]] = icmp ne i64 [[SHR53_I_I]], 0
+; CHECK-NEXT: [[LNOT55_I_I:%.*]] = xor i1 [[TOBOOL54_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT56_I_I:%.*]] = zext i1 [[LNOT55_I_I]] to i32
+; CHECK-NEXT: [[ADD57_I_I:%.*]] = add nsw i32 [[ADD52_I_I]], [[LNOT_EXT56_I_I]]
+; CHECK-NEXT: [[TMP18:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR58_I_I:%.*]] = ashr i64 [[TMP18]], 7
+; CHECK-NEXT: [[TOBOOL59_I_I:%.*]] = icmp ne i64 [[SHR58_I_I]], 0
+; CHECK-NEXT: [[LNOT60_I_I:%.*]] = xor i1 [[TOBOOL59_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT61_I_I:%.*]] = zext i1 [[LNOT60_I_I]] to i32
+; CHECK-NEXT: [[ADD62_I_I:%.*]] = add nsw i32 [[ADD57_I_I]], [[LNOT_EXT61_I_I]]
+; CHECK-NEXT: [[CONV63_I_I:%.*]] = sext i32 [[ADD62_I_I]] to i64
+; CHECK-NEXT: [[TMP19:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[ADD64_I_I:%.*]] = add nsw i64 [[CONV63_I_I]], [[TMP19]]
+; CHECK-NEXT: [[SHL65_I_I:%.*]] = shl i64 [[ADD23_I_I]], [[ADD64_I_I]]
+; CHECK-NEXT: [[SHR66_I_I:%.*]] = ashr i64 [[SHL65_I_I]], 6
+; CHECK-NEXT: [[TMP20:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR67_I_I:%.*]] = ashr i64 [[TMP20]], 1
+; CHECK-NEXT: [[TOBOOL68_I_I:%.*]] = icmp ne i64 [[SHR67_I_I]], 0
+; CHECK-NEXT: [[LNOT69_I_I:%.*]] = xor i1 [[TOBOOL68_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT70_I_I:%.*]] = zext i1 [[LNOT69_I_I]] to i32
+; CHECK-NEXT: [[TMP21:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR71_I_I:%.*]] = ashr i64 [[TMP21]], 2
+; CHECK-NEXT: [[TOBOOL72_I_I:%.*]] = icmp ne i64 [[SHR71_I_I]], 0
+; CHECK-NEXT: [[LNOT73_I_I:%.*]] = xor i1 [[TOBOOL72_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT74_I_I:%.*]] = zext i1 [[LNOT73_I_I]] to i32
+; CHECK-NEXT: [[ADD75_I_I:%.*]] = add nsw i32 [[LNOT_EXT70_I_I]], [[LNOT_EXT74_I_I]]
+; CHECK-NEXT: [[TMP22:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[TMP23:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SH_PROM76_I_I:%.*]] = trunc i64 [[TMP23]] to i32
+; CHECK-NEXT: [[SHL77_I_I:%.*]] = shl i32 1, [[SH_PROM76_I_I]]
+; CHECK-NEXT: [[SHR78_I_I:%.*]] = ashr i32 [[SHL77_I_I]], 1
+; CHECK-NEXT: [[CONV79_I_I:%.*]] = sext i32 [[SHR78_I_I]] to i64
+; CHECK-NEXT: [[AND80_I_I:%.*]] = and i64 [[TMP22]], [[CONV79_I_I]]
+; CHECK-NEXT: [[TOBOOL81_I_I:%.*]] = icmp ne i64 [[AND80_I_I]], 0
+; CHECK-NEXT: [[LNOT82_I_I:%.*]] = xor i1 [[TOBOOL81_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT83_I_I:%.*]] = zext i1 [[LNOT82_I_I]] to i32
+; CHECK-NEXT: [[ADD84_I_I:%.*]] = add nsw i32 [[ADD75_I_I]], [[LNOT_EXT83_I_I]]
+; CHECK-NEXT: [[TMP24:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR85_I_I:%.*]] = ashr i64 [[TMP24]], 16
+; CHECK-NEXT: [[TOBOOL86_I_I:%.*]] = icmp ne i64 [[SHR85_I_I]], 0
+; CHECK-NEXT: [[LNOT87_I_I:%.*]] = xor i1 [[TOBOOL86_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT90_I_I:%.*]] = zext i1 [[TOBOOL86_I_I]] to i32
+; CHECK-NEXT: [[ADD91_I_I:%.*]] = add nsw i32 [[ADD84_I_I]], [[LNOT_EXT90_I_I]]
+; CHECK-NEXT: [[TMP25:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR92_I_I:%.*]] = ashr i64 [[TMP25]], 6
+; CHECK-NEXT: [[TOBOOL93_I_I:%.*]] = icmp ne i64 [[SHR92_I_I]], 0
+; CHECK-NEXT: [[LNOT94_I_I:%.*]] = xor i1 [[TOBOOL93_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT97_I_I:%.*]] = zext i1 [[TOBOOL93_I_I]] to i32
+; CHECK-NEXT: [[ADD98_I_I:%.*]] = add nsw i32 [[ADD91_I_I]], [[LNOT_EXT97_I_I]]
+; CHECK-NEXT: [[TMP26:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[TMP27:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR99_I_I:%.*]] = ashr i64 [[TMP27]], 16
+; CHECK-NEXT: [[SHL100_I_I:%.*]] = shl i64 1, [[SHR99_I_I]]
+; CHECK-NEXT: [[SHR101_I_I:%.*]] = lshr i64 [[SHL100_I_I]], 1
+; CHECK-NEXT: [[AND102_I_I:%.*]] = and i64 [[TMP26]], [[SHR101_I_I]]
+; CHECK-NEXT: [[TOBOOL103_I_I:%.*]] = icmp ne i64 [[AND102_I_I]], 0
+; CHECK-NEXT: [[LNOT104_I_I:%.*]] = xor i1 [[TOBOOL103_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT105_I_I:%.*]] = zext i1 [[LNOT104_I_I]] to i32
+; CHECK-NEXT: [[ADD106_I_I:%.*]] = add nsw i32 [[ADD98_I_I]], [[LNOT_EXT105_I_I]]
+; CHECK-NEXT: [[TMP28:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR107_I_I:%.*]] = ashr i64 [[TMP28]], 8
+; CHECK-NEXT: [[TMP29:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHL108_I_I:%.*]] = shl i64 [[SHR107_I_I]], [[TMP29]]
+; CHECK-NEXT: [[SHR109_I_I:%.*]] = ashr i64 [[SHL108_I_I]], 8
+; CHECK-NEXT: [[SHL110_I_I:%.*]] = shl i64 [[SHR109_I_I]], 16
+; CHECK-NEXT: [[SHR111_I_I:%.*]] = ashr i64 [[SHL110_I_I]], 8
+; CHECK-NEXT: [[TOBOOL112_I_I:%.*]] = icmp ne i64 [[SHR111_I_I]], 0
+; CHECK-NEXT: [[LNOT113_I_I:%.*]] = xor i1 [[TOBOOL112_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT114_I_I:%.*]] = zext i1 [[LNOT113_I_I]] to i32
+; CHECK-NEXT: [[ADD115_I_I:%.*]] = add nsw i32 [[ADD106_I_I]], [[LNOT_EXT114_I_I]]
+; CHECK-NEXT: [[TMP30:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR116_I_I:%.*]] = ashr i64 [[TMP30]], 16
+; CHECK-NEXT: [[TOBOOL117_I_I:%.*]] = icmp ne i64 [[SHR116_I_I]], 0
+; CHECK-NEXT: [[LNOT118_I_I:%.*]] = xor i1 [[TOBOOL117_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT119_I_I:%.*]] = zext i1 [[LNOT118_I_I]] to i32
+; CHECK-NEXT: [[ADD120_I_I:%.*]] = add nsw i32 [[ADD115_I_I]], [[LNOT_EXT119_I_I]]
+; CHECK-NEXT: [[TMP31:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR121_I_I:%.*]] = ashr i64 [[TMP31]], 6
+; CHECK-NEXT: [[SHR122_I_I:%.*]] = ashr i64 [[SHR121_I_I]], 5
+; CHECK-NEXT: [[TOBOOL123_I_I:%.*]] = icmp ne i64 [[SHR122_I_I]], 0
+; CHECK-NEXT: [[LNOT124_I_I:%.*]] = xor i1 [[TOBOOL123_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT125_I_I:%.*]] = zext i1 [[LNOT124_I_I]] to i32
+; CHECK-NEXT: [[ADD126_I_I:%.*]] = add nsw i32 [[ADD120_I_I]], [[LNOT_EXT125_I_I]]
+; CHECK-NEXT: [[ADD127_I_I:%.*]] = add nsw i32 [[ADD126_I_I]], 6
+; CHECK-NEXT: [[CONV128_I_I:%.*]] = sext i32 [[ADD127_I_I]] to i64
+; CHECK-NEXT: [[TMP32:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[ADD129_I_I:%.*]] = add nsw i64 [[CONV128_I_I]], [[TMP32]]
+; CHECK-NEXT: [[TMP33:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR130_I_I:%.*]] = ashr i64 [[TMP33]], 32
+; CHECK-NEXT: [[AND131_I_I:%.*]] = and i64 [[SHR130_I_I]], 1
+; CHECK-NEXT: [[ADD132_I_I:%.*]] = add nsw i64 7, [[AND131_I_I]]
+; CHECK-NEXT: [[TMP34:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR133_I_I:%.*]] = ashr i64 [[TMP34]], 32
+; CHECK-NEXT: [[TOBOOL134_I_I:%.*]] = icmp ne i64 [[SHR133_I_I]], 0
+; CHECK-NEXT: [[LNOT135_I_I:%.*]] = xor i1 [[TOBOOL134_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT136_I_I:%.*]] = zext i1 [[LNOT135_I_I]] to i32
+; CHECK-NEXT: [[CONV137_I_I:%.*]] = sext i32 [[LNOT_EXT136_I_I]] to i64
+; CHECK-NEXT: [[ADD138_I_I:%.*]] = add nsw i64 [[ADD132_I_I]], [[CONV137_I_I]]
+; CHECK-NEXT: [[TMP35:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR139_I_I:%.*]] = ashr i64 [[TMP35]], 2
+; CHECK-NEXT: [[TOBOOL140_I_I:%.*]] = icmp ne i64 [[SHR139_I_I]], 0
+; CHECK-NEXT: [[LNOT141_I_I:%.*]] = xor i1 [[TOBOOL140_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT144_I_I:%.*]] = zext i1 [[TOBOOL140_I_I]] to i32
+; CHECK-NEXT: [[CONV145_I_I:%.*]] = sext i32 [[LNOT_EXT144_I_I]] to i64
+; CHECK-NEXT: [[ADD146_I_I:%.*]] = add nsw i64 [[ADD138_I_I]], [[CONV145_I_I]]
+; CHECK-NEXT: [[TMP36:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[ADD147_I_I:%.*]] = add nsw i64 [[ADD146_I_I]], [[TMP36]]
+; CHECK-NEXT: [[SHR148_I_I:%.*]] = ashr i64 [[ADD129_I_I]], [[ADD147_I_I]]
+; CHECK-NEXT: [[AND149_I_I:%.*]] = and i64 [[SHR66_I_I]], [[SHR148_I_I]]
+; CHECK-NEXT: [[TMP37:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[ADD150_I_I:%.*]] = add nsw i64 3, [[TMP37]]
+; CHECK-NEXT: [[TMP38:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[ADD151_I_I:%.*]] = add nsw i64 [[ADD150_I_I]], [[TMP38]]
+; CHECK-NEXT: [[CMP_I_I:%.*]] = icmp slt i64 1, [[ADD151_I_I]]
+; CHECK-NEXT: [[CONV152_I_I:%.*]] = zext i1 [[CMP_I_I]] to i32
+; CHECK-NEXT: [[CONV153_I_I:%.*]] = sext i32 [[CONV152_I_I]] to i64
+; CHECK-NEXT: [[TMP39:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[AND154_I_I:%.*]] = and i64 [[TMP39]], 6
+; CHECK-NEXT: [[TOBOOL155_I_I:%.*]] = icmp ne i64 [[AND154_I_I]], 0
+; CHECK-NEXT: [[LNOT156_I_I:%.*]] = xor i1 [[TOBOOL155_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT157_I_I:%.*]] = zext i1 [[LNOT156_I_I]] to i32
+; CHECK-NEXT: [[TMP40:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR158_I_I:%.*]] = ashr i64 [[TMP40]], 7
+; CHECK-NEXT: [[TOBOOL159_I_I:%.*]] = icmp ne i64 [[SHR158_I_I]], 0
+; CHECK-NEXT: [[LNOT160_I_I:%.*]] = xor i1 [[TOBOOL159_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT163_I_I:%.*]] = zext i1 [[TOBOOL159_I_I]] to i32
+; CHECK-NEXT: [[ADD164_I_I:%.*]] = add nsw i32 [[LNOT_EXT157_I_I]], [[LNOT_EXT163_I_I]]
+; CHECK-NEXT: [[CONV165_I_I:%.*]] = sext i32 [[ADD164_I_I]] to i64
+; CHECK-NEXT: [[TMP41:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR166_I_I:%.*]] = ashr i64 [[TMP41]], 2
+; CHECK-NEXT: [[ADD167_I_I:%.*]] = add nsw i64 [[CONV165_I_I]], [[SHR166_I_I]]
+; CHECK-NEXT: [[TMP42:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHL168_I_I:%.*]] = shl i64 [[TMP42]], 1
+; CHECK-NEXT: [[TOBOOL169_I_I:%.*]] = icmp ne i64 [[SHL168_I_I]], 0
+; CHECK-NEXT: [[LNOT170_I_I:%.*]] = xor i1 [[TOBOOL169_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT171_I_I:%.*]] = zext i1 [[LNOT170_I_I]] to i32
+; CHECK-NEXT: [[CONV172_I_I:%.*]] = sext i32 [[LNOT_EXT171_I_I]] to i64
+; CHECK-NEXT: [[ADD173_I_I:%.*]] = add nsw i64 [[ADD167_I_I]], [[CONV172_I_I]]
+; CHECK-NEXT: [[TMP43:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR174_I_I:%.*]] = ashr i64 [[TMP43]], 32
+; CHECK-NEXT: [[SHR175_I_I:%.*]] = ashr i64 [[SHR174_I_I]], 2
+; CHECK-NEXT: [[TOBOOL176_I_I:%.*]] = icmp ne i64 [[SHR175_I_I]], 0
+; CHECK-NEXT: [[LNOT177_I_I:%.*]] = xor i1 [[TOBOOL176_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT178_I_I:%.*]] = zext i1 [[LNOT177_I_I]] to i32
+; CHECK-NEXT: [[CONV179_I_I:%.*]] = sext i32 [[LNOT_EXT178_I_I]] to i64
+; CHECK-NEXT: [[ADD180_I_I:%.*]] = add nsw i64 [[ADD173_I_I]], [[CONV179_I_I]]
+; CHECK-NEXT: [[TMP44:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR181_I_I:%.*]] = ashr i64 [[TMP44]], 2
+; CHECK-NEXT: [[SHR182_I_I:%.*]] = ashr i64 [[SHR181_I_I]], 3
+; CHECK-NEXT: [[TOBOOL183_I_I:%.*]] = icmp ne i64 [[SHR182_I_I]], 0
+; CHECK-NEXT: [[LNOT184_I_I:%.*]] = xor i1 [[TOBOOL183_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT185_I_I:%.*]] = zext i1 [[LNOT184_I_I]] to i32
+; CHECK-NEXT: [[CONV186_I_I:%.*]] = sext i32 [[LNOT_EXT185_I_I]] to i64
+; CHECK-NEXT: [[ADD187_I_I:%.*]] = add nsw i64 [[ADD180_I_I]], [[CONV186_I_I]]
+; CHECK-NEXT: [[TMP45:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR188_I_I:%.*]] = ashr i64 [[TMP45]], 2
+; CHECK-NEXT: [[SHR189_I_I:%.*]] = ashr i64 [[SHR188_I_I]], 4
+; CHECK-NEXT: [[TOBOOL190_I_I:%.*]] = icmp ne i64 [[SHR189_I_I]], 0
+; CHECK-NEXT: [[LNOT191_I_I:%.*]] = xor i1 [[TOBOOL190_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT192_I_I:%.*]] = zext i1 [[LNOT191_I_I]] to i32
+; CHECK-NEXT: [[CONV193_I_I:%.*]] = sext i32 [[LNOT_EXT192_I_I]] to i64
+; CHECK-NEXT: [[ADD194_I_I:%.*]] = add nsw i64 [[ADD187_I_I]], [[CONV193_I_I]]
+; CHECK-NEXT: [[TMP46:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR195_I_I:%.*]] = ashr i64 [[TMP46]], 2
+; CHECK-NEXT: [[SHR196_I_I:%.*]] = ashr i64 [[SHR195_I_I]], 5
+; CHECK-NEXT: [[TOBOOL197_I_I:%.*]] = icmp ne i64 [[SHR196_I_I]], 0
+; CHECK-NEXT: [[LNOT198_I_I:%.*]] = xor i1 [[TOBOOL197_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT199_I_I:%.*]] = zext i1 [[LNOT198_I_I]] to i32
+; CHECK-NEXT: [[CONV200_I_I:%.*]] = sext i32 [[LNOT_EXT199_I_I]] to i64
+; CHECK-NEXT: [[ADD201_I_I:%.*]] = add nsw i64 [[ADD194_I_I]], [[CONV200_I_I]]
+; CHECK-NEXT: [[TMP47:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[ADD202_I_I:%.*]] = add nsw i64 [[ADD201_I_I]], [[TMP47]]
+; CHECK-NEXT: [[SHR203_I_I:%.*]] = ashr i64 [[ADD202_I_I]], 2
+; CHECK-NEXT: [[TMP48:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[TOBOOL204_I_I:%.*]] = icmp ne i64 [[TMP48]], 0
+; CHECK-NEXT: [[LNOT205_I_I:%.*]] = xor i1 [[TOBOOL204_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT206_I_I:%.*]] = zext i1 [[LNOT205_I_I]] to i32
+; CHECK-NEXT: [[ADD207_I_I:%.*]] = add nsw i32 6, [[LNOT_EXT206_I_I]]
+; CHECK-NEXT: [[SH_PROM208_I_I:%.*]] = zext i32 [[ADD207_I_I]] to i64
+; CHECK-NEXT: [[SHR209_I_I:%.*]] = ashr i64 [[SHR203_I_I]], [[SH_PROM208_I_I]]
+; CHECK-NEXT: [[SHR210_I_I:%.*]] = ashr i64 [[SHR209_I_I]], 2
+; CHECK-NEXT: [[TMP49:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR211_I_I:%.*]] = ashr i64 [[TMP49]], 2
+; CHECK-NEXT: [[SHR212_I_I:%.*]] = ashr i64 [[SHR211_I_I]], 16
+; CHECK-NEXT: [[AND213_I_I:%.*]] = and i64 [[SHR212_I_I]], 1
+; CHECK-NEXT: [[ADD214_I_I:%.*]] = add nsw i64 7, [[AND213_I_I]]
+; CHECK-NEXT: [[TMP50:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[CMP215_I_I:%.*]] = icmp slt i64 [[TMP50]], 1
+; CHECK-NEXT: [[CONV216_I_I:%.*]] = zext i1 [[CMP215_I_I]] to i32
+; CHECK-NEXT: [[CONV217_I_I:%.*]] = sext i32 [[CONV216_I_I]] to i64
+; CHECK-NEXT: [[ADD218_I_I:%.*]] = add nsw i64 [[ADD214_I_I]], [[CONV217_I_I]]
+; CHECK-NEXT: [[TMP51:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[TMP52:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SH_PROM219_I_I:%.*]] = trunc i64 [[TMP52]] to i32
+; CHECK-NEXT: [[SHL220_I_I:%.*]] = shl i32 6, [[SH_PROM219_I_I]]
+; CHECK-NEXT: [[SHR221_I_I:%.*]] = ashr i32 [[SHL220_I_I]], 1
+; CHECK-NEXT: [[CONV222_I_I:%.*]] = sext i32 [[SHR221_I_I]] to i64
+; CHECK-NEXT: [[CMP223_I_I:%.*]] = icmp sgt i64 [[TMP51]], [[CONV222_I_I]]
+; CHECK-NEXT: [[CONV224_I_I:%.*]] = zext i1 [[CMP223_I_I]] to i32
+; CHECK-NEXT: [[CONV225_I_I:%.*]] = sext i32 [[CONV224_I_I]] to i64
+; CHECK-NEXT: [[ADD226_I_I:%.*]] = add nsw i64 [[ADD218_I_I]], [[CONV225_I_I]]
+; CHECK-NEXT: [[TMP53:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[TMP54:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SH_PROM227_I_I:%.*]] = trunc i64 [[TMP54]] to i32
+; CHECK-NEXT: [[SHL228_I_I:%.*]] = shl i32 1, [[SH_PROM227_I_I]]
+; CHECK-NEXT: [[SHR229_I_I:%.*]] = ashr i32 [[SHL228_I_I]], 5
+; CHECK-NEXT: [[CONV230_I_I:%.*]] = sext i32 [[SHR229_I_I]] to i64
+; CHECK-NEXT: [[AND231_I_I:%.*]] = and i64 [[TMP53]], [[CONV230_I_I]]
+; CHECK-NEXT: [[TOBOOL232_I_I:%.*]] = icmp ne i64 [[AND231_I_I]], 0
+; CHECK-NEXT: [[LNOT233_I_I:%.*]] = xor i1 [[TOBOOL232_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT234_I_I:%.*]] = zext i1 [[LNOT233_I_I]] to i32
+; CHECK-NEXT: [[CONV235_I_I:%.*]] = sext i32 [[LNOT_EXT234_I_I]] to i64
+; CHECK-NEXT: [[ADD236_I_I:%.*]] = add nsw i64 [[ADD226_I_I]], [[CONV235_I_I]]
+; CHECK-NEXT: [[TMP55:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR237_I_I:%.*]] = ashr i64 [[TMP55]], 1
+; CHECK-NEXT: [[TOBOOL238_I_I:%.*]] = icmp ne i64 [[SHR237_I_I]], 0
+; CHECK-NEXT: [[LNOT239_I_I:%.*]] = xor i1 [[TOBOOL238_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT242_I_I:%.*]] = zext i1 [[TOBOOL238_I_I]] to i32
+; CHECK-NEXT: [[CONV243_I_I:%.*]] = sext i32 [[LNOT_EXT242_I_I]] to i64
+; CHECK-NEXT: [[ADD244_I_I:%.*]] = add nsw i64 [[ADD236_I_I]], [[CONV243_I_I]]
+; CHECK-NEXT: [[TMP56:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[TMP57:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[TMP58:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[ADD245_I_I:%.*]] = add nsw i64 2, [[TMP58]]
+; CHECK-NEXT: [[SHR246_I_I:%.*]] = ashr i64 [[TMP57]], [[ADD245_I_I]]
+; CHECK-NEXT: [[SHR247_I_I:%.*]] = ashr i64 [[SHR246_I_I]], 6
+; CHECK-NEXT: [[SHL248_I_I:%.*]] = shl i64 [[SHR247_I_I]], 1
+; CHECK-NEXT: [[AND249_I_I:%.*]] = and i64 [[TMP56]], [[SHL248_I_I]]
+; CHECK-NEXT: [[TOBOOL250_I_I:%.*]] = icmp ne i64 [[AND249_I_I]], 0
+; CHECK-NEXT: [[LNOT251_I_I:%.*]] = xor i1 [[TOBOOL250_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT252_I_I:%.*]] = zext i1 [[LNOT251_I_I]] to i32
+; CHECK-NEXT: [[CONV253_I_I:%.*]] = sext i32 [[LNOT_EXT252_I_I]] to i64
+; CHECK-NEXT: [[ADD254_I_I:%.*]] = add nsw i64 [[ADD244_I_I]], [[CONV253_I_I]]
+; CHECK-NEXT: [[TMP59:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[TMP60:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[ADD255_I_I:%.*]] = add nsw i64 8, [[TMP60]]
+; CHECK-NEXT: [[SHR256_I_I:%.*]] = ashr i64 [[TMP59]], [[ADD255_I_I]]
+; CHECK-NEXT: [[AND257_I_I:%.*]] = and i64 [[SHR256_I_I]], 3
+; CHECK-NEXT: [[TOBOOL258_I_I:%.*]] = icmp ne i64 [[AND257_I_I]], 0
+; CHECK-NEXT: [[LNOT259_I_I:%.*]] = xor i1 [[TOBOOL258_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT260_I_I:%.*]] = zext i1 [[LNOT259_I_I]] to i32
+; CHECK-NEXT: [[CONV261_I_I:%.*]] = sext i32 [[LNOT_EXT260_I_I]] to i64
+; CHECK-NEXT: [[ADD262_I_I:%.*]] = add nsw i64 [[ADD254_I_I]], [[CONV261_I_I]]
+; CHECK-NEXT: [[TMP61:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[TOBOOL263_I_I:%.*]] = icmp ne i64 [[TMP61]], 0
+; CHECK-NEXT: [[LNOT264_I_I:%.*]] = xor i1 [[TOBOOL263_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT265_I_I:%.*]] = zext i1 [[LNOT264_I_I]] to i32
+; CHECK-NEXT: [[CONV266_I_I:%.*]] = sext i32 [[LNOT_EXT265_I_I]] to i64
+; CHECK-NEXT: [[ADD267_I_I:%.*]] = add nsw i64 [[ADD262_I_I]], [[CONV266_I_I]]
+; CHECK-NEXT: [[SHR268_I_I:%.*]] = ashr i64 [[SHR210_I_I]], [[ADD267_I_I]]
+; CHECK-NEXT: [[SHR269_I_I:%.*]] = ashr i64 [[SHR268_I_I]], 6
+; CHECK-NEXT: [[TMP62:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[ADD270_I_I:%.*]] = add nsw i64 4, [[TMP62]]
+; CHECK-NEXT: [[SHR271_I_I:%.*]] = ashr i64 [[SHR269_I_I]], [[ADD270_I_I]]
+; CHECK-NEXT: [[SHR272_I_I:%.*]] = ashr i64 [[SHR271_I_I]], 2
+; CHECK-NEXT: [[TMP63:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[SHR273_I_I:%.*]] = ashr i64 [[TMP63]], 6
+; CHECK-NEXT: [[TMP64:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[ADD274_I_I:%.*]] = add nsw i64 6, [[TMP64]]
+; CHECK-NEXT: [[SHR275_I_I:%.*]] = ashr i64 [[SHR273_I_I]], [[ADD274_I_I]]
+; CHECK-NEXT: [[AND276_I_I:%.*]] = and i64 [[SHR275_I_I]], 7
+; CHECK-NEXT: [[TOBOOL277_I_I:%.*]] = icmp ne i64 [[AND276_I_I]], 0
+; CHECK-NEXT: [[LNOT278_I_I:%.*]] = xor i1 [[TOBOOL277_I_I]], true
+; CHECK-NEXT: [[LNOT_EXT279_I_I:%.*]] = zext i1 [[LNOT278_I_I]] to i32
+; CHECK-NEXT: [[SH_PROM280_I_I:%.*]] = zext i32 [[LNOT_EXT279_I_I]] to i64
+; CHECK-NEXT: [[SHR281_I_I:%.*]] = ashr i64 [[SHR272_I_I]], [[SH_PROM280_I_I]]
+; CHECK-NEXT: [[CMP282_I_I:%.*]] = icmp sgt i64 [[CONV153_I_I]], [[SHR281_I_I]]
+; CHECK-NEXT: [[CONV283_I_I:%.*]] = zext i1 [[CMP282_I_I]] to i32
+; CHECK-NEXT: [[CONV284_I_I:%.*]] = sext i32 [[CONV283_I_I]] to i64
+; CHECK-NEXT: [[AND285_I_I:%.*]] = and i64 [[AND149_I_I]], [[CONV284_I_I]]
+; CHECK-NEXT: br label [[__NODES_WEIGHT_EXIT:%.*]]
+; CHECK: cond.false.i.i:
+; CHECK-NEXT: [[TMP65:%.*]] = load i64, i64* @hweight_long_w, align 8
+; CHECK-NEXT: [[CALL_I_I:%.*]] = call i32 (i64, ...) bitcast (i32 (...)* @__arch_hweight64 to i32 (i64, ...)*)(i64 [[TMP65]])
+; CHECK-NEXT: [[CONV286_I_I:%.*]] = sext i32 [[CALL_I_I]] to i64
+; CHECK-NEXT: br label [[__NODES_WEIGHT_EXIT]]
+; CHECK: __nodes_weight.exit:
+; CHECK-NEXT: [[COND_I_I:%.*]] = phi i64 [ [[AND285_I_I]], [[COND_TRUE_I_I]] ], [ [[CONV286_I_I]], [[COND_FALSE_I_I]] ]
+; CHECK-NEXT: [[CONV287_I_I:%.*]] = trunc i64 [[COND_I_I]] to i32
+; CHECK-NEXT: store i32 [[CONV287_I_I]], i32* [[__TRANS_TMP_1_I]], align 4
+; CHECK-NEXT: [[TMP66:%.*]] = load i32, i32* [[__TRANS_TMP_1_I]], align 4
+; CHECK-NEXT: [[TMP67:%.*]] = bitcast i32* [[__TRANS_TMP_1_I]] to i8*
+; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 4, i8* [[TMP67]])
+; CHECK-NEXT: br label [[IF_END7:%.*]]
+; CHECK: if.end7:
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[RETVAL]], align 4
+; CHECK-NEXT: ret i32 [[LOAD]]
+  %retval = alloca i32, align 4
+  %call6 = call i32 @__nodes_weight()
+  br label %if.end7
+
+if.end7: ; preds = %if.then5, %if.end4, %if.then3, %if.then
+  %load = load i32, i32* %retval, align 4
+  ret i32 %load
+}
+
+declare dso_local i32 @early_pci_allowed(...)
+declare dso_local i32 @find_northbridge(...)
+ +define hidden i32 @hweight_long() { +; CHECK-LABEL: @hweight_long( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.constant.i64(i64 [[TMP0]]) +; CHECK-NEXT: br i1 [[TMP1]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +; CHECK: cond.true: +; CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[AND:%.*]] = and i64 [[TMP2]], 1 +; CHECK-NEXT: [[TMP3:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[AND]], [[TMP3]] +; CHECK-NEXT: [[TMP4:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[AND1:%.*]] = and i64 [[TMP4]], 2 +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i64 [[AND1]], 0 +; CHECK-NEXT: [[LNOT:%.*]] = xor i1 [[TOBOOL]], true +; CHECK-NEXT: [[LNOT_EXT:%.*]] = zext i1 [[LNOT]] to i32 +; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[LNOT_EXT]] to i64 +; CHECK-NEXT: [[ADD2:%.*]] = add nsw i64 [[ADD]], [[CONV]] +; CHECK-NEXT: [[TMP5:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[AND3:%.*]] = and i64 [[TMP5]], 3 +; CHECK-NEXT: [[TOBOOL4:%.*]] = icmp ne i64 [[AND3]], 0 +; CHECK-NEXT: [[LNOT5:%.*]] = xor i1 [[TOBOOL4]], true +; CHECK-NEXT: [[LNOT_EXT6:%.*]] = zext i1 [[LNOT5]] to i32 +; CHECK-NEXT: [[CONV7:%.*]] = sext i32 [[LNOT_EXT6]] to i64 +; CHECK-NEXT: [[ADD8:%.*]] = add nsw i64 [[ADD2]], [[CONV7]] +; CHECK-NEXT: [[TMP6:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[AND9:%.*]] = and i64 [[TMP6]], 4 +; CHECK-NEXT: [[TOBOOL10:%.*]] = icmp ne i64 [[AND9]], 0 +; CHECK-NEXT: [[LNOT11:%.*]] = xor i1 [[TOBOOL10]], true +; CHECK-NEXT: [[LNOT13:%.*]] = xor i1 [[LNOT11]], true +; CHECK-NEXT: [[LNOT_EXT14:%.*]] = zext i1 [[LNOT13]] to i32 +; CHECK-NEXT: [[CONV15:%.*]] = sext i32 [[LNOT_EXT14]] to i64 +; CHECK-NEXT: [[ADD16:%.*]] = add nsw i64 [[ADD8]], [[CONV15]] +; CHECK-NEXT: [[TMP7:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[TOBOOL17:%.*]] = icmp ne i64 [[TMP7]], 0 +; CHECK-NEXT: [[LNOT18:%.*]] = xor i1 [[TOBOOL17]], true +; CHECK-NEXT: [[LNOT20:%.*]] = xor i1 [[LNOT18]], true +; CHECK-NEXT: [[LNOT_EXT21:%.*]] = zext i1 [[LNOT20]] to i32 +; CHECK-NEXT: [[CONV22:%.*]] = sext i32 [[LNOT_EXT21]] to i64 +; CHECK-NEXT: [[ADD23:%.*]] = add nsw i64 [[ADD16]], [[CONV22]] +; CHECK-NEXT: [[TMP8:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[TMP8]], 6 +; CHECK-NEXT: [[TOBOOL24:%.*]] = icmp ne i64 [[SHL]], 0 +; CHECK-NEXT: [[LNOT25:%.*]] = xor i1 [[TOBOOL24]], true +; CHECK-NEXT: [[LNOT_EXT26:%.*]] = zext i1 [[LNOT25]] to i32 +; CHECK-NEXT: [[TMP9:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[TMP10:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[ADD27:%.*]] = add nsw i64 7, [[TMP10]] +; CHECK-NEXT: [[SH_PROM:%.*]] = trunc i64 [[ADD27]] to i32 +; CHECK-NEXT: [[SHL28:%.*]] = shl i32 1, [[SH_PROM]] +; CHECK-NEXT: [[CONV29:%.*]] = sext i32 [[SHL28]] to i64 +; CHECK-NEXT: [[AND30:%.*]] = and i64 [[TMP9]], [[CONV29]] +; CHECK-NEXT: [[TOBOOL31:%.*]] = icmp ne i64 [[AND30]], 0 +; CHECK-NEXT: [[LNOT32:%.*]] = xor i1 [[TOBOOL31]], true +; CHECK-NEXT: [[LNOT_EXT33:%.*]] = zext i1 [[LNOT32]] to i32 +; CHECK-NEXT: [[ADD34:%.*]] = add nsw i32 [[LNOT_EXT26]], [[LNOT_EXT33]] +; CHECK-NEXT: [[TMP11:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR:%.*]] = ashr i64 [[TMP11]], 1 +; CHECK-NEXT: [[TMP12:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHL35:%.*]] = shl i64 [[SHR]], [[TMP12]] +; 
CHECK-NEXT: [[SHR36:%.*]] = ashr i64 [[SHL35]], 8 +; CHECK-NEXT: [[SHL37:%.*]] = shl i64 [[SHR36]], 2 +; CHECK-NEXT: [[TOBOOL38:%.*]] = icmp ne i64 [[SHL37]], 0 +; CHECK-NEXT: [[LNOT39:%.*]] = xor i1 [[TOBOOL38]], true +; CHECK-NEXT: [[LNOT_EXT40:%.*]] = zext i1 [[LNOT39]] to i32 +; CHECK-NEXT: [[ADD41:%.*]] = add nsw i32 [[ADD34]], [[LNOT_EXT40]] +; CHECK-NEXT: [[TMP13:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR42:%.*]] = ashr i64 [[TMP13]], 3 +; CHECK-NEXT: [[TOBOOL43:%.*]] = icmp ne i64 [[SHR42]], 0 +; CHECK-NEXT: [[LNOT44:%.*]] = xor i1 [[TOBOOL43]], true +; CHECK-NEXT: [[LNOT_EXT45:%.*]] = zext i1 [[LNOT44]] to i32 +; CHECK-NEXT: [[ADD46:%.*]] = add nsw i32 [[ADD41]], [[LNOT_EXT45]] +; CHECK-NEXT: [[TMP14:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR47:%.*]] = ashr i64 [[TMP14]], 4 +; CHECK-NEXT: [[TOBOOL48:%.*]] = icmp ne i64 [[SHR47]], 0 +; CHECK-NEXT: [[LNOT49:%.*]] = xor i1 [[TOBOOL48]], true +; CHECK-NEXT: [[LNOT_EXT50:%.*]] = zext i1 [[LNOT49]] to i32 +; CHECK-NEXT: [[ADD51:%.*]] = add nsw i32 [[ADD46]], [[LNOT_EXT50]] +; CHECK-NEXT: [[ADD52:%.*]] = add nsw i32 [[ADD51]], 5 +; CHECK-NEXT: [[TMP15:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR53:%.*]] = ashr i64 [[TMP15]], 6 +; CHECK-NEXT: [[TOBOOL54:%.*]] = icmp ne i64 [[SHR53]], 0 +; CHECK-NEXT: [[LNOT55:%.*]] = xor i1 [[TOBOOL54]], true +; CHECK-NEXT: [[LNOT_EXT56:%.*]] = zext i1 [[LNOT55]] to i32 +; CHECK-NEXT: [[ADD57:%.*]] = add nsw i32 [[ADD52]], [[LNOT_EXT56]] +; CHECK-NEXT: [[TMP16:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR58:%.*]] = ashr i64 [[TMP16]], 7 +; CHECK-NEXT: [[TOBOOL59:%.*]] = icmp ne i64 [[SHR58]], 0 +; CHECK-NEXT: [[LNOT60:%.*]] = xor i1 [[TOBOOL59]], true +; CHECK-NEXT: [[LNOT_EXT61:%.*]] = zext i1 [[LNOT60]] to i32 +; CHECK-NEXT: [[ADD62:%.*]] = add nsw i32 [[ADD57]], [[LNOT_EXT61]] +; CHECK-NEXT: [[CONV63:%.*]] = sext i32 [[ADD62]] to i64 +; CHECK-NEXT: [[TMP17:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[ADD64:%.*]] = add nsw i64 [[CONV63]], [[TMP17]] +; CHECK-NEXT: [[SHL65:%.*]] = shl i64 [[ADD23]], [[ADD64]] +; CHECK-NEXT: [[SHR66:%.*]] = ashr i64 [[SHL65]], 6 +; CHECK-NEXT: [[TMP18:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR67:%.*]] = ashr i64 [[TMP18]], 1 +; CHECK-NEXT: [[TOBOOL68:%.*]] = icmp ne i64 [[SHR67]], 0 +; CHECK-NEXT: [[LNOT69:%.*]] = xor i1 [[TOBOOL68]], true +; CHECK-NEXT: [[LNOT_EXT70:%.*]] = zext i1 [[LNOT69]] to i32 +; CHECK-NEXT: [[TMP19:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR71:%.*]] = ashr i64 [[TMP19]], 2 +; CHECK-NEXT: [[TOBOOL72:%.*]] = icmp ne i64 [[SHR71]], 0 +; CHECK-NEXT: [[LNOT73:%.*]] = xor i1 [[TOBOOL72]], true +; CHECK-NEXT: [[LNOT_EXT74:%.*]] = zext i1 [[LNOT73]] to i32 +; CHECK-NEXT: [[ADD75:%.*]] = add nsw i32 [[LNOT_EXT70]], [[LNOT_EXT74]] +; CHECK-NEXT: [[TMP20:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[TMP21:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SH_PROM76:%.*]] = trunc i64 [[TMP21]] to i32 +; CHECK-NEXT: [[SHL77:%.*]] = shl i32 1, [[SH_PROM76]] +; CHECK-NEXT: [[SHR78:%.*]] = ashr i32 [[SHL77]], 1 +; CHECK-NEXT: [[CONV79:%.*]] = sext i32 [[SHR78]] to i64 +; CHECK-NEXT: [[AND80:%.*]] = and i64 [[TMP20]], [[CONV79]] +; CHECK-NEXT: [[TOBOOL81:%.*]] = icmp ne i64 [[AND80]], 0 +; CHECK-NEXT: [[LNOT82:%.*]] = xor i1 [[TOBOOL81]], true +; CHECK-NEXT: [[LNOT_EXT83:%.*]] = zext i1 [[LNOT82]] to i32 +; CHECK-NEXT: [[ADD84:%.*]] = add nsw i32 [[ADD75]], 
[[LNOT_EXT83]] +; CHECK-NEXT: [[TMP22:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR85:%.*]] = ashr i64 [[TMP22]], 16 +; CHECK-NEXT: [[TOBOOL86:%.*]] = icmp ne i64 [[SHR85]], 0 +; CHECK-NEXT: [[LNOT87:%.*]] = xor i1 [[TOBOOL86]], true +; CHECK-NEXT: [[LNOT89:%.*]] = xor i1 [[LNOT87]], true +; CHECK-NEXT: [[LNOT_EXT90:%.*]] = zext i1 [[LNOT89]] to i32 +; CHECK-NEXT: [[ADD91:%.*]] = add nsw i32 [[ADD84]], [[LNOT_EXT90]] +; CHECK-NEXT: [[TMP23:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR92:%.*]] = ashr i64 [[TMP23]], 6 +; CHECK-NEXT: [[TOBOOL93:%.*]] = icmp ne i64 [[SHR92]], 0 +; CHECK-NEXT: [[LNOT94:%.*]] = xor i1 [[TOBOOL93]], true +; CHECK-NEXT: [[LNOT96:%.*]] = xor i1 [[LNOT94]], true +; CHECK-NEXT: [[LNOT_EXT97:%.*]] = zext i1 [[LNOT96]] to i32 +; CHECK-NEXT: [[ADD98:%.*]] = add nsw i32 [[ADD91]], [[LNOT_EXT97]] +; CHECK-NEXT: [[TMP24:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[TMP25:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR99:%.*]] = ashr i64 [[TMP25]], 16 +; CHECK-NEXT: [[SHL100:%.*]] = shl i64 1, [[SHR99]] +; CHECK-NEXT: [[SHR101:%.*]] = lshr i64 [[SHL100]], 1 +; CHECK-NEXT: [[AND102:%.*]] = and i64 [[TMP24]], [[SHR101]] +; CHECK-NEXT: [[TOBOOL103:%.*]] = icmp ne i64 [[AND102]], 0 +; CHECK-NEXT: [[LNOT104:%.*]] = xor i1 [[TOBOOL103]], true +; CHECK-NEXT: [[LNOT_EXT105:%.*]] = zext i1 [[LNOT104]] to i32 +; CHECK-NEXT: [[ADD106:%.*]] = add nsw i32 [[ADD98]], [[LNOT_EXT105]] +; CHECK-NEXT: [[TMP26:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR107:%.*]] = ashr i64 [[TMP26]], 8 +; CHECK-NEXT: [[TMP27:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHL108:%.*]] = shl i64 [[SHR107]], [[TMP27]] +; CHECK-NEXT: [[SHR109:%.*]] = ashr i64 [[SHL108]], 8 +; CHECK-NEXT: [[SHL110:%.*]] = shl i64 [[SHR109]], 16 +; CHECK-NEXT: [[SHR111:%.*]] = ashr i64 [[SHL110]], 8 +; CHECK-NEXT: [[TOBOOL112:%.*]] = icmp ne i64 [[SHR111]], 0 +; CHECK-NEXT: [[LNOT113:%.*]] = xor i1 [[TOBOOL112]], true +; CHECK-NEXT: [[LNOT_EXT114:%.*]] = zext i1 [[LNOT113]] to i32 +; CHECK-NEXT: [[ADD115:%.*]] = add nsw i32 [[ADD106]], [[LNOT_EXT114]] +; CHECK-NEXT: [[TMP28:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR116:%.*]] = ashr i64 [[TMP28]], 16 +; CHECK-NEXT: [[TOBOOL117:%.*]] = icmp ne i64 [[SHR116]], 0 +; CHECK-NEXT: [[LNOT118:%.*]] = xor i1 [[TOBOOL117]], true +; CHECK-NEXT: [[LNOT_EXT119:%.*]] = zext i1 [[LNOT118]] to i32 +; CHECK-NEXT: [[ADD120:%.*]] = add nsw i32 [[ADD115]], [[LNOT_EXT119]] +; CHECK-NEXT: [[TMP29:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR121:%.*]] = ashr i64 [[TMP29]], 6 +; CHECK-NEXT: [[SHR122:%.*]] = ashr i64 [[SHR121]], 5 +; CHECK-NEXT: [[TOBOOL123:%.*]] = icmp ne i64 [[SHR122]], 0 +; CHECK-NEXT: [[LNOT124:%.*]] = xor i1 [[TOBOOL123]], true +; CHECK-NEXT: [[LNOT_EXT125:%.*]] = zext i1 [[LNOT124]] to i32 +; CHECK-NEXT: [[ADD126:%.*]] = add nsw i32 [[ADD120]], [[LNOT_EXT125]] +; CHECK-NEXT: [[ADD127:%.*]] = add nsw i32 [[ADD126]], 6 +; CHECK-NEXT: [[CONV128:%.*]] = sext i32 [[ADD127]] to i64 +; CHECK-NEXT: [[TMP30:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[ADD129:%.*]] = add nsw i64 [[CONV128]], [[TMP30]] +; CHECK-NEXT: [[TMP31:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR130:%.*]] = ashr i64 [[TMP31]], 32 +; CHECK-NEXT: [[AND131:%.*]] = and i64 [[SHR130]], 1 +; CHECK-NEXT: [[ADD132:%.*]] = add nsw i64 7, [[AND131]] +; CHECK-NEXT: [[TMP32:%.*]] = load i64, i64* @hweight_long_w, 
align 8 +; CHECK-NEXT: [[SHR133:%.*]] = ashr i64 [[TMP32]], 32 +; CHECK-NEXT: [[TOBOOL134:%.*]] = icmp ne i64 [[SHR133]], 0 +; CHECK-NEXT: [[LNOT135:%.*]] = xor i1 [[TOBOOL134]], true +; CHECK-NEXT: [[LNOT_EXT136:%.*]] = zext i1 [[LNOT135]] to i32 +; CHECK-NEXT: [[CONV137:%.*]] = sext i32 [[LNOT_EXT136]] to i64 +; CHECK-NEXT: [[ADD138:%.*]] = add nsw i64 [[ADD132]], [[CONV137]] +; CHECK-NEXT: [[TMP33:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR139:%.*]] = ashr i64 [[TMP33]], 2 +; CHECK-NEXT: [[TOBOOL140:%.*]] = icmp ne i64 [[SHR139]], 0 +; CHECK-NEXT: [[LNOT141:%.*]] = xor i1 [[TOBOOL140]], true +; CHECK-NEXT: [[LNOT143:%.*]] = xor i1 [[LNOT141]], true +; CHECK-NEXT: [[LNOT_EXT144:%.*]] = zext i1 [[LNOT143]] to i32 +; CHECK-NEXT: [[CONV145:%.*]] = sext i32 [[LNOT_EXT144]] to i64 +; CHECK-NEXT: [[ADD146:%.*]] = add nsw i64 [[ADD138]], [[CONV145]] +; CHECK-NEXT: [[TMP34:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[ADD147:%.*]] = add nsw i64 [[ADD146]], [[TMP34]] +; CHECK-NEXT: [[SHR148:%.*]] = ashr i64 [[ADD129]], [[ADD147]] +; CHECK-NEXT: [[AND149:%.*]] = and i64 [[SHR66]], [[SHR148]] +; CHECK-NEXT: [[TMP35:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[ADD150:%.*]] = add nsw i64 3, [[TMP35]] +; CHECK-NEXT: [[TMP36:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[ADD151:%.*]] = add nsw i64 [[ADD150]], [[TMP36]] +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 1, [[ADD151]] +; CHECK-NEXT: [[CONV152:%.*]] = zext i1 [[CMP]] to i32 +; CHECK-NEXT: [[CONV153:%.*]] = sext i32 [[CONV152]] to i64 +; CHECK-NEXT: [[TMP37:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[AND154:%.*]] = and i64 [[TMP37]], 6 +; CHECK-NEXT: [[TOBOOL155:%.*]] = icmp ne i64 [[AND154]], 0 +; CHECK-NEXT: [[LNOT156:%.*]] = xor i1 [[TOBOOL155]], true +; CHECK-NEXT: [[LNOT_EXT157:%.*]] = zext i1 [[LNOT156]] to i32 +; CHECK-NEXT: [[TMP38:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR158:%.*]] = ashr i64 [[TMP38]], 7 +; CHECK-NEXT: [[TOBOOL159:%.*]] = icmp ne i64 [[SHR158]], 0 +; CHECK-NEXT: [[LNOT160:%.*]] = xor i1 [[TOBOOL159]], true +; CHECK-NEXT: [[LNOT162:%.*]] = xor i1 [[LNOT160]], true +; CHECK-NEXT: [[LNOT_EXT163:%.*]] = zext i1 [[LNOT162]] to i32 +; CHECK-NEXT: [[ADD164:%.*]] = add nsw i32 [[LNOT_EXT157]], [[LNOT_EXT163]] +; CHECK-NEXT: [[CONV165:%.*]] = sext i32 [[ADD164]] to i64 +; CHECK-NEXT: [[TMP39:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR166:%.*]] = ashr i64 [[TMP39]], 2 +; CHECK-NEXT: [[ADD167:%.*]] = add nsw i64 [[CONV165]], [[SHR166]] +; CHECK-NEXT: [[TMP40:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHL168:%.*]] = shl i64 [[TMP40]], 1 +; CHECK-NEXT: [[TOBOOL169:%.*]] = icmp ne i64 [[SHL168]], 0 +; CHECK-NEXT: [[LNOT170:%.*]] = xor i1 [[TOBOOL169]], true +; CHECK-NEXT: [[LNOT_EXT171:%.*]] = zext i1 [[LNOT170]] to i32 +; CHECK-NEXT: [[CONV172:%.*]] = sext i32 [[LNOT_EXT171]] to i64 +; CHECK-NEXT: [[ADD173:%.*]] = add nsw i64 [[ADD167]], [[CONV172]] +; CHECK-NEXT: [[TMP41:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR174:%.*]] = ashr i64 [[TMP41]], 32 +; CHECK-NEXT: [[SHR175:%.*]] = ashr i64 [[SHR174]], 2 +; CHECK-NEXT: [[TOBOOL176:%.*]] = icmp ne i64 [[SHR175]], 0 +; CHECK-NEXT: [[LNOT177:%.*]] = xor i1 [[TOBOOL176]], true +; CHECK-NEXT: [[LNOT_EXT178:%.*]] = zext i1 [[LNOT177]] to i32 +; CHECK-NEXT: [[CONV179:%.*]] = sext i32 [[LNOT_EXT178]] to i64 +; CHECK-NEXT: [[ADD180:%.*]] = add nsw i64 [[ADD173]], [[CONV179]] +; CHECK-NEXT: 
[[TMP42:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR181:%.*]] = ashr i64 [[TMP42]], 2 +; CHECK-NEXT: [[SHR182:%.*]] = ashr i64 [[SHR181]], 3 +; CHECK-NEXT: [[TOBOOL183:%.*]] = icmp ne i64 [[SHR182]], 0 +; CHECK-NEXT: [[LNOT184:%.*]] = xor i1 [[TOBOOL183]], true +; CHECK-NEXT: [[LNOT_EXT185:%.*]] = zext i1 [[LNOT184]] to i32 +; CHECK-NEXT: [[CONV186:%.*]] = sext i32 [[LNOT_EXT185]] to i64 +; CHECK-NEXT: [[ADD187:%.*]] = add nsw i64 [[ADD180]], [[CONV186]] +; CHECK-NEXT: [[TMP43:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR188:%.*]] = ashr i64 [[TMP43]], 2 +; CHECK-NEXT: [[SHR189:%.*]] = ashr i64 [[SHR188]], 4 +; CHECK-NEXT: [[TOBOOL190:%.*]] = icmp ne i64 [[SHR189]], 0 +; CHECK-NEXT: [[LNOT191:%.*]] = xor i1 [[TOBOOL190]], true +; CHECK-NEXT: [[LNOT_EXT192:%.*]] = zext i1 [[LNOT191]] to i32 +; CHECK-NEXT: [[CONV193:%.*]] = sext i32 [[LNOT_EXT192]] to i64 +; CHECK-NEXT: [[ADD194:%.*]] = add nsw i64 [[ADD187]], [[CONV193]] +; CHECK-NEXT: [[TMP44:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR195:%.*]] = ashr i64 [[TMP44]], 2 +; CHECK-NEXT: [[SHR196:%.*]] = ashr i64 [[SHR195]], 5 +; CHECK-NEXT: [[TOBOOL197:%.*]] = icmp ne i64 [[SHR196]], 0 +; CHECK-NEXT: [[LNOT198:%.*]] = xor i1 [[TOBOOL197]], true +; CHECK-NEXT: [[LNOT_EXT199:%.*]] = zext i1 [[LNOT198]] to i32 +; CHECK-NEXT: [[CONV200:%.*]] = sext i32 [[LNOT_EXT199]] to i64 +; CHECK-NEXT: [[ADD201:%.*]] = add nsw i64 [[ADD194]], [[CONV200]] +; CHECK-NEXT: [[TMP45:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[ADD202:%.*]] = add nsw i64 [[ADD201]], [[TMP45]] +; CHECK-NEXT: [[SHR203:%.*]] = ashr i64 [[ADD202]], 2 +; CHECK-NEXT: [[TMP46:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[TOBOOL204:%.*]] = icmp ne i64 [[TMP46]], 0 +; CHECK-NEXT: [[LNOT205:%.*]] = xor i1 [[TOBOOL204]], true +; CHECK-NEXT: [[LNOT_EXT206:%.*]] = zext i1 [[LNOT205]] to i32 +; CHECK-NEXT: [[ADD207:%.*]] = add nsw i32 6, [[LNOT_EXT206]] +; CHECK-NEXT: [[SH_PROM208:%.*]] = zext i32 [[ADD207]] to i64 +; CHECK-NEXT: [[SHR209:%.*]] = ashr i64 [[SHR203]], [[SH_PROM208]] +; CHECK-NEXT: [[SHR210:%.*]] = ashr i64 [[SHR209]], 2 +; CHECK-NEXT: [[TMP47:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR211:%.*]] = ashr i64 [[TMP47]], 2 +; CHECK-NEXT: [[SHR212:%.*]] = ashr i64 [[SHR211]], 16 +; CHECK-NEXT: [[AND213:%.*]] = and i64 [[SHR212]], 1 +; CHECK-NEXT: [[ADD214:%.*]] = add nsw i64 7, [[AND213]] +; CHECK-NEXT: [[TMP48:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[CMP215:%.*]] = icmp slt i64 [[TMP48]], 1 +; CHECK-NEXT: [[CONV216:%.*]] = zext i1 [[CMP215]] to i32 +; CHECK-NEXT: [[CONV217:%.*]] = sext i32 [[CONV216]] to i64 +; CHECK-NEXT: [[ADD218:%.*]] = add nsw i64 [[ADD214]], [[CONV217]] +; CHECK-NEXT: [[TMP49:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[TMP50:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SH_PROM219:%.*]] = trunc i64 [[TMP50]] to i32 +; CHECK-NEXT: [[SHL220:%.*]] = shl i32 6, [[SH_PROM219]] +; CHECK-NEXT: [[SHR221:%.*]] = ashr i32 [[SHL220]], 1 +; CHECK-NEXT: [[CONV222:%.*]] = sext i32 [[SHR221]] to i64 +; CHECK-NEXT: [[CMP223:%.*]] = icmp sgt i64 [[TMP49]], [[CONV222]] +; CHECK-NEXT: [[CONV224:%.*]] = zext i1 [[CMP223]] to i32 +; CHECK-NEXT: [[CONV225:%.*]] = sext i32 [[CONV224]] to i64 +; CHECK-NEXT: [[ADD226:%.*]] = add nsw i64 [[ADD218]], [[CONV225]] +; CHECK-NEXT: [[TMP51:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[TMP52:%.*]] = load i64, i64* @hweight_long_w, 
align 8 +; CHECK-NEXT: [[SH_PROM227:%.*]] = trunc i64 [[TMP52]] to i32 +; CHECK-NEXT: [[SHL228:%.*]] = shl i32 1, [[SH_PROM227]] +; CHECK-NEXT: [[SHR229:%.*]] = ashr i32 [[SHL228]], 5 +; CHECK-NEXT: [[CONV230:%.*]] = sext i32 [[SHR229]] to i64 +; CHECK-NEXT: [[AND231:%.*]] = and i64 [[TMP51]], [[CONV230]] +; CHECK-NEXT: [[TOBOOL232:%.*]] = icmp ne i64 [[AND231]], 0 +; CHECK-NEXT: [[LNOT233:%.*]] = xor i1 [[TOBOOL232]], true +; CHECK-NEXT: [[LNOT_EXT234:%.*]] = zext i1 [[LNOT233]] to i32 +; CHECK-NEXT: [[CONV235:%.*]] = sext i32 [[LNOT_EXT234]] to i64 +; CHECK-NEXT: [[ADD236:%.*]] = add nsw i64 [[ADD226]], [[CONV235]] +; CHECK-NEXT: [[TMP53:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR237:%.*]] = ashr i64 [[TMP53]], 1 +; CHECK-NEXT: [[TOBOOL238:%.*]] = icmp ne i64 [[SHR237]], 0 +; CHECK-NEXT: [[LNOT239:%.*]] = xor i1 [[TOBOOL238]], true +; CHECK-NEXT: [[LNOT241:%.*]] = xor i1 [[LNOT239]], true +; CHECK-NEXT: [[LNOT_EXT242:%.*]] = zext i1 [[LNOT241]] to i32 +; CHECK-NEXT: [[CONV243:%.*]] = sext i32 [[LNOT_EXT242]] to i64 +; CHECK-NEXT: [[ADD244:%.*]] = add nsw i64 [[ADD236]], [[CONV243]] +; CHECK-NEXT: [[TMP54:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[TMP55:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[TMP56:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[ADD245:%.*]] = add nsw i64 2, [[TMP56]] +; CHECK-NEXT: [[SHR246:%.*]] = ashr i64 [[TMP55]], [[ADD245]] +; CHECK-NEXT: [[SHR247:%.*]] = ashr i64 [[SHR246]], 6 +; CHECK-NEXT: [[SHL248:%.*]] = shl i64 [[SHR247]], 1 +; CHECK-NEXT: [[AND249:%.*]] = and i64 [[TMP54]], [[SHL248]] +; CHECK-NEXT: [[TOBOOL250:%.*]] = icmp ne i64 [[AND249]], 0 +; CHECK-NEXT: [[LNOT251:%.*]] = xor i1 [[TOBOOL250]], true +; CHECK-NEXT: [[LNOT_EXT252:%.*]] = zext i1 [[LNOT251]] to i32 +; CHECK-NEXT: [[CONV253:%.*]] = sext i32 [[LNOT_EXT252]] to i64 +; CHECK-NEXT: [[ADD254:%.*]] = add nsw i64 [[ADD244]], [[CONV253]] +; CHECK-NEXT: [[TMP57:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[TMP58:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[ADD255:%.*]] = add nsw i64 8, [[TMP58]] +; CHECK-NEXT: [[SHR256:%.*]] = ashr i64 [[TMP57]], [[ADD255]] +; CHECK-NEXT: [[AND257:%.*]] = and i64 [[SHR256]], 3 +; CHECK-NEXT: [[TOBOOL258:%.*]] = icmp ne i64 [[AND257]], 0 +; CHECK-NEXT: [[LNOT259:%.*]] = xor i1 [[TOBOOL258]], true +; CHECK-NEXT: [[LNOT_EXT260:%.*]] = zext i1 [[LNOT259]] to i32 +; CHECK-NEXT: [[CONV261:%.*]] = sext i32 [[LNOT_EXT260]] to i64 +; CHECK-NEXT: [[ADD262:%.*]] = add nsw i64 [[ADD254]], [[CONV261]] +; CHECK-NEXT: [[TMP59:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[TOBOOL263:%.*]] = icmp ne i64 [[TMP59]], 0 +; CHECK-NEXT: [[LNOT264:%.*]] = xor i1 [[TOBOOL263]], true +; CHECK-NEXT: [[LNOT_EXT265:%.*]] = zext i1 [[LNOT264]] to i32 +; CHECK-NEXT: [[CONV266:%.*]] = sext i32 [[LNOT_EXT265]] to i64 +; CHECK-NEXT: [[ADD267:%.*]] = add nsw i64 [[ADD262]], [[CONV266]] +; CHECK-NEXT: [[SHR268:%.*]] = ashr i64 [[SHR210]], [[ADD267]] +; CHECK-NEXT: [[SHR269:%.*]] = ashr i64 [[SHR268]], 6 +; CHECK-NEXT: [[TMP60:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[ADD270:%.*]] = add nsw i64 4, [[TMP60]] +; CHECK-NEXT: [[SHR271:%.*]] = ashr i64 [[SHR269]], [[ADD270]] +; CHECK-NEXT: [[SHR272:%.*]] = ashr i64 [[SHR271]], 2 +; CHECK-NEXT: [[TMP61:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[SHR273:%.*]] = ashr i64 [[TMP61]], 6 +; CHECK-NEXT: [[TMP62:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: 
[[ADD274:%.*]] = add nsw i64 6, [[TMP62]] +; CHECK-NEXT: [[SHR275:%.*]] = ashr i64 [[SHR273]], [[ADD274]] +; CHECK-NEXT: [[AND276:%.*]] = and i64 [[SHR275]], 7 +; CHECK-NEXT: [[TOBOOL277:%.*]] = icmp ne i64 [[AND276]], 0 +; CHECK-NEXT: [[LNOT278:%.*]] = xor i1 [[TOBOOL277]], true +; CHECK-NEXT: [[LNOT_EXT279:%.*]] = zext i1 [[LNOT278]] to i32 +; CHECK-NEXT: [[SH_PROM280:%.*]] = zext i32 [[LNOT_EXT279]] to i64 +; CHECK-NEXT: [[SHR281:%.*]] = ashr i64 [[SHR272]], [[SH_PROM280]] +; CHECK-NEXT: [[CMP282:%.*]] = icmp sgt i64 [[CONV153]], [[SHR281]] +; CHECK-NEXT: [[CONV283:%.*]] = zext i1 [[CMP282]] to i32 +; CHECK-NEXT: [[CONV284:%.*]] = sext i32 [[CONV283]] to i64 +; CHECK-NEXT: [[AND285:%.*]] = and i64 [[AND149]], [[CONV284]] +; CHECK-NEXT: br label [[COND_END:%.*]] +; CHECK: cond.false: +; CHECK-NEXT: [[TMP63:%.*]] = load i64, i64* @hweight_long_w, align 8 +; CHECK-NEXT: [[CALL:%.*]] = call i32 (i64, ...) bitcast (i32 (...)* @__arch_hweight64 to i32 (i64, ...)*)(i64 [[TMP63]]) +; CHECK-NEXT: [[CONV286:%.*]] = sext i32 [[CALL]] to i64 +; CHECK-NEXT: br label [[COND_END]] +; CHECK: cond.end: +; CHECK-NEXT: [[COND:%.*]] = phi i64 [ [[AND285]], [[COND_TRUE]] ], [ [[CONV286]], [[COND_FALSE]] ] +; CHECK-NEXT: [[CONV287:%.*]] = trunc i64 [[COND]] to i32 +; CHECK-NEXT: ret i32 [[CONV287]] +; +entry: + %0 = load i64, i64* @hweight_long_w, align 8 + %1 = call i1 @llvm.is.constant.i64(i64 %0) + br i1 %1, label %cond.true, label %cond.false + +cond.true: ; preds = %entry + %2 = load i64, i64* @hweight_long_w, align 8 + %and = and i64 %2, 1 + %3 = load i64, i64* @hweight_long_w, align 8 + %add = add nsw i64 %and, %3 + %4 = load i64, i64* @hweight_long_w, align 8 + %and1 = and i64 %4, 2 + %tobool = icmp ne i64 %and1, 0 + %lnot = xor i1 %tobool, true + %lnot.ext = zext i1 %lnot to i32 + %conv = sext i32 %lnot.ext to i64 + %add2 = add nsw i64 %add, %conv + %5 = load i64, i64* @hweight_long_w, align 8 + %and3 = and i64 %5, 3 + %tobool4 = icmp ne i64 %and3, 0 + %lnot5 = xor i1 %tobool4, true + %lnot.ext6 = zext i1 %lnot5 to i32 + %conv7 = sext i32 %lnot.ext6 to i64 + %add8 = add nsw i64 %add2, %conv7 + %6 = load i64, i64* @hweight_long_w, align 8 + %and9 = and i64 %6, 4 + %tobool10 = icmp ne i64 %and9, 0 + %lnot11 = xor i1 %tobool10, true + %lnot13 = xor i1 %lnot11, true + %lnot.ext14 = zext i1 %lnot13 to i32 + %conv15 = sext i32 %lnot.ext14 to i64 + %add16 = add nsw i64 %add8, %conv15 + %7 = load i64, i64* @hweight_long_w, align 8 + %tobool17 = icmp ne i64 %7, 0 + %lnot18 = xor i1 %tobool17, true + %lnot20 = xor i1 %lnot18, true + %lnot.ext21 = zext i1 %lnot20 to i32 + %conv22 = sext i32 %lnot.ext21 to i64 + %add23 = add nsw i64 %add16, %conv22 + %8 = load i64, i64* @hweight_long_w, align 8 + %shl = shl i64 %8, 6 + %tobool24 = icmp ne i64 %shl, 0 + %lnot25 = xor i1 %tobool24, true + %lnot.ext26 = zext i1 %lnot25 to i32 + %9 = load i64, i64* @hweight_long_w, align 8 + %10 = load i64, i64* @hweight_long_w, align 8 + %add27 = add nsw i64 7, %10 + %sh_prom = trunc i64 %add27 to i32 + %shl28 = shl i32 1, %sh_prom + %conv29 = sext i32 %shl28 to i64 + %and30 = and i64 %9, %conv29 + %tobool31 = icmp ne i64 %and30, 0 + %lnot32 = xor i1 %tobool31, true + %lnot.ext33 = zext i1 %lnot32 to i32 + %add34 = add nsw i32 %lnot.ext26, %lnot.ext33 + %11 = load i64, i64* @hweight_long_w, align 8 + %shr = ashr i64 %11, 1 + %12 = load i64, i64* @hweight_long_w, align 8 + %shl35 = shl i64 %shr, %12 + %shr36 = ashr i64 %shl35, 8 + %shl37 = shl i64 %shr36, 2 + %tobool38 = icmp ne i64 %shl37, 0 + %lnot39 = xor i1 %tobool38, true + 
%lnot.ext40 = zext i1 %lnot39 to i32 + %add41 = add nsw i32 %add34, %lnot.ext40 + %13 = load i64, i64* @hweight_long_w, align 8 + %shr42 = ashr i64 %13, 3 + %tobool43 = icmp ne i64 %shr42, 0 + %lnot44 = xor i1 %tobool43, true + %lnot.ext45 = zext i1 %lnot44 to i32 + %add46 = add nsw i32 %add41, %lnot.ext45 + %14 = load i64, i64* @hweight_long_w, align 8 + %shr47 = ashr i64 %14, 4 + %tobool48 = icmp ne i64 %shr47, 0 + %lnot49 = xor i1 %tobool48, true + %lnot.ext50 = zext i1 %lnot49 to i32 + %add51 = add nsw i32 %add46, %lnot.ext50 + %add52 = add nsw i32 %add51, 5 + %15 = load i64, i64* @hweight_long_w, align 8 + %shr53 = ashr i64 %15, 6 + %tobool54 = icmp ne i64 %shr53, 0 + %lnot55 = xor i1 %tobool54, true + %lnot.ext56 = zext i1 %lnot55 to i32 + %add57 = add nsw i32 %add52, %lnot.ext56 + %16 = load i64, i64* @hweight_long_w, align 8 + %shr58 = ashr i64 %16, 7 + %tobool59 = icmp ne i64 %shr58, 0 + %lnot60 = xor i1 %tobool59, true + %lnot.ext61 = zext i1 %lnot60 to i32 + %add62 = add nsw i32 %add57, %lnot.ext61 + %conv63 = sext i32 %add62 to i64 + %17 = load i64, i64* @hweight_long_w, align 8 + %add64 = add nsw i64 %conv63, %17 + %shl65 = shl i64 %add23, %add64 + %shr66 = ashr i64 %shl65, 6 + %18 = load i64, i64* @hweight_long_w, align 8 + %shr67 = ashr i64 %18, 1 + %tobool68 = icmp ne i64 %shr67, 0 + %lnot69 = xor i1 %tobool68, true + %lnot.ext70 = zext i1 %lnot69 to i32 + %19 = load i64, i64* @hweight_long_w, align 8 + %shr71 = ashr i64 %19, 2 + %tobool72 = icmp ne i64 %shr71, 0 + %lnot73 = xor i1 %tobool72, true + %lnot.ext74 = zext i1 %lnot73 to i32 + %add75 = add nsw i32 %lnot.ext70, %lnot.ext74 + %20 = load i64, i64* @hweight_long_w, align 8 + %21 = load i64, i64* @hweight_long_w, align 8 + %sh_prom76 = trunc i64 %21 to i32 + %shl77 = shl i32 1, %sh_prom76 + %shr78 = ashr i32 %shl77, 1 + %conv79 = sext i32 %shr78 to i64 + %and80 = and i64 %20, %conv79 + %tobool81 = icmp ne i64 %and80, 0 + %lnot82 = xor i1 %tobool81, true + %lnot.ext83 = zext i1 %lnot82 to i32 + %add84 = add nsw i32 %add75, %lnot.ext83 + %22 = load i64, i64* @hweight_long_w, align 8 + %shr85 = ashr i64 %22, 16 + %tobool86 = icmp ne i64 %shr85, 0 + %lnot87 = xor i1 %tobool86, true + %lnot89 = xor i1 %lnot87, true + %lnot.ext90 = zext i1 %lnot89 to i32 + %add91 = add nsw i32 %add84, %lnot.ext90 + %23 = load i64, i64* @hweight_long_w, align 8 + %shr92 = ashr i64 %23, 6 + %tobool93 = icmp ne i64 %shr92, 0 + %lnot94 = xor i1 %tobool93, true + %lnot96 = xor i1 %lnot94, true + %lnot.ext97 = zext i1 %lnot96 to i32 + %add98 = add nsw i32 %add91, %lnot.ext97 + %24 = load i64, i64* @hweight_long_w, align 8 + %25 = load i64, i64* @hweight_long_w, align 8 + %shr99 = ashr i64 %25, 16 + %shl100 = shl i64 1, %shr99 + %shr101 = lshr i64 %shl100, 1 + %and102 = and i64 %24, %shr101 + %tobool103 = icmp ne i64 %and102, 0 + %lnot104 = xor i1 %tobool103, true + %lnot.ext105 = zext i1 %lnot104 to i32 + %add106 = add nsw i32 %add98, %lnot.ext105 + %26 = load i64, i64* @hweight_long_w, align 8 + %shr107 = ashr i64 %26, 8 + %27 = load i64, i64* @hweight_long_w, align 8 + %shl108 = shl i64 %shr107, %27 + %shr109 = ashr i64 %shl108, 8 + %shl110 = shl i64 %shr109, 16 + %shr111 = ashr i64 %shl110, 8 + %tobool112 = icmp ne i64 %shr111, 0 + %lnot113 = xor i1 %tobool112, true + %lnot.ext114 = zext i1 %lnot113 to i32 + %add115 = add nsw i32 %add106, %lnot.ext114 + %28 = load i64, i64* @hweight_long_w, align 8 + %shr116 = ashr i64 %28, 16 + %tobool117 = icmp ne i64 %shr116, 0 + %lnot118 = xor i1 %tobool117, true + %lnot.ext119 = zext i1 %lnot118 to i32 + 
%add120 = add nsw i32 %add115, %lnot.ext119 + %29 = load i64, i64* @hweight_long_w, align 8 + %shr121 = ashr i64 %29, 6 + %shr122 = ashr i64 %shr121, 5 + %tobool123 = icmp ne i64 %shr122, 0 + %lnot124 = xor i1 %tobool123, true + %lnot.ext125 = zext i1 %lnot124 to i32 + %add126 = add nsw i32 %add120, %lnot.ext125 + %add127 = add nsw i32 %add126, 6 + %conv128 = sext i32 %add127 to i64 + %30 = load i64, i64* @hweight_long_w, align 8 + %add129 = add nsw i64 %conv128, %30 + %31 = load i64, i64* @hweight_long_w, align 8 + %shr130 = ashr i64 %31, 32 + %and131 = and i64 %shr130, 1 + %add132 = add nsw i64 7, %and131 + %32 = load i64, i64* @hweight_long_w, align 8 + %shr133 = ashr i64 %32, 32 + %tobool134 = icmp ne i64 %shr133, 0 + %lnot135 = xor i1 %tobool134, true + %lnot.ext136 = zext i1 %lnot135 to i32 + %conv137 = sext i32 %lnot.ext136 to i64 + %add138 = add nsw i64 %add132, %conv137 + %33 = load i64, i64* @hweight_long_w, align 8 + %shr139 = ashr i64 %33, 2 + %tobool140 = icmp ne i64 %shr139, 0 + %lnot141 = xor i1 %tobool140, true + %lnot143 = xor i1 %lnot141, true + %lnot.ext144 = zext i1 %lnot143 to i32 + %conv145 = sext i32 %lnot.ext144 to i64 + %add146 = add nsw i64 %add138, %conv145 + %34 = load i64, i64* @hweight_long_w, align 8 + %add147 = add nsw i64 %add146, %34 + %shr148 = ashr i64 %add129, %add147 + %and149 = and i64 %shr66, %shr148 + %35 = load i64, i64* @hweight_long_w, align 8 + %add150 = add nsw i64 3, %35 + %36 = load i64, i64* @hweight_long_w, align 8 + %add151 = add nsw i64 %add150, %36 + %cmp = icmp slt i64 1, %add151 + %conv152 = zext i1 %cmp to i32 + %conv153 = sext i32 %conv152 to i64 + %37 = load i64, i64* @hweight_long_w, align 8 + %and154 = and i64 %37, 6 + %tobool155 = icmp ne i64 %and154, 0 + %lnot156 = xor i1 %tobool155, true + %lnot.ext157 = zext i1 %lnot156 to i32 + %38 = load i64, i64* @hweight_long_w, align 8 + %shr158 = ashr i64 %38, 7 + %tobool159 = icmp ne i64 %shr158, 0 + %lnot160 = xor i1 %tobool159, true + %lnot162 = xor i1 %lnot160, true + %lnot.ext163 = zext i1 %lnot162 to i32 + %add164 = add nsw i32 %lnot.ext157, %lnot.ext163 + %conv165 = sext i32 %add164 to i64 + %39 = load i64, i64* @hweight_long_w, align 8 + %shr166 = ashr i64 %39, 2 + %add167 = add nsw i64 %conv165, %shr166 + %40 = load i64, i64* @hweight_long_w, align 8 + %shl168 = shl i64 %40, 1 + %tobool169 = icmp ne i64 %shl168, 0 + %lnot170 = xor i1 %tobool169, true + %lnot.ext171 = zext i1 %lnot170 to i32 + %conv172 = sext i32 %lnot.ext171 to i64 + %add173 = add nsw i64 %add167, %conv172 + %41 = load i64, i64* @hweight_long_w, align 8 + %shr174 = ashr i64 %41, 32 + %shr175 = ashr i64 %shr174, 2 + %tobool176 = icmp ne i64 %shr175, 0 + %lnot177 = xor i1 %tobool176, true + %lnot.ext178 = zext i1 %lnot177 to i32 + %conv179 = sext i32 %lnot.ext178 to i64 + %add180 = add nsw i64 %add173, %conv179 + %42 = load i64, i64* @hweight_long_w, align 8 + %shr181 = ashr i64 %42, 2 + %shr182 = ashr i64 %shr181, 3 + %tobool183 = icmp ne i64 %shr182, 0 + %lnot184 = xor i1 %tobool183, true + %lnot.ext185 = zext i1 %lnot184 to i32 + %conv186 = sext i32 %lnot.ext185 to i64 + %add187 = add nsw i64 %add180, %conv186 + %43 = load i64, i64* @hweight_long_w, align 8 + %shr188 = ashr i64 %43, 2 + %shr189 = ashr i64 %shr188, 4 + %tobool190 = icmp ne i64 %shr189, 0 + %lnot191 = xor i1 %tobool190, true + %lnot.ext192 = zext i1 %lnot191 to i32 + %conv193 = sext i32 %lnot.ext192 to i64 + %add194 = add nsw i64 %add187, %conv193 + %44 = load i64, i64* @hweight_long_w, align 8 + %shr195 = ashr i64 %44, 2 + %shr196 = ashr i64 
+  %tobool197 = icmp ne i64 %shr196, 0
+  %lnot198 = xor i1 %tobool197, true
+  %lnot.ext199 = zext i1 %lnot198 to i32
+  %conv200 = sext i32 %lnot.ext199 to i64
+  %add201 = add nsw i64 %add194, %conv200
+  %45 = load i64, i64* @hweight_long_w, align 8
+  %add202 = add nsw i64 %add201, %45
+  %shr203 = ashr i64 %add202, 2
+  %46 = load i64, i64* @hweight_long_w, align 8
+  %tobool204 = icmp ne i64 %46, 0
+  %lnot205 = xor i1 %tobool204, true
+  %lnot.ext206 = zext i1 %lnot205 to i32
+  %add207 = add nsw i32 6, %lnot.ext206
+  %sh_prom208 = zext i32 %add207 to i64
+  %shr209 = ashr i64 %shr203, %sh_prom208
+  %shr210 = ashr i64 %shr209, 2
+  %47 = load i64, i64* @hweight_long_w, align 8
+  %shr211 = ashr i64 %47, 2
+  %shr212 = ashr i64 %shr211, 16
+  %and213 = and i64 %shr212, 1
+  %add214 = add nsw i64 7, %and213
+  %48 = load i64, i64* @hweight_long_w, align 8
+  %cmp215 = icmp slt i64 %48, 1
+  %conv216 = zext i1 %cmp215 to i32
+  %conv217 = sext i32 %conv216 to i64
+  %add218 = add nsw i64 %add214, %conv217
+  %49 = load i64, i64* @hweight_long_w, align 8
+  %50 = load i64, i64* @hweight_long_w, align 8
+  %sh_prom219 = trunc i64 %50 to i32
+  %shl220 = shl i32 6, %sh_prom219
+  %shr221 = ashr i32 %shl220, 1
+  %conv222 = sext i32 %shr221 to i64
+  %cmp223 = icmp sgt i64 %49, %conv222
+  %conv224 = zext i1 %cmp223 to i32
+  %conv225 = sext i32 %conv224 to i64
+  %add226 = add nsw i64 %add218, %conv225
+  %51 = load i64, i64* @hweight_long_w, align 8
+  %52 = load i64, i64* @hweight_long_w, align 8
+  %sh_prom227 = trunc i64 %52 to i32
+  %shl228 = shl i32 1, %sh_prom227
+  %shr229 = ashr i32 %shl228, 5
+  %conv230 = sext i32 %shr229 to i64
+  %and231 = and i64 %51, %conv230
+  %tobool232 = icmp ne i64 %and231, 0
+  %lnot233 = xor i1 %tobool232, true
+  %lnot.ext234 = zext i1 %lnot233 to i32
+  %conv235 = sext i32 %lnot.ext234 to i64
+  %add236 = add nsw i64 %add226, %conv235
+  %53 = load i64, i64* @hweight_long_w, align 8
+  %shr237 = ashr i64 %53, 1
+  %tobool238 = icmp ne i64 %shr237, 0
+  %lnot239 = xor i1 %tobool238, true
+  %lnot241 = xor i1 %lnot239, true
+  %lnot.ext242 = zext i1 %lnot241 to i32
+  %conv243 = sext i32 %lnot.ext242 to i64
+  %add244 = add nsw i64 %add236, %conv243
+  %54 = load i64, i64* @hweight_long_w, align 8
+  %55 = load i64, i64* @hweight_long_w, align 8
+  %56 = load i64, i64* @hweight_long_w, align 8
+  %add245 = add nsw i64 2, %56
+  %shr246 = ashr i64 %55, %add245
+  %shr247 = ashr i64 %shr246, 6
+  %shl248 = shl i64 %shr247, 1
+  %and249 = and i64 %54, %shl248
+  %tobool250 = icmp ne i64 %and249, 0
+  %lnot251 = xor i1 %tobool250, true
+  %lnot.ext252 = zext i1 %lnot251 to i32
+  %conv253 = sext i32 %lnot.ext252 to i64
+  %add254 = add nsw i64 %add244, %conv253
+  %57 = load i64, i64* @hweight_long_w, align 8
+  %58 = load i64, i64* @hweight_long_w, align 8
+  %add255 = add nsw i64 8, %58
+  %shr256 = ashr i64 %57, %add255
+  %and257 = and i64 %shr256, 3
+  %tobool258 = icmp ne i64 %and257, 0
+  %lnot259 = xor i1 %tobool258, true
+  %lnot.ext260 = zext i1 %lnot259 to i32
+  %conv261 = sext i32 %lnot.ext260 to i64
+  %add262 = add nsw i64 %add254, %conv261
+  %59 = load i64, i64* @hweight_long_w, align 8
+  %tobool263 = icmp ne i64 %59, 0
+  %lnot264 = xor i1 %tobool263, true
+  %lnot.ext265 = zext i1 %lnot264 to i32
+  %conv266 = sext i32 %lnot.ext265 to i64
+  %add267 = add nsw i64 %add262, %conv266
+  %shr268 = ashr i64 %shr210, %add267
+  %shr269 = ashr i64 %shr268, 6
+  %60 = load i64, i64* @hweight_long_w, align 8
+  %add270 = add nsw i64 4, %60
+  %shr271 = ashr i64 %shr269, %add270
+  %shr272 = ashr i64 %shr271, 2
+  %61 = load i64, i64* @hweight_long_w, align 8
+  %shr273 = ashr i64 %61, 6
+  %62 = load i64, i64* @hweight_long_w, align 8
+  %add274 = add nsw i64 6, %62
+  %shr275 = ashr i64 %shr273, %add274
+  %and276 = and i64 %shr275, 7
+  %tobool277 = icmp ne i64 %and276, 0
+  %lnot278 = xor i1 %tobool277, true
+  %lnot.ext279 = zext i1 %lnot278 to i32
+  %sh_prom280 = zext i32 %lnot.ext279 to i64
+  %shr281 = ashr i64 %shr272, %sh_prom280
+  %cmp282 = icmp sgt i64 %conv153, %shr281
+  %conv283 = zext i1 %cmp282 to i32
+  %conv284 = sext i32 %conv283 to i64
+  %and285 = and i64 %and149, %conv284
+  br label %cond.end
+
+cond.false:                                       ; preds = %entry
+  %63 = load i64, i64* @hweight_long_w, align 8
+  %call = call i32 (i64, ...) bitcast (i32 (...)* @__arch_hweight64 to i32 (i64, ...)*)(i64 %63)
+  %conv286 = sext i32 %call to i64
+  br label %cond.end
+
+cond.end:                                         ; preds = %cond.false, %cond.true
+  %cond = phi i64 [ %and285, %cond.true ], [ %conv286, %cond.false ]
+  %conv287 = trunc i64 %cond to i32
+  ret i32 %conv287
+}
+
+declare i1 @llvm.is.constant.i64(i64)
+declare dso_local i32 @__arch_hweight64(...)
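
For context on the test body above: the `@llvm.is.constant.i64` guard with a huge
arithmetic "constant" arm and an `@__arch_hweight64` fallback is the shape Clang
emits for `__builtin_constant_p`-guarded popcount helpers such as the Linux
kernel's `hweight_long()` (Clang lowers `__builtin_constant_p` to the
`llvm.is.constant` intrinsic). A minimal C sketch of that source pattern, with
the large constant arm elided and all names illustrative rather than copied from
any particular kernel tree:

    /* Illustrative sketch only. __builtin_constant_p(w) lowers to
     * @llvm.is.constant.i64; when w comes from a global load and is not
     * provably constant, the intrinsic folds to false and the large
     * compile-time arm is dead, leaving only the runtime fallback. */
    extern int __arch_hweight64(unsigned long w);

    static inline int hweight_long(unsigned long w)
    {
        return __builtin_constant_p(w)
                   ? (int)(w & 1) /* ...plus many more compile-time terms... */
                   : __arch_hweight64(w);
    }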