Index: llvm/include/llvm/Analysis/TargetTransformInfo.h =================================================================== --- llvm/include/llvm/Analysis/TargetTransformInfo.h +++ llvm/include/llvm/Analysis/TargetTransformInfo.h @@ -587,6 +587,10 @@ /// considered as "Slow". enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware }; + /// Return true if the target would benefit from recognizing a table-based + /// implementation of cttz. + bool preferCTTZLowering() const; + /// Return true if the specified immediate is legal add immediate, that /// is the target has add instructions which can add a register with the /// immediate without having to materialize the immediate into a register. @@ -1487,6 +1491,7 @@ APInt &UndefElts2, APInt &UndefElts3, std::function<void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp) = 0; + virtual bool preferCTTZLowering() = 0; virtual bool isLegalAddImmediate(int64_t Imm) = 0; virtual bool isLegalICmpImmediate(int64_t Imm) = 0; virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, @@ -1837,6 +1842,9 @@ IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3, SimplifyAndSetOp); } + bool preferCTTZLowering() override { + return Impl.preferCTTZLowering(); + } bool isLegalAddImmediate(int64_t Imm) override { return Impl.isLegalAddImmediate(Imm); } Index: llvm/include/llvm/Analysis/TargetTransformInfoImpl.h =================================================================== --- llvm/include/llvm/Analysis/TargetTransformInfoImpl.h +++ llvm/include/llvm/Analysis/TargetTransformInfoImpl.h @@ -193,6 +193,8 @@ void getPeelingPreferences(Loop *, ScalarEvolution &, TTI::PeelingPreferences &) const {} + bool preferCTTZLowering() const { return false; } + bool isLegalAddImmediate(int64_t Imm) const { return false; } bool isLegalICmpImmediate(int64_t Imm) const { return false; } Index: llvm/include/llvm/CodeGen/BasicTTIImpl.h =================================================================== --- llvm/include/llvm/CodeGen/BasicTTIImpl.h +++ 
llvm/include/llvm/CodeGen/BasicTTIImpl.h @@ -288,6 +288,10 @@ return nullptr; } + bool preferCTTZLowering() { + return getTLI()->preferCTTZLowering(); + } + bool isLegalAddImmediate(int64_t imm) { return getTLI()->isLegalAddImmediate(imm); } Index: llvm/include/llvm/CodeGen/TargetLowering.h =================================================================== --- llvm/include/llvm/CodeGen/TargetLowering.h +++ llvm/include/llvm/CodeGen/TargetLowering.h @@ -2404,6 +2404,12 @@ return true; } + /// Return true if the target would benefit from recognizing a table-based + /// implementation of cttz. + virtual bool preferCTTZLowering() const { + return false; + } + /// Return true if the specified immediate is legal add immediate, that is the /// target has add instructions which can add a register with the immediate /// without having to materialize the immediate into a register. Index: llvm/lib/Analysis/TargetTransformInfo.cpp =================================================================== --- llvm/lib/Analysis/TargetTransformInfo.cpp +++ llvm/lib/Analysis/TargetTransformInfo.cpp @@ -323,6 +323,10 @@ return TTIImpl->getPeelingPreferences(L, SE, PP); } +bool TargetTransformInfo::preferCTTZLowering() const { + return TTIImpl->preferCTTZLowering(); +} + bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const { return TTIImpl->isLegalAddImmediate(Imm); } Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h =================================================================== --- llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -592,6 +592,7 @@ bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override; + bool preferCTTZLowering() const override; bool isLegalAddImmediate(int64_t) const override; bool isLegalICmpImmediate(int64_t) const override; Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp =================================================================== --- 
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -12173,6 +12173,10 @@ return LLT(); } +bool AArch64TargetLowering::preferCTTZLowering() const { + return true; +} + // 12-bit optionally shifted immediates are legal for adds. bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const { if (Immed == std::numeric_limits<int64_t>::min()) { Index: llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp =================================================================== --- llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp +++ llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp @@ -21,6 +21,7 @@ #include "llvm/Analysis/BasicAliasAnalysis.h" #include "llvm/Analysis/GlobalsModRef.h" #include "llvm/Analysis/TargetLibraryInfo.h" +#include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Dominators.h" @@ -362,10 +363,172 @@ return false; } +static bool isCTTZTable(const ConstantDataArray &Table, uint64_t Mul, + uint64_t Shift, uint64_t InputBits) { + unsigned Length = Table.getNumElements(); + uint64_t IntWidth = InputBits / CHAR_BIT; + if (Length < InputBits || Length > InputBits * 2) + return false; + + APInt Mask(InputBits, ((IntWidth << (InputBits - Shift)) - 1) << Shift); + unsigned Matched = 0; + + for (unsigned i = 0; i < Length; i++) { + uint64_t Element = Table.getElementAsInteger(i); + if (Element < InputBits && + (((Mul << Element) & Mask.getZExtValue()) >> Shift) == i) + Matched++; + } + + return Matched == InputBits; +} + +// Try to recognize table-based ctz implementation. 
+// E.g., an example in C (for more cases please see the llvm/tests): +// int f(unsigned x) { +// static const char table[32] = +// {0, 1, 28, 2, 29, 14, 24, 3, 30, +// 22, 20, 15, 25, 17, 4, 8, 31, 27, +// 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9}; +// return table[((unsigned)((x & -x) * 0x077CB531U)) >> 27]; +// } +// this can be lowered to `cttz` instruction. +// There is also a special case when the element is 0. +// +// Here are some examples or IR for AARCH64 target: +// +// CASE 1: +// %sub = sub i32 0, %x +// %and = and i32 %sub, %x +// %mul = mul i32 %and, 125613361 +// %shr = lshr i32 %mul, 27 +// %idxprom = zext i32 %shr to i64 +// %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @ctz1.table, i64 0, +// i64 %idxprom %0 = load i8, i8* %arrayidx, align 1, !tbaa !8 +// +// CASE 2: +// %sub = sub i32 0, %x +// %and = and i32 %sub, %x +// %mul = mul i32 %and, 72416175 +// %shr = lshr i32 %mul, 26 +// %idxprom = zext i32 %shr to i64 +// %arrayidx = getelementptr inbounds [64 x i16], [64 x i16]* @ctz2.table, i64 +// 0, i64 %idxprom %0 = load i16, i16* %arrayidx, align 2, !tbaa !8 +// +// CASE 3: +// %sub = sub i32 0, %x +// %and = and i32 %sub, %x +// %mul = mul i32 %and, 81224991 +// %shr = lshr i32 %mul, 27 +// %idxprom = zext i32 %shr to i64 +// %arrayidx = getelementptr inbounds [32 x i32], [32 x i32]* @ctz3.table, i64 +// 0, i64 %idxprom %0 = load i32, i32* %arrayidx, align 4, !tbaa !8 +// +// CASE 4: +// %sub = sub i64 0, %x +// %and = and i64 %sub, %x +// %mul = mul i64 %and, 283881067100198605 +// %shr = lshr i64 %mul, 58 +// %arrayidx = getelementptr inbounds [64 x i8], [64 x i8]* @table, i64 0, i64 +// %shr %0 = load i8, i8* %arrayidx, align 1, !tbaa !8 +// +// All this can be lowered to @llvm.cttz.i32/64 intrinsic. 
+static bool tryToRecognizeTableBasedCttz(Instruction &I) { + LoadInst *LI = dyn_cast<LoadInst>(&I); + if (!LI) + return false; + + Type *ElType = LI->getPointerOperandType()->getPointerElementType(); + if (!ElType->isIntegerTy()) + return false; + + GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getPointerOperand()); + if (!GEP || !GEP->isInBounds() || GEP->getNumIndices() != 2) + return false; + + Type *GEPPointeeType = GEP->getPointerOperandType()->getPointerElementType(); + if (!GEPPointeeType->isArrayTy()) + return false; + + uint64_t ArraySize = GEPPointeeType->getArrayNumElements(); + if (ArraySize != 32 && ArraySize != 64) + return false; + + User *GEPUser = dyn_cast<User>(GEP->getPointerOperand()); + if (!GEPUser) + return false; + + ConstantDataArray *ConstData = + dyn_cast<ConstantDataArray>(GEPUser->getOperand(0)); + if (!ConstData) + return false; + + Value *Idx1 = GEP->idx_begin()->get(); + Constant *Zero = dyn_cast<Constant>(Idx1); + if (!Zero || !Zero->isZeroValue()) + return false; + + Value *Idx2 = std::next(GEP->idx_begin())->get(); + + bool ConstIsWide = !match(Idx2, m_ZExt(m_Value())); + + Value *X1; + uint64_t MulConst, ShiftConst; + // FIXME: AArch64 has i64 type for the GEP index, so this match will + // probably fail for other targets. + if (!match(Idx2, + m_ZExtOrSelf(m_LShr( + m_ZExtOrSelf(m_Mul(m_c_And(m_Neg(m_Value(X1)), m_Deferred(X1)), + m_ConstantInt(MulConst))), + m_ConstantInt(ShiftConst))))) + return false; + + unsigned InputBits = ConstIsWide ? 64 : 32; + + // Shift should extract top 5..7 bits. 
+ if (ShiftConst < InputBits - 7 || ShiftConst > InputBits - 5) + return false; + + Type *XType = X1->getType(); + if (!XType->isIntegerTy(InputBits)) + return false; + + if (!isCTTZTable(*ConstData, MulConst, ShiftConst, InputBits)) + return false; + + auto ZeroTableElem = ConstData->getElementAsInteger(0); + bool DefinedForZero = ZeroTableElem == InputBits; + + IRBuilder<> B(LI); + ConstantInt *BoolConst = B.getInt1(!DefinedForZero); + auto Cttz = B.CreateIntrinsic(Intrinsic::cttz, {XType}, {X1, BoolConst}); + Value *ZExtOrTrunc = nullptr; + + if (DefinedForZero) { + ZExtOrTrunc = B.CreateZExtOrTrunc(Cttz, ElType); + } else { + // If the value in elem 0 isn't the same as InputBits, we still want to + // produce the value from the table. + auto Cmp = B.CreateICmpEQ(X1, ConstantInt::get(XType, 0)); + auto Select = + B.CreateSelect(Cmp, ConstantInt::get(XType, ZeroTableElem), Cttz); + + // NOTE: If the table[0] is 0, but the cttz(0) is defined by the Target + // it should be handled as: `cttz(x) & (typeSize - 1)`. + + ZExtOrTrunc = B.CreateZExtOrTrunc(Select, ElType); + } + + LI->replaceAllUsesWith(ZExtOrTrunc); + + return true; +} + /// This is the entry point for folds that could be implemented in regular /// InstCombine, but they are separated because they are not expected to /// occur frequently and/or have more than a constant-length pattern match. -static bool foldUnusualPatterns(Function &F, DominatorTree &DT) { +static bool foldUnusualPatterns(Function &F, DominatorTree &DT, + TargetTransformInfo &TTI) { bool MadeChange = false; for (BasicBlock &BB : F) { // Ignore unreachable basic blocks. 
@@ -380,7 +543,9 @@ for (Instruction &I : make_range(BB.rbegin(), BB.rend())) { MadeChange |= foldAnyOrAllBitsSet(I); MadeChange |= foldGuardedFunnelShift(I, DT); - MadeChange |= tryToRecognizePopCount(I); + MadeChange |= tryToRecognizePopCount(I); + if (TTI.preferCTTZLowering()) + MadeChange |= tryToRecognizeTableBasedCttz(I); } } @@ -394,12 +559,13 @@ /// This is the entry point for all transforms. Pass manager differences are /// handled in the callers of this function. -static bool runImpl(Function &F, TargetLibraryInfo &TLI, DominatorTree &DT) { +static bool runImpl(Function &F, TargetLibraryInfo &TLI, DominatorTree &DT, + TargetTransformInfo &TTI) { bool MadeChange = false; const DataLayout &DL = F.getParent()->getDataLayout(); TruncInstCombine TIC(TLI, DL, DT); MadeChange |= TIC.run(F); - MadeChange |= foldUnusualPatterns(F, DT); + MadeChange |= foldUnusualPatterns(F, DT, TTI); return MadeChange; } @@ -407,6 +573,7 @@ AnalysisUsage &AU) const { AU.setPreservesCFG(); AU.addRequired<DominatorTreeWrapperPass>(); + AU.addRequired<TargetTransformInfoWrapperPass>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); AU.addPreserved<AAResultsWrapperPass>(); AU.addPreserved<BasicAAWrapperPass>(); @@ -417,14 +584,16 @@ bool AggressiveInstCombinerLegacyPass::runOnFunction(Function &F) { auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); - return runImpl(F, TLI, DT); + auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); + return runImpl(F, TLI, DT, TTI); } PreservedAnalyses AggressiveInstCombinePass::run(Function &F, FunctionAnalysisManager &AM) { auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); auto &DT = AM.getResult<DominatorTreeAnalysis>(F); - if (!runImpl(F, TLI, DT)) { + auto &TTI = AM.getResult<TargetIRAnalysis>(F); + if (!runImpl(F, TLI, DT, TTI)) { // No changes, all analyses are preserved. 
return PreservedAnalyses::all(); } @@ -439,6 +608,7 @@ "aggressive-instcombine", "Combine pattern based expressions", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) +INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_END(AggressiveInstCombinerLegacyPass, "aggressive-instcombine", "Combine pattern based expressions", false, false) Index: llvm/test/Transforms/AggressiveInstCombine/AARCH64/dereferencing-pointer.ll =================================================================== --- /dev/null +++ llvm/test/Transforms/AggressiveInstCombine/AARCH64/dereferencing-pointer.ll @@ -0,0 +1,60 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -aggressive-instcombine -mtriple aarch64-linux-gnu -S < %s | FileCheck %s + +;; static const unsigned long long magic = 0x03f08c5392f756cdULL; +;; +;; static const int table[64] = { +;; 0, 1, 12, 2, 13, 22, 17, 3, +;; 14, 33, 23, 36, 18, 58, 28, 4, +;; 62, 15, 34, 26, 24, 48, 50, 37, +;; 19, 55, 59, 52, 29, 44, 39, 5, +;; 63, 11, 21, 16, 32, 35, 57, 27, +;; 61, 25, 47, 49, 54, 51, 43, 38, +;; 10, 20, 31, 56, 60, 46, 53, 42, +;; 9, 30, 45, 41, 8, 40, 7, 6, +;; }; +;; +;; int ctz6 (unsigned long long * const b) { +;; return table[(((*b) & -(*b)) * magic) >> 58]; +;; } + +; ModuleID = 'test.c' +source_filename = "test.c" +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux-gnu" + +@table = internal unnamed_addr constant [64 x i32] [i32 0, i32 1, i32 12, i32 2, i32 13, i32 22, i32 17, i32 3, i32 14, i32 33, i32 23, i32 36, i32 18, i32 58, i32 28, i32 4, i32 62, i32 15, i32 34, i32 26, i32 24, i32 48, i32 50, i32 37, i32 19, i32 55, i32 59, i32 52, i32 29, i32 44, i32 39, i32 5, i32 63, i32 11, i32 21, i32 16, i32 32, i32 35, i32 57, i32 27, i32 61, i32 25, i32 47, i32 49, i32 54, i32 51, i32 43, i32 38, i32 10, i32 20, i32 31, i32 56, i32 
60, i32 46, i32 53, i32 42, i32 9, i32 30, i32 45, i32 41, i32 8, i32 40, i32 7, i32 6], align 4 + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readonly uwtable willreturn +define dso_local i32 @ctz6(i64* nocapture readonly %b) { +; CHECK-LABEL: @ctz6( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[B:%.*]], align 8 +; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.cttz.i64(i64 [[TMP0]], i1 true) +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP0]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i64 0, i64 [[TMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32 +; CHECK-NEXT: ret i32 [[TMP4]] +; +entry: + %0 = load i64, i64* %b, align 8 + %sub = sub i64 0, %0 + %and = and i64 %0, %sub + %mul = mul i64 %and, 283881067100198605 + %shr = lshr i64 %mul, 58 + %arrayidx = getelementptr inbounds [64 x i32], [64 x i32]* @table, i64 0, i64 %shr + %1 = load i32, i32* %arrayidx, align 4 + ret i32 %1 +} + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6} +!llvm.ident = !{!7} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 1, !"branch-target-enforcement", i32 0} +!2 = !{i32 1, !"sign-return-address", i32 0} +!3 = !{i32 1, !"sign-return-address-all", i32 0} +!4 = !{i32 1, !"sign-return-address-with-bkey", i32 0} +!5 = !{i32 7, !"uwtable", i32 1} +!6 = !{i32 7, !"frame-pointer", i32 1} +!7 = !{!"clang version 14.0.0"} Index: llvm/test/Transforms/AggressiveInstCombine/AARCH64/lit.local.cfg =================================================================== --- /dev/null +++ llvm/test/Transforms/AggressiveInstCombine/AARCH64/lit.local.cfg @@ -0,0 +1,2 @@ +if not 'AArch64' in config.root.targets: + config.unsupported = True Index: llvm/test/Transforms/AggressiveInstCombine/AARCH64/lower-table-based-ctz-basics.ll =================================================================== --- /dev/null +++ llvm/test/Transforms/AggressiveInstCombine/AARCH64/lower-table-based-ctz-basics.ll @@ -0,0 +1,300 @@ +; NOTE: Assertions have been 
autogenerated by utils/update_test_checks.py +; RUN: opt -aggressive-instcombine -mtriple aarch64-linux-gnu -S < %s | FileCheck %s + +;; C reproducers: +;; int ctz1 (unsigned x) +;; { +;; static const char table[32] = +;; { +;; 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, +;; 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 +;; }; +;; return table[((unsigned)((x & -x) * 0x077CB531U)) >> 27]; +;; } + +;; int ctz2(unsigned x) +;; { +;; #define u 0 +;; static short table[64] = +;; { +;; 32, 0, 1, 12, 2, 6, u, 13, 3, u, 7, u, u, u, u, 14, +;; 10, 4, u, u, 8, u, u, 25, u, u, u, u, u, 21, 27, 15, +;; 31, 11, 5, u, u, u, u, u, 9, u, u, 24, u, u, 20, 26, +;; 30, u, u, u, u, 23, u, 19, 29, u, 22, 18, 28, 17, 16, u +;; }; +;; x = (x & -x) * 0x0450FBAF; +;; return table[x >> 26]; +;; } + +;; int ctz3(unsigned x) +;;{ +;; static int table[32] = +;; { +;; 0, 1, 2, 24, 3, 19, 6, 25, 22, 4, 20, 10, 16, 7, 12, 26, +;; 31, 23, 18, 5, 21, 9, 15, 11, 30, 17, 8, 14, 29, 13, 28, 27 +;; }; +;; if (x == 0) return 32; +;; x = (x & -x) * 0x04D7651F; +;; return table[x >> 27]; +;; } + +;; static const unsigned long long magic = 0x03f08c5392f756cdULL; +;; +;; static const int table[64] = { +;; 0, 1, 12, 2, 13, 22, 17, 3, 14, 33, 23, 36, 18, 58, 28, 4, +;; 62, 15, 34, 26, 24, 48, 50, 37, 19, 55, 59, 52, 29, 44, 39, 5, +;; 63, 11, 21, 16, 32, 35, 57, 27, 61, 25, 47, 49, 54, 51, 43, 38, +;; 10, 20, 31, 56, 60, 46, 53, 42, 9, 30, 45, 41, 8, 40, 7, 6, +;; }; +;; +;; int ctz4 (unsigned long long b) +;; { +;; unsigned long long lsb = b & -b; +;; return table[(lsb * magic) >> 58]; +;; } +;; +;; int ctz5(unsigned x) +;; { +;; static char table[32] = +;; { +;; 0, 1, 2, 24, 3, 19, 6, 25, 22, 4, 20, 10, 16, 7, 12, 26, +;; 31, 23, 18, 5, 21, 9, 15, 11, 30, 17, 8, 14, 29, 13, 28, 27 +;; }; +;; x = (x & -x)*0x04D7651F; +;; return table[x >> 27]; +;; } + +;; int indexes[] = { +;; 63, 0, 58, 1, 59, 47, 53, 2,60, 39, 48, 27, 54, 33, 42, 3, +;; 61, 51, 37, 40, 49, 18, 28, 20, 55, 30, 34, 
11, 43, 14, 22, 4, +;; 62, 57, 46, 52, 38, 26, 32, 41, 50, 36, 17, 19, 29, 10, 13, 21, +;; 56, 45, 25, 31, 35, 16, 9, 12, 44, 24, 15, 8, 23, 7, 6, 5 +;; }; +;; +;; int ctz6(unsigned long n) +;; { +;; return indexes[((n & (~n + 1)) * 0x07EDD5E59A4E28C2ull) >> 58]; +;; } +;; +;; int ctz7(unsigned x) +;; { +;; static const char table[32] = "\x00\x01\x1c\x02\x1d\x0e\x18\x03\x1e\x16\x14" +;; "\x0f\x19\x11\x04\b\x1f\x1b\r\x17\x15\x13\x10\x07\x1a\f\x12\x06\v\x05\n\t"; +;; return table[((unsigned)((x & -x) * 0x077CB531U)) >> 27]; +;; } +;; +;; int ctz8(unsigned v) +;; { +;; static const int table[] = +;; { +;; 31 ,0 ,1 ,23 ,2 ,18 ,5 ,24 ,21 ,3 ,19 ,9 ,15 ,6 ,11 ,25 ,30 ,22 ,17 ,4 ,20 ;,8 ,14 ,10 ,29 ,16 ,7 ,13 ,28 ,12 ,27 ,26 +;; }; +;; unsigned x =(-v & v); +;; return table[(unsigned)(x * 0x9AECA3EU) >> 27]; +;; } + +; ModuleID = 'ctz.c' +source_filename = "ctz.c" +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux-gnu" + +@ctz7.table = internal unnamed_addr constant [32 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1 + +define i32 @ctz1(i32 %x) { +; CHECK-LABEL: @ctz1( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true) +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]] +; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8 +; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP3]] to i32 +; CHECK-NEXT: ret i32 [[CONV]] +; +entry: + %sub = sub i32 0, %x + %and = and i32 %sub, %x + %mul = mul i32 %and, 125613361 + %shr = lshr i32 %mul, 27 + %idxprom = zext i32 %shr to i64 + %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @ctz7.table, i64 0, i64 %idxprom + %0 = load i8, i8* %arrayidx, align 1 + %conv = zext i8 %0 to i32 + ret i32 %conv +} + +define i32 @ctz7(i32 %x) { +; CHECK-LABEL: @ctz7( +; CHECK-NEXT: entry: +; CHECK-NEXT: 
[[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true) +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]] +; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8 +; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP3]] to i32 +; CHECK-NEXT: ret i32 [[CONV]] +; +entry: + %sub = sub i32 0, %x + %and = and i32 %sub, %x + %mul = mul i32 %and, 125613361 + %shr = lshr i32 %mul, 27 + %idxprom = zext i32 %shr to i64 + %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @ctz7.table, i64 0, i64 %idxprom + %0 = load i8, i8* %arrayidx, align 1 + %conv = zext i8 %0 to i32 + ret i32 %conv +} + +@ctz2.table = internal unnamed_addr constant [64 x i16] [i16 32, i16 0, i16 1, i16 12, i16 2, i16 6, i16 0, i16 13, i16 3, i16 0, i16 7, i16 0, i16 0, i16 0, i16 0, i16 14, i16 10, i16 4, i16 0, i16 0, i16 8, i16 0, i16 0, i16 25, i16 0, i16 0, i16 0, i16 0, i16 0, i16 21, i16 27, i16 15, i16 31, i16 11, i16 5, i16 0, i16 0, i16 0, i16 0, i16 0, i16 9, i16 0, i16 0, i16 24, i16 0, i16 0, i16 20, i16 26, i16 30, i16 0, i16 0, i16 0, i16 0, i16 23, i16 0, i16 19, i16 29, i16 0, i16 22, i16 18, i16 28, i16 17, i16 16, i16 0], align 2 + +define i32 @ctz2(i32 %x) { +; CHECK-LABEL: @ctz2( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false) +; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[TMP0]] to i16 +; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 +; CHECK-NEXT: ret i32 [[CONV]] +; +entry: + %sub = sub i32 0, %x + %and = and i32 %sub, %x + %mul = mul i32 %and, 72416175 + %shr = lshr i32 %mul, 26 + %idxprom = zext i32 %shr to i64 + %arrayidx = getelementptr inbounds [64 x i16], [64 x i16]* @ctz2.table, i64 0, i64 %idxprom + %0 = load i16, i16* %arrayidx, align 2 + %conv = sext i16 %0 to i32 + ret i32 %conv +} + +@ctz3.table = internal unnamed_addr constant [32 x i32] [i32 0, i32 1, i32 2, i32 24, i32 3, i32 19, i32 6, i32 25, i32 22, i32 4, i32 20, i32 10, i32 16, i32 7, i32 
12, i32 26, i32 31, i32 23, i32 18, i32 5, i32 21, i32 9, i32 15, i32 11, i32 30, i32 17, i32 8, i32 14, i32 29, i32 13, i32 28, i32 27], align 4 + +define i32 @ctz3(i32 %x) { +; CHECK-LABEL: @ctz3( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], 0 +; CHECK-NEXT: br i1 [[CMP]], label [[RETURN:%.*]], label [[IF_END:%.*]] +; CHECK: if.end: +; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X]], i1 true) +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0 +; CHECK-NEXT: br label [[RETURN]] +; CHECK: return: +; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[TMP0]], [[IF_END]] ], [ 32, [[ENTRY:%.*]] ] +; CHECK-NEXT: ret i32 [[RETVAL_0]] +; +entry: + %cmp = icmp eq i32 %x, 0 + br i1 %cmp, label %return, label %if.end + +if.end: ; preds = %entry + %sub = sub i32 0, %x + %and = and i32 %sub, %x + %mul = mul i32 %and, 81224991 + %shr = lshr i32 %mul, 27 + %idxprom = zext i32 %shr to i64 + %arrayidx = getelementptr inbounds [32 x i32], [32 x i32]* @ctz3.table, i64 0, i64 %idxprom + %0 = load i32, i32* %arrayidx, align 4 + br label %return + +return: ; preds = %entry, %if.end + %retval.0 = phi i32 [ %0, %if.end ], [ 32, %entry ] + ret i32 %retval.0 +} + +@table = internal unnamed_addr constant [64 x i32] [i32 0, i32 1, i32 12, i32 2, i32 13, i32 22, i32 17, i32 3, i32 14, i32 33, i32 23, i32 36, i32 18, i32 58, i32 28, i32 4, i32 62, i32 15, i32 34, i32 26, i32 24, i32 48, i32 50, i32 37, i32 19, i32 55, i32 59, i32 52, i32 29, i32 44, i32 39, i32 5, i32 63, i32 11, i32 21, i32 16, i32 32, i32 35, i32 57, i32 27, i32 61, i32 25, i32 47, i32 49, i32 54, i32 51, i32 43, i32 38, i32 10, i32 20, i32 31, i32 56, i32 60, i32 46, i32 53, i32 42, i32 9, i32 30, i32 45, i32 41, i32 8, i32 40, i32 7, i32 6], align 4 + +define i32 @ctz4(i64 %b) { +; CHECK-LABEL: @ctz4( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.cttz.i64(i64 [[B:%.*]], i1 true) +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 [[B]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = select i1 
[[TMP1]], i64 0, i64 [[TMP0]] +; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[TMP2]] to i32 +; CHECK-NEXT: ret i32 [[TMP3]] +; +entry: + %sub = sub i64 0, %b + %and = and i64 %sub, %b + %mul = mul i64 %and, 283881067100198605 + %shr = lshr i64 %mul, 58 + %arrayidx = getelementptr inbounds [64 x i32], [64 x i32]* @table, i64 0, i64 %shr + %0 = load i32, i32* %arrayidx, align 4 + ret i32 %0 +} + +@ctz5.table = internal unnamed_addr constant [32 x i8] c"\00\01\02\18\03\13\06\19\16\04\14\0A\10\07\0C\1A\1F\17\12\05\15\09\0F\0B\1E\11\08\0E\1D\0D\1C\1B", align 1 + +define i32 @ctz5(i32 %x) { +; CHECK-LABEL: @ctz5( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true) +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]] +; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8 +; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP3]] to i32 +; CHECK-NEXT: ret i32 [[CONV]] +; +entry: + %sub = sub i32 0, %x + %and = and i32 %sub, %x + %mul = mul i32 %and, 81224991 + %shr = lshr i32 %mul, 27 + %idxprom = zext i32 %shr to i64 + %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @ctz5.table, i64 0, i64 %idxprom + %0 = load i8, i8* %arrayidx, align 1 + %conv = zext i8 %0 to i32 + ret i32 %conv +} + +@ctz6.table = global [64 x i32] [i32 63, i32 0, i32 58, i32 1, i32 59, i32 47, i32 53, i32 2, i32 60, i32 39, i32 48, i32 27, i32 54, i32 33, i32 42, i32 3, i32 61, i32 51, i32 37, i32 40, i32 49, i32 18, i32 28, i32 20, i32 55, i32 30, i32 34, i32 11, i32 43, i32 14, i32 22, i32 4, i32 62, i32 57, i32 46, i32 52, i32 38, i32 26, i32 32, i32 41, i32 50, i32 36, i32 17, i32 19, i32 29, i32 10, i32 13, i32 21, i32 56, i32 45, i32 25, i32 31, i32 35, i32 16, i32 9, i32 12, i32 44, i32 24, i32 15, i32 8, i32 23, i32 7, i32 6, i32 5], align 4 + +define i32 @ctz6(i64 %n) { +; CHECK-LABEL: @ctz6( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.cttz.i64(i64 [[N:%.*]], i1 
true) +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 [[N]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i64 63, i64 [[TMP0]] +; CHECK-NEXT: [[TMP3:%.*]] = trunc i64 [[TMP2]] to i32 +; CHECK-NEXT: ret i32 [[TMP3]] +; +entry: + %add = sub i64 0, %n + %and = and i64 %add, %n + %mul = mul i64 %and, 571347909858961602 + %shr = lshr i64 %mul, 58 + %arrayidx = getelementptr inbounds [64 x i32], [64 x i32]* @ctz6.table, i64 0, i64 %shr + %0 = load i32, i32* %arrayidx, align 4 + ret i32 %0 +} + +@ctz8.table = internal unnamed_addr constant [32 x i32] [i32 31, i32 0, i32 1, i32 23, i32 2, i32 18, i32 5, i32 24, i32 21, i32 3, i32 19, i32 9, i32 15, i32 6, i32 11, i32 25, i32 30, i32 22, i32 17, i32 4, i32 20, i32 8, i32 14, i32 10, i32 29, i32 16, i32 7, i32 13, i32 28, i32 12, i32 27, i32 26], align 4 + +define i32 @ctz8(i32 %v) { +; CHECK-LABEL: @ctz8( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[V:%.*]], i1 true) +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[V]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 31, i32 [[TMP0]] +; CHECK-NEXT: ret i32 [[TMP2]] +; +entry: + %sub = sub i32 0, %v + %and = and i32 %sub, %v + %mul = mul i32 %and, 162449982 + %shr = lshr i32 %mul, 27 + %idxprom = zext i32 %shr to i64 + %arrayidx = getelementptr inbounds [32 x i32], [32 x i32]* @ctz8.table, i64 0, i64 %idxprom + %0 = load i32, i32* %arrayidx, align 4 + ret i32 %0 +} + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6} +!llvm.ident = !{!7} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 1, !"branch-target-enforcement", i32 0} +!2 = !{i32 1, !"sign-return-address", i32 0} +!3 = !{i32 1, !"sign-return-address-all", i32 0} +!4 = !{i32 1, !"sign-return-address-with-bkey", i32 0} +!5 = !{i32 7, !"uwtable", i32 1} +!6 = !{i32 7, !"frame-pointer", i32 1} +!7 = !{!"clang version 14.0.0"} Index: llvm/test/Transforms/AggressiveInstCombine/AARCH64/lower-table-based-ctz.ll =================================================================== --- 
/dev/null +++ llvm/test/Transforms/AggressiveInstCombine/AARCH64/lower-table-based-ctz.ll @@ -0,0 +1,43 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -aggressive-instcombine -mtriple aarch64-linux-gnu -S < %s | FileCheck %s + +; ModuleID = 'test.c' +source_filename = "test.c" +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux" + +@f.table = internal unnamed_addr constant [32 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1 + +define i32 @f(i32 %x) { +; CHECK-LABEL: @f( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true) +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]] +; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8 +; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP3]] to i32 +; CHECK-NEXT: ret i32 [[CONV]] +; +entry: + %sub = sub i32 0, %x + %and = and i32 %sub, %x + %mul = mul i32 %and, 125613361 + %shr = lshr i32 %mul, 27 + %idxprom = zext i32 %shr to i64 + %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @f.table, i64 0, i64 %idxprom + %0 = load i8, i8* %arrayidx, align 1 + %conv = zext i8 %0 to i32 + ret i32 %conv +} + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6} +!llvm.ident = !{!7} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 1, !"branch-target-enforcement", i32 0} +!2 = !{i32 1, !"sign-return-address", i32 0} +!3 = !{i32 1, !"sign-return-address-all", i32 0} +!4 = !{i32 1, !"sign-return-address-with-bkey", i32 0} +!5 = !{i32 7, !"uwtable", i32 1} +!6 = !{i32 7, !"frame-pointer", i32 1} +!7 = !{!"clang version 14.0.0"} Index: llvm/test/Transforms/AggressiveInstCombine/AARCH64/non-argument-value.ll =================================================================== --- /dev/null +++ 
llvm/test/Transforms/AggressiveInstCombine/AARCH64/non-argument-value.ll @@ -0,0 +1,111 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -aggressive-instcombine -mtriple aarch64-linux-gnu -S < %s | FileCheck %s + +;; C reproducers: +;; #include "stdio.h" +;; unsigned x; +;; +;; int globalVar () +;; { +;; static const char table[32] = +;; { +;; 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, +;; 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 +;; }; +;; return table[((unsigned)((x & -x) * 0x077CB531U)) >> 27]; +;; } +;; +;; int localVar () +;; { +;; unsigned x; +;; scanf("%u", &x); +;; static const char table[32] = +;; { +;; 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, +;; 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 +;; }; +;; return table[((unsigned)((x & -x) * 0x077CB531U)) >> 27]; +;; } + +; ModuleID = 'x_not_arg.c' +source_filename = "x_not_arg.c" +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux-gnu" + +@x = global i32 0, align 4 +@.str = private constant [3 x i8] c"%u\00", align 1 +@localVar.table = internal constant [32 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1 + +define i32 @globalVar() { +; CHECK-LABEL: @globalVar( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* @x, align 4 +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.cttz.i32(i32 [[TMP0]], i1 true) +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP0]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 0, i32 [[TMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8 +; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP4]] to i32 +; CHECK-NEXT: ret i32 [[CONV]] +; +entry: + %0 = load i32, i32* @x, align 4 + %sub = sub i32 0, %0 + %and = and i32 %0, %sub + %mul = mul i32 %and, 125613361 + %shr = lshr i32 %mul, 27 + %idxprom = zext i32 %shr to i64 + %arrayidx = 
getelementptr inbounds [32 x i8], [32 x i8]* @localVar.table, i64 0, i64 %idxprom + %1 = load i8, i8* %arrayidx, align 1 + %conv = zext i8 %1 to i32 + ret i32 %conv +} + +define i32 @localVar() { +; CHECK-LABEL: @localVar( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[X:%.*]] = alloca i32, align 4 +; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[X]] to i8* +; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull [[TMP0]]) +; CHECK-NEXT: [[CALL:%.*]] = call i32 (i8*, ...) @__isoc99_scanf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i64 0, i64 0), i32* nonnull [[X]]) +; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[X]], align 4 +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.cttz.i32(i32 [[TMP1]], i1 true) +; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[TMP1]], 0 +; CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 0, i32 [[TMP2]] +; CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i8 +; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP5]] to i32 +; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull [[TMP0]]) +; CHECK-NEXT: ret i32 [[CONV]] +; +entry: + %x = alloca i32, align 4 + %0 = bitcast i32* %x to i8* + call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %0) + %call = call i32 (i8*, ...) @__isoc99_scanf(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i64 0, i64 0), i32* nonnull %x) + %1 = load i32, i32* %x, align 4 + %sub = sub i32 0, %1 + %and = and i32 %1, %sub + %mul = mul i32 %and, 125613361 + %shr = lshr i32 %mul, 27 + %idxprom = zext i32 %shr to i64 + %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @localVar.table, i64 0, i64 %idxprom + %2 = load i8, i8* %arrayidx, align 1 + %conv = zext i8 %2 to i32 + call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %0) + ret i32 %conv +} + +declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) +declare noundef i32 @__isoc99_scanf(i8* nocapture noundef readonly, ...) 
+declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6} +!llvm.ident = !{!7} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 1, !"branch-target-enforcement", i32 0} +!2 = !{i32 1, !"sign-return-address", i32 0} +!3 = !{i32 1, !"sign-return-address-all", i32 0} +!4 = !{i32 1, !"sign-return-address-with-bkey", i32 0} +!5 = !{i32 7, !"uwtable", i32 1} +!6 = !{i32 7, !"frame-pointer", i32 1} +!7 = !{!"clang version 14.0.0"} Index: llvm/test/Transforms/AggressiveInstCombine/AArch64/zero-element.ll =================================================================== --- /dev/null +++ llvm/test/Transforms/AggressiveInstCombine/AArch64/zero-element.ll @@ -0,0 +1,44 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -aggressive-instcombine -mtriple aarch64-linux-gnu -S < %s | FileCheck %s + +; ModuleID = 'handle-zero-element.c' +source_filename = "handle-zero-element.c" +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux-gnu" + +@ctz1.table = internal constant [32 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1 + +; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone uwtable willreturn +define i32 @ctz1(i32 %x) { +; CHECK-LABEL: @ctz1( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true) +; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0 +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]] +; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8 +; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP3]] to i32 +; CHECK-NEXT: ret i32 [[CONV]] +; +entry: + %sub = sub i32 0, %x + %and = and i32 %sub, %x + %mul = mul i32 %and, 125613361 + %shr = lshr i32 %mul, 27 + %idxprom = zext i32 %shr to i64 + %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @ctz1.table, i64 0, i64 
%idxprom + %0 = load i8, i8* %arrayidx, align 1 + %conv = zext i8 %0 to i32 + ret i32 %conv +} + +!llvm.module.flags = !{!0, !1, !2, !3, !4, !5, !6} +!llvm.ident = !{!7} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{i32 1, !"branch-target-enforcement", i32 0} +!2 = !{i32 1, !"sign-return-address", i32 0} +!3 = !{i32 1, !"sign-return-address-all", i32 0} +!4 = !{i32 1, !"sign-return-address-with-bkey", i32 0} +!5 = !{i32 7, !"uwtable", i32 1} +!6 = !{i32 7, !"frame-pointer", i32 1} +!7 = !{!"clang version 14.0.0"}