diff --git a/llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp
index 0378ea79ef74..d62b6434f002 100644
--- a/llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp
+++ b/llvm/lib/Transforms/AggressiveInstCombine/TruncInstCombine.cpp
@@ -1,409 +1,417 @@
//===- TruncInstCombine.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// TruncInstCombine - looks for expression dags post-dominated by TruncInst and
// for each eligible dag, it will create a reduced bit-width expression, replace
// the old expression with this new one and remove the old expression.
// An eligible expression dag is such that:
//   1. Contains only supported instructions.
//   2. Supported leaves: ZExtInst, SExtInst, TruncInst and Constant value.
//   3. Can be evaluated into type with reduced legal bit-width.
//   4. All instructions in the dag must not have users outside the dag.
//      The only exception is for {ZExt, SExt}Inst with operand type equal to
//      the new reduced type evaluated in (3).
//
// The motivation for this optimization is that evaluating an expression using
// smaller bit-width is preferable, especially for vectorization where we can
// fit more values in one vectorized instruction. In addition, this optimization
// may decrease the number of cast instructions, but will not increase it.
//
//===----------------------------------------------------------------------===//

#include "AggressiveInstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

#define DEBUG_TYPE "aggressive-instcombine"

/// Given an instruction and a container, it fills all the relevant operands of
/// that instruction, with respect to the Trunc expression dag optimization.
static void getRelevantOperands(Instruction *I, SmallVectorImpl<Value *> &Ops) {
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // These CastInst are considered leaves of the evaluated expression, thus,
    // their operands are not relevant.
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    Ops.push_back(I->getOperand(0));
    Ops.push_back(I->getOperand(1));
    break;
  default:
    llvm_unreachable("Unreachable!");
  }
}

bool TruncInstCombine::buildTruncExpressionDag() {
  SmallVector<Value *, 8> Worklist;
  SmallVector<Instruction *, 8> Stack;
  // Clear old expression dag.
  InstInfoMap.clear();

  Worklist.push_back(CurrentTruncInst->getOperand(0));

  while (!Worklist.empty()) {
    Value *Curr = Worklist.back();

    if (isa<Constant>(Curr)) {
      Worklist.pop_back();
      continue;
    }

    auto *I = dyn_cast<Instruction>(Curr);
    if (!I)
      return false;

    if (!Stack.empty() && Stack.back() == I) {
      // Already handled all instruction operands, can remove it from both the
      // Worklist and the Stack, and add it to the instruction info map.
      Worklist.pop_back();
      Stack.pop_back();
      // Insert I to the Info map.
      InstInfoMap.insert(std::make_pair(I, Info()));
      continue;
    }

    if (InstInfoMap.count(I)) {
      Worklist.pop_back();
      continue;
    }

    // Add the instruction to the stack before start handling its operands.
    Stack.push_back(I);

    unsigned Opc = I->getOpcode();
    switch (Opc) {
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // trunc(trunc(x)) -> trunc(x)
      // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
      // trunc(ext(x)) -> trunc(x) if the source type is larger than the new
      // dest
      break;
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      SmallVector<Value *, 2> Operands;
      getRelevantOperands(I, Operands);
      for (Value *Operand : Operands)
        Worklist.push_back(Operand);
      break;
    }
    default:
      // TODO: Can handle more cases here:
      // 1. select, shufflevector, extractelement, insertelement
      // 2. udiv, urem
      // 3. shl, lshr, ashr
      // 4. phi node (and loop handling)
      // ...
      return false;
    }
  }
  return true;
}

unsigned TruncInstCombine::getMinBitWidth() {
  SmallVector<Value *, 8> Worklist;
  SmallVector<Instruction *, 8> Stack;

  Value *Src = CurrentTruncInst->getOperand(0);
  Type *DstTy = CurrentTruncInst->getType();
  unsigned TruncBitWidth = DstTy->getScalarSizeInBits();
  unsigned OrigBitWidth =
      CurrentTruncInst->getOperand(0)->getType()->getScalarSizeInBits();

  if (isa<Constant>(Src))
    return TruncBitWidth;

  Worklist.push_back(Src);
  InstInfoMap[cast<Instruction>(Src)].ValidBitWidth = TruncBitWidth;

  while (!Worklist.empty()) {
    Value *Curr = Worklist.back();

    if (isa<Constant>(Curr)) {
      Worklist.pop_back();
      continue;
    }

    // Otherwise, it must be an instruction.
    auto *I = cast<Instruction>(Curr);

    auto &Info = InstInfoMap[I];

    SmallVector<Value *, 2> Operands;
    getRelevantOperands(I, Operands);

    if (!Stack.empty() && Stack.back() == I) {
      // Already handled all instruction operands, can remove it from both the
      // Worklist and the Stack, and update MinBitWidth.
      Worklist.pop_back();
      Stack.pop_back();
      for (auto *Operand : Operands)
        if (auto *IOp = dyn_cast<Instruction>(Operand))
          Info.MinBitWidth =
              std::max(Info.MinBitWidth, InstInfoMap[IOp].MinBitWidth);
      continue;
    }

    // Add the instruction to the stack before start handling its operands.
    Stack.push_back(I);
    unsigned ValidBitWidth = Info.ValidBitWidth;

    // Update minimum bit-width before handling its operands. This is required
    // when the instruction is part of a loop.
    Info.MinBitWidth = std::max(Info.MinBitWidth, Info.ValidBitWidth);

    for (auto *Operand : Operands)
      if (auto *IOp = dyn_cast<Instruction>(Operand)) {
        // If we already calculated the minimum bit-width for this valid
        // bit-width, or for a smaller valid bit-width, then just keep the
        // answer we already calculated.
        unsigned IOpBitwidth = InstInfoMap.lookup(IOp).ValidBitWidth;
        if (IOpBitwidth >= ValidBitWidth)
          continue;
        InstInfoMap[IOp].ValidBitWidth = std::max(ValidBitWidth, IOpBitwidth);
        Worklist.push_back(IOp);
      }
  }
  unsigned MinBitWidth = InstInfoMap.lookup(cast<Instruction>(Src)).MinBitWidth;
  assert(MinBitWidth >= TruncBitWidth);

  if (MinBitWidth > TruncBitWidth) {
    // In this case reducing expression with vector type might generate a new
    // vector type, which is not preferable as it might result in generating
    // sub-optimal code.
    if (DstTy->isVectorTy())
      return OrigBitWidth;
    // Use the smallest integer type in the range [MinBitWidth, OrigBitWidth).
    Type *Ty = DL.getSmallestLegalIntType(DstTy->getContext(), MinBitWidth);
    // Update minimum bit-width with the new destination type bit-width if we
    // succeeded to find such, otherwise, with the original bit-width.
    MinBitWidth = Ty ? Ty->getScalarSizeInBits() : OrigBitWidth;
  } else { // MinBitWidth == TruncBitWidth
    // In this case the expression can be evaluated with the trunc instruction
    // destination type, and trunc instruction can be omitted. However, we
    // should not perform the evaluation if the original type is a legal scalar
    // type and the target type is illegal.
    bool FromLegal = MinBitWidth == 1 || DL.isLegalInteger(OrigBitWidth);
    bool ToLegal = MinBitWidth == 1 || DL.isLegalInteger(MinBitWidth);
    if (!DstTy->isVectorTy() && FromLegal && !ToLegal)
      return OrigBitWidth;
  }
  return MinBitWidth;
}

Type *TruncInstCombine::getBestTruncatedType() {
  if (!buildTruncExpressionDag())
    return nullptr;

  // We don't want to duplicate instructions, which isn't profitable. Thus, we
  // can't shrink something that has multiple users, unless all users are
  // post-dominated by the trunc instruction, i.e., were visited during the
  // expression evaluation.
  unsigned DesiredBitWidth = 0;
  for (auto Itr : InstInfoMap) {
    Instruction *I = Itr.first;
    if (I->hasOneUse())
      continue;
    bool IsExtInst = (isa<ZExtInst>(I) || isa<SExtInst>(I));
    for (auto *U : I->users())
      if (auto *UI = dyn_cast<Instruction>(U))
        if (UI != CurrentTruncInst && !InstInfoMap.count(UI)) {
          if (!IsExtInst)
            return nullptr;
          // If this is an extension from the dest type, we can eliminate it,
          // even if it has multiple users. Thus, update the DesiredBitWidth and
          // validate that all extension instructions agree on the same
          // DesiredBitWidth.
          unsigned ExtInstBitWidth =
              I->getOperand(0)->getType()->getScalarSizeInBits();
          if (DesiredBitWidth && DesiredBitWidth != ExtInstBitWidth)
            return nullptr;
          DesiredBitWidth = ExtInstBitWidth;
        }
  }

  unsigned OrigBitWidth =
      CurrentTruncInst->getOperand(0)->getType()->getScalarSizeInBits();

  // Calculate the minimum bit-width allowed for shrinking the currently
  // visited truncate's operand.
  unsigned MinBitWidth = getMinBitWidth();

  // Check that we can shrink to a smaller bit-width than the original one and
  // that it matches the DesiredBitWidth, if such exists.
  if (MinBitWidth >= OrigBitWidth ||
      (DesiredBitWidth && DesiredBitWidth != MinBitWidth))
    return nullptr;

  return IntegerType::get(CurrentTruncInst->getContext(), MinBitWidth);
}

/// Given a reduced scalar type \p Ty and a \p V value, return a reduced type
/// for \p V, according to its type: if it is a vector type, return the vector
/// version of \p Ty, otherwise return \p Ty.
static Type *getReducedType(Value *V, Type *Ty) {
  assert(Ty && !Ty->isVectorTy() && "Expect Scalar Type");
  if (auto *VTy = dyn_cast<VectorType>(V->getType()))
    return VectorType::get(Ty, VTy->getNumElements());
  return Ty;
}

Value *TruncInstCombine::getReducedOperand(Value *V, Type *SclTy) {
  Type *Ty = getReducedType(V, SclTy);
  if (auto *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, false);
    // If we got a constantexpr back, try to simplify it with DL info.
    if (Constant *FoldedC = ConstantFoldConstant(C, DL, &TLI))
      C = FoldedC;
    return C;
  }

  auto *I = cast<Instruction>(V);
  Info Entry = InstInfoMap.lookup(I);
  assert(Entry.NewValue);
  return Entry.NewValue;
}

void TruncInstCombine::ReduceExpressionDag(Type *SclTy) {
  for (auto &Itr : InstInfoMap) { // Forward
    Instruction *I = Itr.first;
    TruncInstCombine::Info &NodeInfo = Itr.second;

    assert(!NodeInfo.NewValue && "Instruction has been evaluated");

    IRBuilder<> Builder(I);
    Value *Res = nullptr;
    unsigned Opc = I->getOpcode();
    switch (Opc) {
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt: {
      Type *Ty = getReducedType(I, SclTy);
      // If the source type of the cast is the type we're trying for then we
      // can just return the source. There's no need to insert it because it is
      // not new.
      if (I->getOperand(0)->getType() == Ty) {
+        assert(!isa<TruncInst>(I) && "Cannot reach here with TruncInst");
        NodeInfo.NewValue = I->getOperand(0);
        continue;
      }
      // Otherwise, must be the same type of cast, so just reinsert a new one.
      // This also handles the case of zext(trunc(x)) -> zext(x).
      Res = Builder.CreateIntCast(I->getOperand(0), Ty,
                                  Opc == Instruction::SExt);

      // Update Worklist entries with new value if needed.
-      if (auto *NewCI = dyn_cast<TruncInst>(Res)) {
-        auto Entry = find(Worklist, I);
-        if (Entry != Worklist.end())
+      // There are three possible changes to the Worklist:
+      // 1. Update Old-TruncInst -> New-TruncInst.
+      // 2. Remove Old-TruncInst (if New node is not TruncInst).
+      // 3. Add New-TruncInst (if Old node was not TruncInst).
+      auto Entry = find(Worklist, I);
+      if (Entry != Worklist.end()) {
+        if (auto *NewCI = dyn_cast<TruncInst>(Res))
          *Entry = NewCI;
-      }
+        else
+          Worklist.erase(Entry);
+      } else if (auto *NewCI = dyn_cast<TruncInst>(Res))
+        Worklist.push_back(NewCI);
      break;
    }
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      Value *LHS = getReducedOperand(I->getOperand(0), SclTy);
      Value *RHS = getReducedOperand(I->getOperand(1), SclTy);
      Res = Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
      break;
    }
    default:
      llvm_unreachable("Unhandled instruction");
    }

    NodeInfo.NewValue = Res;
    if (auto *ResI = dyn_cast<Instruction>(Res))
      ResI->takeName(I);
  }

  Value *Res = getReducedOperand(CurrentTruncInst->getOperand(0), SclTy);
  Type *DstTy = CurrentTruncInst->getType();
  if (Res->getType() != DstTy) {
    IRBuilder<> Builder(CurrentTruncInst);
    Res = Builder.CreateIntCast(Res, DstTy, false);
    if (auto *ResI = dyn_cast<Instruction>(Res))
      ResI->takeName(CurrentTruncInst);
  }
  CurrentTruncInst->replaceAllUsesWith(Res);

  // Erase the old expression dag, which was replaced by the reduced expression
  // dag. We iterate backward, which means we visit the instruction before we
  // visit any of its operands; this way, when we get to the operand, we have
  // already removed the instructions (from the expression dag) that use it.
  CurrentTruncInst->eraseFromParent();
  for (auto I = InstInfoMap.rbegin(), E = InstInfoMap.rend(); I != E; ++I) {
    // We still need to check that the instruction has no users before we erase
    // it, because a {SExt, ZExt}Inst instruction might have other users that
    // were not reduced; in such a case, we need to keep that instruction.
    if (!I->first->getNumUses())
      I->first->eraseFromParent();
  }
}

bool TruncInstCombine::run(Function &F) {
  bool MadeIRChange = false;

  // Collect all TruncInst in the function into the Worklist for evaluating.
  for (auto &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    for (auto &I : BB)
      if (auto *CI = dyn_cast<TruncInst>(&I))
        Worklist.push_back(CI);
  }

  // Process all TruncInst in the Worklist, for each instruction:
  //   1. Check if it dominates an eligible expression dag to be reduced.
  //   2. Create a reduced expression dag and replace the old one with it.
  while (!Worklist.empty()) {
    CurrentTruncInst = Worklist.pop_back_val();

    if (Type *NewDstSclTy = getBestTruncatedType()) {
      DEBUG(dbgs() << "ICE: TruncInstCombine reducing type of expression dag "
                      "dominated by: "
                   << CurrentTruncInst << '\n');
      ReduceExpressionDag(NewDstSclTy);
      MadeIRChange = true;
    }
  }

  return MadeIRChange;
}
diff --git a/llvm/test/Transforms/AggressiveInstCombine/trunc_const_expr.ll b/llvm/test/Transforms/AggressiveInstCombine/trunc_const_expr.ll
index af4982dc2b5d..b83fcb470cc3 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/trunc_const_expr.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/trunc_const_expr.ll
@@ -1,79 +1,110 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -aggressive-instcombine -S | FileCheck %s
; RUN: opt < %s -passes=aggressive-instcombine -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"

; Aggressive Instcombine should be able to reduce the width of these constant
; expressions, without crashing.

declare i32 @use32(i32)
declare <2 x i32> @use32_vec(<2 x i32>)

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; These tests check cases where the expression dag post-dominated by TruncInst
;; contains an instruction which has more than one use.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

define void @const_expression_mul() {
; CHECK-LABEL: @const_expression_mul(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @use32(i32 242)
; CHECK-NEXT:    ret void
;
  %A = mul i64 11, 22
  %T = trunc i64 %A to i32
  call i32 @use32(i32 %T)
  ret void
}

define void @const_expression_zext() {
; CHECK-LABEL: @const_expression_zext(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @use32(i32 33)
; CHECK-NEXT:    ret void
;
  %A = zext i32 33 to i64
  %T = trunc i64 %A to i32
  call i32 @use32(i32 %T)
  ret void
}

define void @const_expression_trunc() {
; CHECK-LABEL: @const_expression_trunc(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @use32(i32 44)
; CHECK-NEXT:    ret void
;
  %T = trunc i64 44 to i32
  call i32 @use32(i32 %T)
  ret void
}

+; Check that we handle a constant expression trunc instruction when it is a
+; leaf of another trunc expression pattern:
+; 1. %T1 is the constant expression trunc instruction.
+; 2. %T2->%T1 is the trunc expression pattern we want to reduce.
+define void @const_expression_trunc_leaf() {
+; CHECK-LABEL: @const_expression_trunc_leaf(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @use32(i32 44)
+; CHECK-NEXT:    ret void
+;
+  %T1 = trunc i64 44 to i48
+  %T2 = trunc i48 %T1 to i32
+  call i32 @use32(i32 %T2)
+  ret void
+}
+
+; Check that we handle a zext instruction which turns into a trunc instruction.
+; Notice that there are two expression patterns below:
+; 1. %T2->%T1
+; 2. %T1`->%A (where %T1` is the reduced node of %T1 into trunc instruction)
+define void @const_expression_zext_to_trunc() {
+; CHECK-LABEL: @const_expression_zext_to_trunc(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @use32(i32 44)
+; CHECK-NEXT:    ret void
+;
+  %A = add i64 11, 33
+  %T1 = zext i64 %A to i128
+  %T2 = trunc i128 %T1 to i32
+  call i32 @use32(i32 %T2)
+  ret void
+}
+
define void @const_expression_mul_vec() {
; CHECK-LABEL: @const_expression_mul_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @use32_vec(<2 x i32> <i32 242, i32 242>)
; CHECK-NEXT:    ret void
;
  %A = mul <2 x i64> <i64 11, i64 11>, <i64 22, i64 22>
  %T = trunc <2 x i64> %A to <2 x i32>
  call <2 x i32> @use32_vec(<2 x i32> %T)
  ret void
}

define void @const_expression_zext_vec() {
; CHECK-LABEL: @const_expression_zext_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @use32_vec(<2 x i32> <i32 33, i32 33>)
; CHECK-NEXT:    ret void
;
  %A = zext <2 x i32> <i32 33, i32 33> to <2 x i64>
  %T = trunc <2 x i64> %A to <2 x i32>
  call <2 x i32> @use32_vec(<2 x i32> %T)
  ret void
}

define void @const_expression_trunc_vec() {
; CHECK-LABEL: @const_expression_trunc_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @use32_vec(<2 x i32> <i32 44, i32 44>)
; CHECK-NEXT:    ret void
;
  %T = trunc <2 x i64> <i64 44, i64 44> to <2 x i32>
  call <2 x i32> @use32_vec(<2 x i32> %T)
  ret void
}
diff --git a/llvm/test/Transforms/AggressiveInstCombine/trunc_multi_uses.ll b/llvm/test/Transforms/AggressiveInstCombine/trunc_multi_uses.ll
index 389f77d4c705..51c110dcc7b7 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/trunc_multi_uses.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/trunc_multi_uses.ll
@@ -1,214 +1,270 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -aggressive-instcombine -S | FileCheck %s
; RUN: opt < %s -passes=aggressive-instcombine -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"

; Aggressive Instcombine should be able to reduce the width of these expressions.

declare i32 @use32(i32)
declare i32 @use64(i64)
declare <2 x i32> @use32_vec(<2 x i32>)
declare <2 x i32> @use64_vec(<2 x i64>)

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; These tests check cases where the expression dag post-dominated by TruncInst
;; contains an instruction which has more than one use.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

define void @multi_uses_add(i32 %X) {
; CHECK-LABEL: @multi_uses_add(
; CHECK-NEXT:    [[A1:%.*]] = zext i32 [[X:%.*]] to i64
; CHECK-NEXT:    [[B1:%.*]] = add i32 [[X]], 15
; CHECK-NEXT:    [[C1:%.*]] = mul i32 [[B1]], [[B1]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @use32(i32 [[C1]])
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @use64(i64 [[A1]])
; CHECK-NEXT:    ret void
;
  %A1 = zext i32 %X to i64
  %B1 = add i64 %A1, 15
  %C1 = mul i64 %B1, %B1
  %T1 = trunc i64 %C1 to i32
  call i32 @use32(i32 %T1)
  ; make sure zext has another use that is not post-dominated by the TruncInst.
  call i32 @use64(i64 %A1)
  ret void
}

define void @multi_uses_or(i32 %X) {
; CHECK-LABEL: @multi_uses_or(
; CHECK-NEXT:    [[A1:%.*]] = zext i32 [[X:%.*]] to i64
; CHECK-NEXT:    [[B1:%.*]] = or i32 [[X]], 15
; CHECK-NEXT:    [[C1:%.*]] = mul i32 [[B1]], [[B1]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @use32(i32 [[C1]])
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @use64(i64 [[A1]])
; CHECK-NEXT:    ret void
;
  %A1 = zext i32 %X to i64
  %B1 = or i64 %A1, 15
  %C1 = mul i64 %B1, %B1
  %T1 = trunc i64 %C1 to i32
  call i32 @use32(i32 %T1)
  ; make sure zext has another use that is not post-dominated by the TruncInst.
  call i32 @use64(i64 %A1)
  ret void
}

define void @multi_uses_xor(i32 %X) {
; CHECK-LABEL: @multi_uses_xor(
; CHECK-NEXT:    [[A1:%.*]] = zext i32 [[X:%.*]] to i64
; CHECK-NEXT:    [[B1:%.*]] = xor i32 [[X]], 15
; CHECK-NEXT:    [[C1:%.*]] = mul i32 [[B1]], [[B1]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @use32(i32 [[C1]])
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @use64(i64 [[A1]])
; CHECK-NEXT:    ret void
;
  %A1 = zext i32 %X to i64
  %B1 = xor i64 %A1, 15
  %C1 = mul i64 %B1, %B1
  %T1 = trunc i64 %C1 to i32
  call i32 @use32(i32 %T1)
  ; make sure zext has another use that is not post-dominated by the TruncInst.
  call i32 @use64(i64 %A1)
  ret void
}

define void @multi_uses_and(i32 %X) {
; CHECK-LABEL: @multi_uses_and(
; CHECK-NEXT:    [[A1:%.*]] = zext i32 [[X:%.*]] to i64
; CHECK-NEXT:    [[B1:%.*]] = and i32 [[X]], 15
; CHECK-NEXT:    [[C1:%.*]] = mul i32 [[B1]], [[B1]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @use32(i32 [[C1]])
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @use64(i64 [[A1]])
; CHECK-NEXT:    ret void
;
  %A1 = zext i32 %X to i64
  %B1 = and i64 %A1, 15
  %C1 = mul i64 %B1, %B1
  %T1 = trunc i64 %C1 to i32
  call i32 @use32(i32 %T1)
  ; make sure zext has another use that is not post-dominated by the TruncInst.
  call i32 @use64(i64 %A1)
  ret void
}

define void @multi_uses_sub(i32 %X, i32 %Y) {
; CHECK-LABEL: @multi_uses_sub(
; CHECK-NEXT:    [[A1:%.*]] = zext i32 [[X:%.*]] to i64
; CHECK-NEXT:    [[A2:%.*]] = zext i32 [[Y:%.*]] to i64
; CHECK-NEXT:    [[B1:%.*]] = sub i32 [[X]], [[Y]]
; CHECK-NEXT:    [[C1:%.*]] = mul i32 [[B1]], [[B1]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @use32(i32 [[C1]])
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @use64(i64 [[A1]])
; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @use64(i64 [[A2]])
; CHECK-NEXT:    ret void
;
  %A1 = zext i32 %X to i64
  %A2 = zext i32 %Y to i64
  %B1 = sub i64 %A1, %A2
  %C1 = mul i64 %B1, %B1
  %T1 = trunc i64 %C1 to i32
  call i32 @use32(i32 %T1)
  ; make sure zext has another use that is not post-dominated by the TruncInst.
  call i32 @use64(i64 %A1)
  call i32 @use64(i64 %A2)
  ret void
}

define void @multi_use_vec_add(<2 x i32> %X) {
; CHECK-LABEL: @multi_use_vec_add(
; CHECK-NEXT:    [[A1:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64>
; CHECK-NEXT:    [[B1:%.*]] = add <2 x i32> [[X]], <i32 15, i32 15>
; CHECK-NEXT:    [[C1:%.*]] = mul <2 x i32> [[B1]], [[B1]]
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @use32_vec(<2 x i32> [[C1]])
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i32> @use64_vec(<2 x i64> [[A1]])
; CHECK-NEXT:    ret void
;
  %A1 = zext <2 x i32> %X to <2 x i64>
  %B1 = add <2 x i64> %A1, <i64 15, i64 15>
  %C1 = mul <2 x i64> %B1, %B1
  %T1 = trunc <2 x i64> %C1 to <2 x i32>
  call <2 x i32> @use32_vec(<2 x i32> %T1)
  ; make sure zext has another use that is not post-dominated by the TruncInst.
  call <2 x i32> @use64_vec(<2 x i64> %A1)
  ret void
}

define void @multi_use_vec_or(<2 x i32> %X) {
; CHECK-LABEL: @multi_use_vec_or(
; CHECK-NEXT:    [[A1:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64>
; CHECK-NEXT:    [[B1:%.*]] = or <2 x i32> [[X]], <i32 15, i32 15>
; CHECK-NEXT:    [[C1:%.*]] = mul <2 x i32> [[B1]], [[B1]]
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @use32_vec(<2 x i32> [[C1]])
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i32> @use64_vec(<2 x i64> [[A1]])
; CHECK-NEXT:    ret void
;
  %A1 = zext <2 x i32> %X to <2 x i64>
  %B1 = or <2 x i64> %A1, <i64 15, i64 15>
  %C1 = mul <2 x i64> %B1, %B1
  %T1 = trunc <2 x i64> %C1 to <2 x i32>
  call <2 x i32> @use32_vec(<2 x i32> %T1)
  ; make sure zext has another use that is not post-dominated by the TruncInst.
  call <2 x i32> @use64_vec(<2 x i64> %A1)
  ret void
}

define void @multi_use_vec_xor(<2 x i32> %X) {
; CHECK-LABEL: @multi_use_vec_xor(
; CHECK-NEXT:    [[A1:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64>
; CHECK-NEXT:    [[B1:%.*]] = xor <2 x i32> [[X]], <i32 15, i32 15>
; CHECK-NEXT:    [[C1:%.*]] = mul <2 x i32> [[B1]], [[B1]]
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @use32_vec(<2 x i32> [[C1]])
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i32> @use64_vec(<2 x i64> [[A1]])
; CHECK-NEXT:    ret void
;
  %A1 = zext <2 x i32> %X to <2 x i64>
  %B1 = xor <2 x i64> %A1, <i64 15, i64 15>
  %C1 = mul <2 x i64> %B1, %B1
  %T1 = trunc <2 x i64> %C1 to <2 x i32>
  call <2 x i32> @use32_vec(<2 x i32> %T1)
  ; make sure zext has another use that is not post-dominated by the TruncInst.
  call <2 x i32> @use64_vec(<2 x i64> %A1)
  ret void
}

define void @multi_use_vec_and(<2 x i32> %X) {
; CHECK-LABEL: @multi_use_vec_and(
; CHECK-NEXT:    [[A1:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64>
; CHECK-NEXT:    [[B1:%.*]] = and <2 x i32> [[X]], <i32 15, i32 15>
; CHECK-NEXT:    [[C1:%.*]] = mul <2 x i32> [[B1]], [[B1]]
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @use32_vec(<2 x i32> [[C1]])
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i32> @use64_vec(<2 x i64> [[A1]])
; CHECK-NEXT:    ret void
;
  %A1 = zext <2 x i32> %X to <2 x i64>
  %B1 = and <2 x i64> %A1, <i64 15, i64 15>
  %C1 = mul <2 x i64> %B1, %B1
  %T1 = trunc <2 x i64> %C1 to <2 x i32>
  call <2 x i32> @use32_vec(<2 x i32> %T1)
  ; make sure zext has another use that is not post-dominated by the TruncInst.
  call <2 x i32> @use64_vec(<2 x i64> %A1)
  ret void
}

define void @multi_use_vec_sub(<2 x i32> %X, <2 x i32> %Y) {
; CHECK-LABEL: @multi_use_vec_sub(
; CHECK-NEXT:    [[A1:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64>
; CHECK-NEXT:    [[A2:%.*]] = zext <2 x i32> [[Y:%.*]] to <2 x i64>
; CHECK-NEXT:    [[B1:%.*]] = sub <2 x i32> [[X]], [[Y]]
; CHECK-NEXT:    [[C1:%.*]] = mul <2 x i32> [[B1]], [[B1]]
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @use32_vec(<2 x i32> [[C1]])
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i32> @use64_vec(<2 x i64> [[A1]])
; CHECK-NEXT:    [[TMP3:%.*]] = call <2 x i32> @use64_vec(<2 x i64> [[A2]])
; CHECK-NEXT:    ret void
;
  %A1 = zext <2 x i32> %X to <2 x i64>
  %A2 = zext <2 x i32> %Y to <2 x i64>
  %B1 = sub <2 x i64> %A1, %A2
  %C1 = mul <2 x i64> %B1, %B1
  %T1 = trunc <2 x i64> %C1 to <2 x i32>
  call <2 x i32> @use32_vec(<2 x i32> %T1)
  ; make sure zext has another use that is not post-dominated by the TruncInst.
  call <2 x i32> @use64_vec(<2 x i64> %A1)
  call <2 x i32> @use64_vec(<2 x i64> %A2)
  ret void
}
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; These tests check cases where the expression dag post-dominated by TruncInst
+;; contains a TruncInst leaf or ZExt/SExt leaves which turn into TruncInst
+;; leaves. Check that both expressions are reduced and no TruncInst remains
+;; (or was generated).
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+; Notice that there are two expression patterns below:
+; 1. %T2->%C2->(%B2->(%T1, 15), %B2->(%T1, 15))
+; 2. %T1`->%C1->(%B1->(%A1, 15), %B1->(%A1, 15))
+; (where %T1` is the reduced node of %T1 into trunc instruction)
+define void @trunc_as_a_leaf(i32 %X) {
+; CHECK-LABEL: @trunc_as_a_leaf(
+; CHECK-NEXT:    [[B1:%.*]] = add i32 [[X:%.*]], 15
+; CHECK-NEXT:    [[C1:%.*]] = mul i32 [[B1]], [[B1]]
+; CHECK-NEXT:    [[B2:%.*]] = add i32 [[C1]], 15
+; CHECK-NEXT:    [[C2:%.*]] = mul i32 [[B2]], [[B2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @use32(i32 [[C2]])
+; CHECK-NEXT:    ret void
+;
+  %A1 = zext i32 %X to i64
+  %B1 = add i64 %A1, 15
+  %C1 = mul i64 %B1, %B1
+  %T1 = trunc i64 %C1 to i48 ; leaf trunc
+  %B2 = add i48 %T1, 15
+  %C2 = mul i48 %B2, %B2
+  %T2 = trunc i48 %C2 to i32
+  call i32 @use32(i32 %T2)
+  ret void
+}
+
+; Notice that there are two expression patterns below:
+; 1. %T2->%C2->(%B2->(%T1, 15), %B2->(%T1, 15))
+; 2. %T1`->%C1->(%B1->(%A1, 15), %B1->(%A1, 15))
+; (where %T1` is the reduced node of %T1 into trunc instruction)
+define void @zext_as_a_leaf(i16 %X) {
+; CHECK-LABEL: @zext_as_a_leaf(
+; CHECK-NEXT:    [[A1:%.*]] = zext i16 [[X:%.*]] to i32
+; CHECK-NEXT:    [[B1:%.*]] = add i32 [[A1]], 15
+; CHECK-NEXT:    [[C1:%.*]] = mul i32 [[B1]], [[B1]]
+; CHECK-NEXT:    [[B2:%.*]] = add i32 [[C1]], 15
+; CHECK-NEXT:    [[C2:%.*]] = mul i32 [[B2]], [[B2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @use32(i32 [[C2]])
+; CHECK-NEXT:    ret void
+;
+  %A1 = zext i16 %X to i48
+  %B1 = add i48 %A1, 15
+  %C1 = mul i48 %B1, %B1
+  %T1 = zext i48 %C1 to i64 ; leaf zext, which will turn into trunc
+  %B2 = add i64 %T1, 15
+  %C2 = mul i64 %B2, %B2
+  %T2 = trunc i64 %C2 to i32
+  call i32 @use32(i32 %T2)
+  ret void
+}