Index: llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
===================================================================
--- llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -316,6 +316,9 @@
   bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
   bool applyCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
 
+  /// Transform zext(trunc(x)) to x.
+  bool matchCombineZextTrunc(MachineInstr &MI, Register &Reg);
+
   /// Transform [asz]ext([asz]ext(x)) to [asz]ext x.
   bool matchCombineExtOfExt(MachineInstr &MI,
                             std::tuple<Register, unsigned> &MatchInfo);
Index: llvm/include/llvm/Target/GlobalISel/Combine.td
===================================================================
--- llvm/include/llvm/Target/GlobalISel/Combine.td
+++ llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -426,6 +426,16 @@
   (apply [{ return Helper.applyCombineAnyExtTrunc(*${root}, ${matchinfo}); }])
 >;
 
+// Fold (zext (trunc x)) -> x if the source type is the same as the destination
+// type and the truncated bits are known to be zero.
+def zext_trunc_fold_matchinfo : GIDefMatchData<"Register">;
+def zext_trunc_fold: GICombineRule <
+  (defs root:$root, zext_trunc_fold_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_ZEXT):$root,
+         [{ return Helper.matchCombineZextTrunc(*${root}, ${matchinfo}); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
+>;
+
 // Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x).
 def ext_ext_fold_matchinfo : GIDefMatchData<"std::tuple<Register, unsigned>">;
 def ext_ext_fold: GICombineRule <
@@ -575,7 +585,8 @@
 def const_combines : GICombineGroup<[constant_fp_op, const_ptradd_to_i2p]>;
 
 def known_bits_simplifications : GICombineGroup<[
-  redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask]>;
+  redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask,
+  zext_trunc_fold]>;
 
 def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend]>;
Index: llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2301,6 +2301,19 @@
   return true;
 }
 
+bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI, Register &Reg) {
+  assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");
+  Register DstReg = MI.getOperand(0).getReg();
+  Register SrcReg = MI.getOperand(1).getReg();
+  LLT DstTy = MRI.getType(DstReg);
+  if (mi_match(SrcReg, MRI,
+               m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))))) {
+    return KB->getKnownBits(Reg).countMinLeadingZeros() >=
+           DstTy.getSizeInBits() - MRI.getType(SrcReg).getSizeInBits();
+  }
+  return false;
+}
+
 bool CombinerHelper::matchCombineExtOfExt(
     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
   assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/shl-ext-reduce.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/shl-ext-reduce.ll
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/shl-ext-reduce.ll
@@ -404,18 +404,14 @@
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_and_b32 s0, s0, 0xffff
 ; GFX8-NEXT:    s_and_b32 s0, s0, 0x3fff
-; GFX8-NEXT:    s_bfe_u32 s1, 2, 0x100000
-; GFX8-NEXT:    s_lshl_b32 s0, s0, s1
-; GFX8-NEXT:    s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT:    s_lshl_b32 s0, s0, 2
 ; GFX8-NEXT:    ; return to shader part epilog
 ;
 ; GFX9-LABEL: s_shl_i32_zext_i16:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_and_b32 s0, s0, 0xffff
 ; GFX9-NEXT:    s_and_b32 s0, s0, 0x3fff
-; GFX9-NEXT:    s_bfe_u32 s1, 2, 0x100000
-; GFX9-NEXT:    s_lshl_b32 s0, s0, s1
-; GFX9-NEXT:    s_bfe_u32 s0, s0, 0x100000
+; GFX9-NEXT:    s_lshl_b32 s0, s0, 2
 ; GFX9-NEXT:    ; return to shader part epilog
   %and = and i16 %x, 16383
   %ext = zext i16 %and to i32
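
For context, a minimal generic-MIR sketch of the pattern the new rule folds, mirroring the AMDGPU test above (register names and the 0x3fff mask are illustrative, not part of the patch). The G_AND clears all but the low 14 bits, so KnownBits reports at least 18 leading zeros on %src; since DstSize - SrcSize = 32 - 16 = 16 <= 18, matchCombineZextTrunc succeeds and replaceSingleDefInstWithReg rewrites every use of %z to %src:

  %mask:_(s32) = G_CONSTANT i32 16383
  %src:_(s32) = G_AND %x, %mask      ; KnownBits: high 18 bits are zero
  %t:_(s16) = G_TRUNC %src(s32)
  %z:_(s32) = G_ZEXT %t(s16)         ; folded: uses of %z become uses of %src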