Index: llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
===================================================================
--- llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -316,6 +316,9 @@
   bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
   bool applyCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
 
+  /// Transform zext(trunc(x)) to x.
+  bool matchCombineZextTrunc(MachineInstr &MI, Register &Reg);
+
   /// Transform [asz]ext([asz]ext(x)) to [asz]ext x.
   bool matchCombineExtOfExt(MachineInstr &MI,
                             std::tuple<Register, unsigned> &MatchInfo);
Index: llvm/include/llvm/Target/GlobalISel/Combine.td
===================================================================
--- llvm/include/llvm/Target/GlobalISel/Combine.td
+++ llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -426,6 +426,16 @@
   (apply [{ return Helper.applyCombineAnyExtTrunc(*${root}, ${matchinfo}); }])
 >;
 
+// Fold (zext (trunc x)) -> x if the source type is the same as the destination
+// type and the truncated bits are known to be zero.
+def zext_trunc_fold_matchinfo : GIDefMatchData<"Register">;
+def zext_trunc_fold: GICombineRule <
+  (defs root:$root, zext_trunc_fold_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_ZEXT):$root,
+         [{ return Helper.matchCombineZextTrunc(*${root}, ${matchinfo}); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
+>;
+
 // Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x).
 def ext_ext_fold_matchinfo : GIDefMatchData<"std::tuple<Register, unsigned>">;
 def ext_ext_fold: GICombineRule <
@@ -575,7 +585,8 @@
 def const_combines : GICombineGroup<[constant_fp_op, const_ptradd_to_i2p]>;
 
 def known_bits_simplifications : GICombineGroup<[
-  redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask]>;
+  redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask,
+  zext_trunc_fold]>;
 
 def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend]>;
Index: llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2301,6 +2301,20 @@
   return true;
 }
 
+bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI, Register &Reg) {
+  assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT");
+  Register DstReg = MI.getOperand(0).getReg();
+  Register SrcReg = MI.getOperand(1).getReg();
+  LLT DstTy = MRI.getType(DstReg);
+  if (mi_match(SrcReg, MRI,
+               m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))))) {
+    unsigned DstSize = DstTy.getScalarSizeInBits();
+    unsigned SrcSize = MRI.getType(SrcReg).getScalarSizeInBits();
+    return KB->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize;
+  }
+  return false;
+}
+
 bool CombinerHelper::matchCombineExtOfExt(
     MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
   assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
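Note on the legality condition: the following is a minimal standalone sketch of the check matchCombineZextTrunc performs, written directly against llvm::KnownBits. The main driver, variable names, and printed output are illustrative only, not part of the patch; it assumes an LLVM tree to compile against. The fold zext(trunc x) -> x is sound exactly when x's leading known-zero bits cover every bit the G_TRUNC drops:

  #include "llvm/ADT/APInt.h"
  #include "llvm/Support/KnownBits.h"
  #include <cstdio>

  using namespace llvm;

  int main() {
    // Model %low_bits:_(s32) = G_AND %var, 16383 from the first test below:
    // the mask forces bits [14, 31] of x to zero.
    KnownBits X(32);
    X.Zero = ~APInt(32, 0x3FFF);

    // s32 -> s16 -> s32: the trunc drops DstSize - SrcSize = 16 high bits,
    // so the fold needs at least 16 leading known zeros in x.
    unsigned DstSize = 32; // G_ZEXT destination type (must equal x's type)
    unsigned SrcSize = 16; // G_TRUNC result type feeding the G_ZEXT
    bool Folds = X.countMinLeadingZeros() >= DstSize - SrcSize;
    printf("folds: %s\n", Folds ? "yes" : "no"); // 18 >= 16, so "yes"
    return 0;
  }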
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/combine-zext-trunc.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/combine-zext-trunc.mir
@@ -0,0 +1,198 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -run-pass=amdgpu-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s
+
+---
+name: zext_trunc_s32_s16_s32
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; GCN-LABEL: name: zext_trunc_s32_s16_s32
+    ; GCN: liveins: $vgpr0
+    ; GCN: %var:_(s32) = COPY $vgpr0
+    ; GCN: %c3FFF:_(s32) = G_CONSTANT i32 16383
+    ; GCN: %low_bits:_(s32) = G_AND %var, %c3FFF
+    ; GCN: $vgpr0 = COPY %low_bits(s32)
+    %var:_(s32) = COPY $vgpr0
+    %c3FFF:_(s32) = G_CONSTANT i32 16383
+    %low_bits:_(s32) = G_AND %var, %c3FFF
+    %trunc:_(s16) = G_TRUNC %low_bits(s32)
+    %zext:_(s32) = G_ZEXT %trunc(s16)
+    $vgpr0 = COPY %zext(s32)
+...
+
+---
+name: zext_trunc_s32_s16_s32_unknown_high_bits
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; GCN-LABEL: name: zext_trunc_s32_s16_s32_unknown_high_bits
+    ; GCN: liveins: $vgpr0
+    ; GCN: %var:_(s32) = COPY $vgpr0
+    ; GCN: %cFFFFF:_(s32) = G_CONSTANT i32 1048575
+    ; GCN: %low_bits:_(s32) = G_AND %var, %cFFFFF
+    ; GCN: %trunc:_(s16) = G_TRUNC %low_bits(s32)
+    ; GCN: %zext:_(s32) = G_ZEXT %trunc(s16)
+    ; GCN: $vgpr0 = COPY %zext(s32)
+    %var:_(s32) = COPY $vgpr0
+    %cFFFFF:_(s32) = G_CONSTANT i32 1048575
+    %low_bits:_(s32) = G_AND %var, %cFFFFF
+    %trunc:_(s16) = G_TRUNC %low_bits(s32)
+    %zext:_(s32) = G_ZEXT %trunc(s16)
+    $vgpr0 = COPY %zext(s32)
+...
+
+---
+name: zext_trunc_s64_s16_s32
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; GCN-LABEL: name: zext_trunc_s64_s16_s32
+    ; GCN: liveins: $vgpr0_vgpr1
+    ; GCN: %var:_(s64) = COPY $vgpr0_vgpr1
+    ; GCN: %c3FFF:_(s64) = G_CONSTANT i64 16383
+    ; GCN: %low_bits:_(s64) = G_AND %var, %c3FFF
+    ; GCN: %trunc:_(s16) = G_TRUNC %low_bits(s64)
+    ; GCN: %zext:_(s32) = G_ZEXT %trunc(s16)
+    ; GCN: $vgpr0 = COPY %zext(s32)
+    %var:_(s64) = COPY $vgpr0_vgpr1
+    %c3FFF:_(s64) = G_CONSTANT i64 16383
+    %low_bits:_(s64) = G_AND %var, %c3FFF
+    %trunc:_(s16) = G_TRUNC %low_bits(s64)
+    %zext:_(s32) = G_ZEXT %trunc(s16)
+    $vgpr0 = COPY %zext(s32)
+...
+
+---
+name: zext_trunc_s32_s16_s64
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; GCN-LABEL: name: zext_trunc_s32_s16_s64
+    ; GCN: liveins: $vgpr0
+    ; GCN: %var:_(s32) = COPY $vgpr0
+    ; GCN: %c3FFF:_(s32) = G_CONSTANT i32 16383
+    ; GCN: %low_bits:_(s32) = G_AND %var, %c3FFF
+    ; GCN: %trunc:_(s16) = G_TRUNC %low_bits(s32)
+    ; GCN: %zext:_(s64) = G_ZEXT %trunc(s16)
+    ; GCN: $vgpr0_vgpr1 = COPY %zext(s64)
+    %var:_(s32) = COPY $vgpr0
+    %c3FFF:_(s32) = G_CONSTANT i32 16383
+    %low_bits:_(s32) = G_AND %var, %c3FFF
+    %trunc:_(s16) = G_TRUNC %low_bits(s32)
+    %zext:_(s64) = G_ZEXT %trunc(s16)
+    $vgpr0_vgpr1 = COPY %zext(s64)
+...
+
+---
+name: zext_trunc_v2s32_v2s16_v2s32
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; GCN-LABEL: name: zext_trunc_v2s32_v2s16_v2s32
+    ; GCN: liveins: $vgpr0_vgpr1
+    ; GCN: %var:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; GCN: %c3FFF:_(s32) = G_CONSTANT i32 16383
+    ; GCN: %c7FFF:_(s32) = G_CONSTANT i32 32767
+    ; GCN: %c:_(<2 x s32>) = G_BUILD_VECTOR %c3FFF(s32), %c7FFF(s32)
+    ; GCN: %low_bits:_(<2 x s32>) = G_AND %var, %c
+    ; GCN: $vgpr0_vgpr1 = COPY %low_bits(<2 x s32>)
+    %var:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %c3FFF:_(s32) = G_CONSTANT i32 16383
+    %c7FFF:_(s32) = G_CONSTANT i32 32767
+    %c:_(<2 x s32>) = G_BUILD_VECTOR %c3FFF(s32), %c7FFF(s32)
+    %low_bits:_(<2 x s32>) = G_AND %var, %c
+    %trunc:_(<2 x s16>) = G_TRUNC %low_bits(<2 x s32>)
+    %zext:_(<2 x s32>) = G_ZEXT %trunc(<2 x s16>)
+    $vgpr0_vgpr1 = COPY %zext(<2 x s32>)
+...
+
+---
+name: zext_trunc_v2s32_v2s16_v2s32_unknown_high_bits
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; GCN-LABEL: name: zext_trunc_v2s32_v2s16_v2s32_unknown_high_bits
+    ; GCN: liveins: $vgpr0_vgpr1
+    ; GCN: %var:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; GCN: %cFFFFF:_(s32) = G_CONSTANT i32 1048575
+    ; GCN: %c7FFF:_(s32) = G_CONSTANT i32 32767
+    ; GCN: %c:_(<2 x s32>) = G_BUILD_VECTOR %cFFFFF(s32), %c7FFF(s32)
+    ; GCN: %low_bits:_(<2 x s32>) = G_AND %var, %c
+    ; GCN: %trunc:_(<2 x s16>) = G_TRUNC %low_bits(<2 x s32>)
+    ; GCN: %zext:_(<2 x s32>) = G_ZEXT %trunc(<2 x s16>)
+    ; GCN: $vgpr0_vgpr1 = COPY %zext(<2 x s32>)
+    %var:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %cFFFFF:_(s32) = G_CONSTANT i32 1048575
+    %c7FFF:_(s32) = G_CONSTANT i32 32767
+    %c:_(<2 x s32>) = G_BUILD_VECTOR %cFFFFF(s32), %c7FFF(s32)
+    %low_bits:_(<2 x s32>) = G_AND %var, %c
+    %trunc:_(<2 x s16>) = G_TRUNC %low_bits(<2 x s32>)
+    %zext:_(<2 x s32>) = G_ZEXT %trunc(<2 x s16>)
+    $vgpr0_vgpr1 = COPY %zext(<2 x s32>)
+...
+
+---
+name: zext_trunc_v2s64_v2s16_v2s32
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1_vgpr2_vgpr3
+
+    ; GCN-LABEL: name: zext_trunc_v2s64_v2s16_v2s32
+    ; GCN: liveins: $vgpr0_vgpr1_vgpr2_vgpr3
+    ; GCN: %var:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    ; GCN: %c3FFF:_(s64) = G_CONSTANT i64 16383
+    ; GCN: %c7FFF:_(s64) = G_CONSTANT i64 32767
+    ; GCN: %c:_(<2 x s64>) = G_BUILD_VECTOR %c3FFF(s64), %c7FFF(s64)
+    ; GCN: %low_bits:_(<2 x s64>) = G_AND %var, %c
+    ; GCN: %trunc:_(<2 x s16>) = G_TRUNC %low_bits(<2 x s64>)
+    ; GCN: %zext:_(<2 x s32>) = G_ZEXT %trunc(<2 x s16>)
+    ; GCN: $vgpr0_vgpr1 = COPY %zext(<2 x s32>)
+    %var:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    %c3FFF:_(s64) = G_CONSTANT i64 16383
+    %c7FFF:_(s64) = G_CONSTANT i64 32767
+    %c:_(<2 x s64>) = G_BUILD_VECTOR %c3FFF(s64), %c7FFF(s64)
+    %low_bits:_(<2 x s64>) = G_AND %var, %c
+    %trunc:_(<2 x s16>) = G_TRUNC %low_bits(<2 x s64>)
+    %zext:_(<2 x s32>) = G_ZEXT %trunc(<2 x s16>)
+    $vgpr0_vgpr1 = COPY %zext(<2 x s32>)
+...
+
+---
+name: zext_trunc_v2s32_v2s16_v2s64
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; GCN-LABEL: name: zext_trunc_v2s32_v2s16_v2s64
+    ; GCN: liveins: $vgpr0_vgpr1
+    ; GCN: %var:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; GCN: %c3FFF:_(s32) = G_CONSTANT i32 16383
+    ; GCN: %c7FFF:_(s32) = G_CONSTANT i32 32767
+    ; GCN: %c:_(<2 x s32>) = G_BUILD_VECTOR %c3FFF(s32), %c7FFF(s32)
+    ; GCN: %low_bits:_(<2 x s32>) = G_AND %var, %c
+    ; GCN: %trunc:_(<2 x s16>) = G_TRUNC %low_bits(<2 x s32>)
+    ; GCN: %zext:_(<2 x s64>) = G_ZEXT %trunc(<2 x s16>)
+    ; GCN: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %zext(<2 x s64>)
+    %var:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %c3FFF:_(s32) = G_CONSTANT i32 16383
+    %c7FFF:_(s32) = G_CONSTANT i32 32767
+    %c:_(<2 x s32>) = G_BUILD_VECTOR %c3FFF(s32), %c7FFF(s32)
+    %low_bits:_(<2 x s32>) = G_AND %var, %c
+    %trunc:_(<2 x s16>) = G_TRUNC %low_bits(<2 x s32>)
+    %zext:_(<2 x s64>) = G_ZEXT %trunc(<2 x s16>)
+    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %zext(<2 x s64>)
+...
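The vector tests rely on the known bits of a G_BUILD_VECTOR being the intersection of the per-lane known bits, so the fold fires only when every lane clears the dropped bits (compare zext_trunc_v2s32_v2s16_v2s32, which folds, with its unknown_high_bits twin, which does not). Below is a standalone sketch of that arithmetic, again illustrative only and assuming LLVM's Support headers; how GISelKnownBits arrives at the intersection internally is not shown here:

  #include "llvm/ADT/APInt.h"
  #include "llvm/Support/KnownBits.h"
  #include <cstdio>

  using namespace llvm;

  int main() {
    // Lane masks from zext_trunc_v2s32_v2s16_v2s32_unknown_high_bits:
    // 0xFFFFF leaves 12 known leading zeros, 0x7FFF leaves 17.
    KnownBits Lane0(32), Lane1(32);
    Lane0.Zero = ~APInt(32, 0xFFFFF);
    Lane1.Zero = ~APInt(32, 0x7FFF);

    // A bit of the vector value is known zero only if it is known zero in
    // every lane, so intersect the lanes.
    KnownBits Common(32);
    Common.Zero = Lane0.Zero & Lane1.Zero;
    Common.One = Lane0.One & Lane1.One;

    // v2s32 -> v2s16 -> v2s32 needs 16 leading zeros; 12 < 16, so no fold,
    // matching the checks above that keep the G_TRUNC/G_ZEXT pair.
    printf("min leading zeros: %u\n", Common.countMinLeadingZeros()); // 12
    return 0;
  }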
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/shl-ext-reduce.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/shl-ext-reduce.ll
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/shl-ext-reduce.ll
@@ -404,18 +404,14 @@
 ; GFX8: ; %bb.0:
 ; GFX8-NEXT: s_and_b32 s0, s0, 0xffff
 ; GFX8-NEXT: s_and_b32 s0, s0, 0x3fff
-; GFX8-NEXT: s_bfe_u32 s1, 2, 0x100000
-; GFX8-NEXT: s_lshl_b32 s0, s0, s1
-; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s0, s0, 2
 ; GFX8-NEXT: ; return to shader part epilog
 ;
 ; GFX9-LABEL: s_shl_i32_zext_i16:
 ; GFX9: ; %bb.0:
 ; GFX9-NEXT: s_and_b32 s0, s0, 0xffff
 ; GFX9-NEXT: s_and_b32 s0, s0, 0x3fff
-; GFX9-NEXT: s_bfe_u32 s1, 2, 0x100000
-; GFX9-NEXT: s_lshl_b32 s0, s0, s1
-; GFX9-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX9-NEXT: s_lshl_b32 s0, s0, 2
 ; GFX9-NEXT: ; return to shader part epilog
 %and = and i16 %x, 16383
 %ext = zext i16 %and to i32
@@ -468,13 +464,10 @@
 ; GFX8-LABEL: s_shl_v2i32_zext_v2i16:
 ; GFX8: ; %bb.0:
 ; GFX8-NEXT: s_movk_i32 s2, 0x3fff
-; GFX8-NEXT: s_mov_b32 s4, 0xffff
 ; GFX8-NEXT: s_lshr_b32 s1, s0, 16
 ; GFX8-NEXT: s_mov_b32 s3, s2
-; GFX8-NEXT: s_and_b32 s0, s0, s4
+; GFX8-NEXT: s_and_b32 s0, s0, 0xffff
 ; GFX8-NEXT: s_and_b64 s[0:1], s[0:1], s[2:3]
-; GFX8-NEXT: s_mov_b32 s5, s4
-; GFX8-NEXT: s_and_b64 s[0:1], s[0:1], s[4:5]
 ; GFX8-NEXT: s_lshl_b32 s0, s0, 2
 ; GFX8-NEXT: s_lshl_b32 s1, s1, 2
 ; GFX8-NEXT: ; return to shader part epilog
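For context on the instructions this hunk deletes: s_bfe_u32 is an unsigned bitfield extract, and (per my reading of the GCN ISA reference, so treat the exact field layout as an assumption) its second source operand packs the field offset in bits [4:0] and the field width in bits [22:16]. The recurring 0x100000 immediate therefore encodes width 16, offset 0, i.e. a zero-extend from 16 bits, which is precisely the operation the new combine proves redundant. A tiny decoder sketch:

  #include <cstdint>
  #include <cstdio>

  int main() {
    uint32_t Src1 = 0x100000;              // immediate seen in the old checks
    uint32_t Offset = Src1 & 0x1Fu;        // bits [4:0]: field offset
    uint32_t Width = (Src1 >> 16) & 0x7Fu; // bits [22:16]: field width
    // Prints "offset=0 width=16": extracting 16 bits from offset 0 is just a
    // zext from i16, redundant once the high bits are known to be zero.
    printf("offset=%u width=%u\n", Offset, Width);
    return 0;
  }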