Index: llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
===================================================================
--- llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -1406,7 +1406,8 @@
                                                 unsigned TargetShiftSize,
                                                 unsigned &ShiftVal) {
   assert((MI.getOpcode() == TargetOpcode::G_SHL ||
-          MI.getOpcode() == TargetOpcode::G_LSHR) && "Expected a shift");
+          MI.getOpcode() == TargetOpcode::G_LSHR ||
+          MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");
 
   LLT Ty = MRI.getType(MI.getOperand(0).getReg());
   if (Ty.isVector()) // TODO:
@@ -1433,8 +1434,8 @@
   LLT Ty = MRI.getType(SrcReg);
   unsigned Size = Ty.getSizeInBits();
   unsigned HalfSize = Size / 2;
-  assert(ShiftVal >= HalfSize);
+
   LLT HalfTy = LLT::scalar(HalfSize);
 
   Builder.setInstr(MI);
@@ -1456,16 +1457,12 @@
 
     auto Zero = Builder.buildConstant(HalfTy, 0);
     Builder.buildMerge(DstReg, { Narrowed, Zero });
-  } else {
+  } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
     Register Narrowed = Unmerge.getReg(0);
     // dst = G_SHL s64:x, C for C >= 32
     // =>
     // lo, hi = G_UNMERGE_VALUES x
     // dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32)
-
-    // TODO: ashr
-    assert(MI.getOpcode() == TargetOpcode::G_SHL);
-
     if (NarrowShiftAmt != 0) {
       Narrowed = Builder.buildShl(HalfTy, Narrowed,
         Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
@@ -1473,6 +1470,31 @@
 
     auto Zero = Builder.buildConstant(HalfTy, 0);
     Builder.buildMerge(DstReg, { Zero, Narrowed });
+  } else {
+    assert(MI.getOpcode() == TargetOpcode::G_ASHR);
+    auto Hi = Builder.buildAShr(
+      HalfTy, Unmerge.getReg(1),
+      Builder.buildConstant(HalfTy, HalfSize - 1));
+
+    if (ShiftVal == HalfSize) {
+      // (G_ASHR i64:x, 32) ->
+      //   G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
+      Builder.buildMerge(DstReg, { Unmerge.getReg(1), Hi });
+    } else if (ShiftVal == Size - 1) {
+      // Don't need a second shift.
+      // (G_ASHR i64:x, 63) ->
+      //   %narrowed = (G_ASHR hi_32(x), 31)
+      //   G_MERGE_VALUES %narrowed, %narrowed
+      Builder.buildMerge(DstReg, { Hi, Hi });
+    } else {
+      auto Lo = Builder.buildAShr(
+        HalfTy, Unmerge.getReg(1),
+        Builder.buildConstant(HalfTy, ShiftVal - HalfSize));
+
+      // (G_ASHR i64:x, C) -> for C >= 32
+      //   G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
+      Builder.buildMerge(DstReg, { Lo, Hi });
+    }
   }
 
   MI.eraseFromParent();
Index: llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPUPreLegalizerCombiner.cpp
@@ -167,6 +167,7 @@
   switch (MI.getOpcode()) {
   case TargetOpcode::G_SHL:
   case TargetOpcode::G_LSHR:
+  case TargetOpcode::G_ASHR:
     // On some subtargets, 64-bit shift is a quarter rate instruction. In the
     // common case, splitting this into a move and a 32-bit shift is faster and
     // the same code size.
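The G_ASHR narrowing above rests on a simple bit-level identity: for a 64-bit arithmetic shift by a constant C with 32 <= C < 64, only the high 32 bits of the source matter, the low half of the result is ashr(hi_32(x), C - 32), and the high half is pure sign fill. The following standalone sketch (illustration only, not part of the patch; the helper name narrowAShr64 is made up) checks that identity against a plain 64-bit shift, assuming the compiler implements >> on signed values as an arithmetic shift, as all major compilers do.

  // Standalone check of the identity used by the G_ASHR combine: for 32 <= C < 64,
  //   ashr(x, C) == merge(lo = ashr(hi_32(x), C - 32), hi = ashr(hi_32(x), 31))
  #include <cassert>
  #include <cstdint>

  static int64_t narrowAShr64(int64_t X, unsigned C) {
    assert(C >= 32 && C < 64);
    int32_t Hi = static_cast<int32_t>(static_cast<uint64_t>(X) >> 32);
    uint32_t ResLo = static_cast<uint32_t>(Hi >> (C - 32)); // low half of result
    uint32_t ResHi = static_cast<uint32_t>(Hi >> 31);       // sign fill
    return static_cast<int64_t>((static_cast<uint64_t>(ResHi) << 32) | ResLo);
  }

  int main() {
    for (unsigned C = 32; C < 64; ++C)
      for (int64_t X : {int64_t(0x123456789abcdef0), int64_t(-42), int64_t(-1), int64_t(0)})
        assert(narrowAShr64(X, C) == (X >> C)); // assumes arithmetic >> on signed X
  }

Note how C == 32 degenerates to merge(hi_32(x), sign) and C == 63 to merge(sign, sign), which are exactly the two cases the patch handles without building a second shift.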
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/combine-ashr-narrow.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/combine-ashr-narrow.mir
@@ -0,0 +1,204 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -run-pass=amdgpu-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: narrow_ashr_s64_32_s64amt
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: narrow_ashr_s64_32_s64amt
+    ; CHECK: liveins: $vgpr0_vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[UV1]], [[C]](s32)
+    ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV1]](s32), [[ASHR]](s32)
+    ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s64) = G_CONSTANT i64 32
+    %2:_(s64) = G_ASHR %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
+
+---
+name: narrow_ashr_s64_32
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: narrow_ashr_s64_32
+    ; CHECK: liveins: $vgpr0_vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[UV1]], [[C]](s32)
+    ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV1]](s32), [[ASHR]](s32)
+    ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s32) = G_CONSTANT i32 32
+    %2:_(s64) = G_ASHR %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
+
+---
+name: narrow_ashr_s64_33
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: narrow_ashr_s64_33
+    ; CHECK: liveins: $vgpr0_vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[UV1]], [[C]](s32)
+    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[UV1]], [[C1]](s32)
+    ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ASHR1]](s32), [[ASHR]](s32)
+    ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s32) = G_CONSTANT i32 33
+    %2:_(s64) = G_ASHR %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
+
+---
+name: narrow_ashr_s64_31
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: narrow_ashr_s64_31
+    ; CHECK: liveins: $vgpr0_vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s32)
+    ; CHECK: $vgpr0_vgpr1 = COPY [[ASHR]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s32) = G_CONSTANT i32 31
+    %2:_(s64) = G_ASHR %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
+
+---
+name: narrow_ashr_s64_63
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: narrow_ashr_s64_63
+    ; CHECK: liveins: $vgpr0_vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[UV1]], [[C]](s32)
+    ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ASHR]](s32), [[ASHR]](s32)
+    ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s32) = G_CONSTANT i32 63
+    %2:_(s64) = G_ASHR %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
+
+---
+name: narrow_ashr_s64_64
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: narrow_ashr_s64_64
+    ; CHECK: liveins: $vgpr0_vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s32)
+    ; CHECK: $vgpr0_vgpr1 = COPY [[ASHR]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s32) = G_CONSTANT i32 64
+    %2:_(s64) = G_ASHR %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
+
+---
+name: narrow_ashr_s64_65
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: narrow_ashr_s64_65
+    ; CHECK: liveins: $vgpr0_vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s32)
+    ; CHECK: $vgpr0_vgpr1 = COPY [[ASHR]](s64)
+    %0:_(s64) = COPY $vgpr0_vgpr1
+    %1:_(s32) = G_CONSTANT i32 65
+    %2:_(s64) = G_ASHR %0, %1
+    $vgpr0_vgpr1 = COPY %2
+...
+
+---
+name: narrow_ashr_s32_16
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: narrow_ashr_s32_16
+    ; CHECK: liveins: $vgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[C]](s32)
+    ; CHECK: $vgpr0 = COPY [[ASHR]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_CONSTANT i32 16
+    %2:_(s32) = G_ASHR %0, %1
+    $vgpr0 = COPY %2
+...
+
+---
+name: narrow_ashr_s32_17
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: narrow_ashr_s32_17
+    ; CHECK: liveins: $vgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 17
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[C]](s32)
+    ; CHECK: $vgpr0 = COPY [[ASHR]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_CONSTANT i32 17
+    %2:_(s32) = G_ASHR %0, %1
+    $vgpr0 = COPY %2
+...
+
+---
+name: narrow_ashr_v2s32_17
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1
+
+    ; CHECK-LABEL: name: narrow_ashr_v2s32_17
+    ; CHECK: liveins: $vgpr0_vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 17
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s32>) = G_ASHR [[COPY]], [[BUILD_VECTOR]](<2 x s32>)
+    ; CHECK: $vgpr0_vgpr1 = COPY [[ASHR]](<2 x s32>)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(s32) = G_CONSTANT i32 17
+    %2:_(<2 x s32>) = G_BUILD_VECTOR %1, %1
+    %3:_(<2 x s32>) = G_ASHR %0, %2
+    $vgpr0_vgpr1 = COPY %3
+...
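For quick reference, the shift amounts exercised above that actually get narrowed can be summarized by a small predicate. This is a sketch inferred from the test expectations (constant amount, scalar wider than the 32-bit target shift size, amount in [Size/2, Size)); it is not the combiner's matching code, and shouldNarrowShift is a hypothetical name used only for illustration.

  #include <cstdint>

  // Sketch only: approximates which constant-amount G_SHL/G_LSHR/G_ASHR cases
  // the tests above expect to be rewritten to operate on half-width values.
  static bool shouldNarrowShift(unsigned SizeInBits, bool IsVector, uint64_t C,
                                unsigned TargetShiftSize = 32) {
    if (IsVector || SizeInBits <= TargetShiftSize)
      return false;                               // vectors and s32 stay as-is
    return C >= SizeInBits / 2 && C < SizeInBits; // e.g. 32..63 for s64
  }

Under this predicate the s64 tests with amounts 32, 33, and 63 combine, while the 31, 64, and 65 cases, the s32 cases, and the vector case are left untouched, matching the CHECK lines above.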