diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -1449,8 +1449,8 @@

     if (ShiftVal == HalfSize) {
       // (G_ASHR i64:x, 32) ->
-      //   G_MERGE_VALUES lo_32(x), (G_ASHR hi_32(x), 31)
-      Builder.buildMerge(DstReg, { Unmerge.getReg(0), Hi });
+      //   G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
+      Builder.buildMerge(DstReg, { Unmerge.getReg(1), Hi });
     } else if (ShiftVal == Size - 1) {
       // Don't need a second shift.
       // (G_ASHR i64:x, 63) ->
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-ashr-narrow.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-ashr-narrow.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-ashr-narrow.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-ashr-narrow.mir
@@ -14,7 +14,7 @@
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[UV1]], [[C]](s32)
-    ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[ASHR]](s32)
+    ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV1]](s32), [[ASHR]](s32)
     ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64)
     %0:_(s64) = COPY $vgpr0_vgpr1
     %1:_(s64) = G_CONSTANT i64 32
@@ -35,7 +35,7 @@
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[UV1]], [[C]](s32)
-    ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[ASHR]](s32)
+    ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV1]](s32), [[ASHR]](s32)
     ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64)
     %0:_(s64) = COPY $vgpr0_vgpr1
     %1:_(s32) = G_CONSTANT i32 32
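
Not part of the patch: a minimal standalone C++ sketch of the identity the corrected combine relies on, using hypothetical sample values and plain host arithmetic instead of GlobalISel builders. For a 64-bit arithmetic shift right by the half width (32), the low half of the result is hi_32(x), not lo_32(x), and the high half is hi_32(x) shifted arithmetically by 31. The sketch assumes the host compiler performs arithmetic right shifts on signed integers, as mainstream compilers do.

#include <cassert>
#include <cstdint>

// Checks (x ashr 32) == merge(hi_32(x), hi_32(x) ashr 31) for 64-bit x,
// mirroring the corrected G_ASHR-by-HalfSize combine above.
int main() {
  const int64_t Samples[] = {0, 1, -1, 0x123456789abcdef0LL,
                             INT64_MIN, INT64_MAX};
  for (int64_t X : Samples) {
    // hi_32(x): the high half of x, reinterpreted as a signed 32-bit value.
    int32_t HiHalf = static_cast<int32_t>(static_cast<uint64_t>(X) >> 32);
    // Low half of the merged result comes from hi_32(x) (the fixed operand).
    uint32_t ResLo = static_cast<uint32_t>(HiHalf);
    // High half of the merged result is the sign fill: hi_32(x) ashr 31.
    uint32_t ResHi = static_cast<uint32_t>(HiHalf >> 31);
    uint64_t Merged = (static_cast<uint64_t>(ResHi) << 32) | ResLo;
    // Must match a genuine 64-bit arithmetic shift right by 32.
    assert(Merged == static_cast<uint64_t>(X >> 32));
  }
  return 0;
}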