[GlobalISel][AMDGPU] Add MIR tests: funnel-shift and rotate patterns with
<2 x s32> splat-constant shift amounts (G_SHL amt 20 + G_LSHR amt 12; the
amounts sum to the 32-bit element width, i.e. the classic fshl/rotl-by-20
expansion built from G_BUILD_VECTOR splats).

NOTE(review): the CHECK lines expect the G_SHL/G_LSHR/G_OR sequence to be
left uncombined, so these look like pre-commit baselines for a vector
funnel-shift/rotate combine (the scalar `*_i32_bad_const` siblings visible
in the context suggest matching scalar coverage already exists) -- confirm
against the follow-up combiner change. CHECK lines appear auto-generated
(update_mir_test_checks.py style); regenerate rather than hand-edit.
This commentary sits above the first "diff --git" header and is ignored by
`git apply` / `patch`.

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fsh.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fsh.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fsh.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fsh.mir
@@ -131,6 +131,38 @@
     $vgpr2 = COPY %or
 ...
 
+---
+name: fsh_v2i32_const
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5
+
+    ; CHECK-LABEL: name: fsh_v2i32_const
+    ; CHECK: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: %a:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK-NEXT: %b:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; CHECK-NEXT: %scalar_amt0:_(s32) = G_CONSTANT i32 20
+    ; CHECK-NEXT: %amt0:_(<2 x s32>) = G_BUILD_VECTOR %scalar_amt0(s32), %scalar_amt0(s32)
+    ; CHECK-NEXT: %scalar_amt1:_(s32) = G_CONSTANT i32 12
+    ; CHECK-NEXT: %amt1:_(<2 x s32>) = G_BUILD_VECTOR %scalar_amt1(s32), %scalar_amt1(s32)
+    ; CHECK-NEXT: %shl:_(<2 x s32>) = G_SHL %a, %amt0(<2 x s32>)
+    ; CHECK-NEXT: %lshr:_(<2 x s32>) = G_LSHR %b, %amt1(<2 x s32>)
+    ; CHECK-NEXT: %or:_(<2 x s32>) = G_OR %shl, %lshr
+    ; CHECK-NEXT: $vgpr4_vgpr5 = COPY %or(<2 x s32>)
+    %a:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %b:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    %scalar_amt0:_(s32) = G_CONSTANT i32 20
+    %amt0:_(<2 x s32>) = G_BUILD_VECTOR %scalar_amt0, %scalar_amt0
+    %scalar_amt1:_(s32) = G_CONSTANT i32 12
+    %amt1:_(<2 x s32>) = G_BUILD_VECTOR %scalar_amt1, %scalar_amt1
+    %shl:_(<2 x s32>) = G_SHL %a, %amt0
+    %lshr:_(<2 x s32>) = G_LSHR %b, %amt1
+    %or:_(<2 x s32>) = G_OR %shl, %lshr
+    $vgpr4_vgpr5 = COPY %or
+...
+
 ---
 name: fsh_i32_bad_const
 tracksRegLiveness: true
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-rot.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-rot.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-rot.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-rot.mir
@@ -121,6 +121,36 @@
     $vgpr1 = COPY %or
 ...
 
+---
+name: rot_v2i32_const
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+
+    ; CHECK-LABEL: name: rot_v2i32_const
+    ; CHECK: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: %a:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK-NEXT: %scalar_amt0:_(s32) = G_CONSTANT i32 20
+    ; CHECK-NEXT: %amt0:_(<2 x s32>) = G_BUILD_VECTOR %scalar_amt0(s32), %scalar_amt0(s32)
+    ; CHECK-NEXT: %scalar_amt1:_(s32) = G_CONSTANT i32 12
+    ; CHECK-NEXT: %amt1:_(<2 x s32>) = G_BUILD_VECTOR %scalar_amt1(s32), %scalar_amt1(s32)
+    ; CHECK-NEXT: %shl:_(<2 x s32>) = G_SHL %a, %amt0(<2 x s32>)
+    ; CHECK-NEXT: %lshr:_(<2 x s32>) = G_LSHR %a, %amt1(<2 x s32>)
+    ; CHECK-NEXT: %or:_(<2 x s32>) = G_OR %shl, %lshr
+    ; CHECK-NEXT: $vgpr2_vgpr3 = COPY %or(<2 x s32>)
+    %a:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %scalar_amt0:_(s32) = G_CONSTANT i32 20
+    %amt0:_(<2 x s32>) = G_BUILD_VECTOR %scalar_amt0, %scalar_amt0
+    %scalar_amt1:_(s32) = G_CONSTANT i32 12
+    %amt1:_(<2 x s32>) = G_BUILD_VECTOR %scalar_amt1, %scalar_amt1
+    %shl:_(<2 x s32>) = G_SHL %a, %amt0
+    %lshr:_(<2 x s32>) = G_LSHR %a, %amt1
+    %or:_(<2 x s32>) = G_OR %shl, %lshr
+    $vgpr2_vgpr3 = COPY %or
+...
+
 ---
 name: rot_i32_bad_const
 tracksRegLiveness: true