diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1608,12 +1608,16 @@
     }
     break;
   }
+  case TargetOpcode::G_SHL:
+  case TargetOpcode::G_LSHR:
+  case TargetOpcode::G_ASHR:
   case TargetOpcode::G_ROTR:
   case TargetOpcode::G_ROTL: {
     LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
     LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
     if (Src1Ty.isVector() != Src2Ty.isVector()) {
-      report("Rotate requires operands to be either all scalars or all vectors",
+      report("Shifts and rotates require operands to be either all scalars or "
+             "all vectors",
              MI);
       break;
     }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-urem-pow-2.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-urem-pow-2.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-urem-pow-2.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-urem-pow-2.mir
@@ -176,15 +176,15 @@
     ; GCN: liveins: $vgpr0, $vgpr1
     ; GCN-NEXT: {{  $}}
     ; GCN-NEXT: %var:_(<2 x s16>) = COPY $vgpr0
-    ; GCN-NEXT: %shift_amt:_(<2 x s16>) = COPY $vgpr1
+    ; GCN-NEXT: %shift_amt:_(s32) = COPY $vgpr1
     ; GCN-NEXT: %two:_(s32) = G_CONSTANT i32 2
     ; GCN-NEXT: %four:_(s32) = G_CONSTANT i32 4
-    ; GCN-NEXT: %shift:_(s32) = G_SHL %two, %shift_amt(<2 x s16>)
+    ; GCN-NEXT: %shift:_(s32) = G_SHL %two, %shift_amt(s32)
     ; GCN-NEXT: %four_vec:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC %four(s32), %shift(s32)
     ; GCN-NEXT: %rem:_(<2 x s16>) = G_UREM %var, %four_vec
     ; GCN-NEXT: $vgpr0 = COPY %rem(<2 x s16>)
     %var:_(<2 x s16>) = COPY $vgpr0
-    %shift_amt:_(<2 x s16>) = COPY $vgpr1
+    %shift_amt:_(s32) = COPY $vgpr1
     %two:_(s32) = G_CONSTANT i32 2
     %four:_(s32) = G_CONSTANT i32 4
     %shift:_(s32) = G_SHL %two, %shift_amt
diff --git a/llvm/test/MachineVerifier/test_g_rotr_rotl.mir b/llvm/test/MachineVerifier/test_g_rotr_rotl.mir
--- a/llvm/test/MachineVerifier/test_g_rotr_rotl.mir
+++ b/llvm/test/MachineVerifier/test_g_rotr_rotl.mir
@@ -7,7 +7,7 @@
     %src:_(<2 x s64>) = G_IMPLICIT_DEF
     %amt:_(s64) = G_IMPLICIT_DEF
 
-    ; CHECK: Rotate requires operands to be either all scalars or all vectors
+    ; CHECK: Shifts and rotates require operands to be either all scalars or all vectors
     %rotr:_(<2 x s64>) = G_ROTR %src, %amt
 
 ...
diff --git a/llvm/test/MachineVerifier/test_g_shift.mir b/llvm/test/MachineVerifier/test_g_shift.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/MachineVerifier/test_g_shift.mir
@@ -0,0 +1,21 @@
+# RUN: not --crash llc -march=arm64 -verify-machineinstrs -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
+# REQUIRES: aarch64-registered-target
+
+---
+name: test_shift
+body: |
+  bb.0:
+    %s32:_(s32) = G_IMPLICIT_DEF
+    %v2s32:_(<2 x s32>) = G_IMPLICIT_DEF
+    %s64:_(s64) = G_IMPLICIT_DEF
+    %v2s64:_(<2 x s64>) = G_IMPLICIT_DEF
+
+    ; CHECK: Shifts and rotates require operands to be either all scalars or all vectors
+    %shl:_(<2 x s64>) = G_SHL %v2s64, %s64
+
+    ; CHECK: Shifts and rotates require operands to be either all scalars or all vectors
+    %lshr:_(s32) = G_LSHR %s32, %v2s32
+
+    ; CHECK: Shifts and rotates require operands to be either all scalars or all vectors
+    %ashr:_(<2 x s32>) = G_ASHR %v2s32, %s64
+...
diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
--- a/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/KnownBitsVectorTest.cpp
@@ -201,10 +201,11 @@
    %10:_(s8) = G_CONSTANT i8 5
    %11:_(<2 x s8>) = G_BUILD_VECTOR %10:_(s8), %10:_(s8)
    %12:_(s8) = G_CONSTANT i8 1
+   %16:_(<2 x s8>) = G_BUILD_VECTOR %12:_(s8), %12:_(s8)
 
    bb.12:
    %13:_(<2 x s8>) = PHI %11(<2 x s8>), %bb.10, %14(<2 x s8>), %bb.12
-   %14:_(<2 x s8>) = G_LSHR %13, %12
+   %14:_(<2 x s8>) = G_LSHR %13, %16
    %15:_(<2 x s8>) = COPY %14
    G_BR %bb.12
 )";