diff --git a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
--- a/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -1006,6 +1006,64 @@
   return true;
 }
 
+/// Returns the element immediate value of a vector shift operand if found.
+/// This needs to detect a splat-like operation, e.g. a G_BUILD_VECTOR.
+static Optional<int64_t> getVectorShiftImm(Register Reg,
+                                           MachineRegisterInfo &MRI) {
+  const LLT Ty = MRI.getType(Reg);
+  assert(Ty.isVector() && "Expected a *vector* shift operand");
+  MachineInstr *OpMI = MRI.getVRegDef(Reg);
+  assert(OpMI && "Expected to find a vreg def for vector shift operand");
+  if (OpMI->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
+    return None;
+
+  // Check all operands are identical immediates.
+  int64_t ImmVal = 0;
+  for (unsigned Idx = 1; Idx < OpMI->getNumOperands(); ++Idx) {
+    auto VRegAndVal = getConstantVRegValWithLookThrough(OpMI->getOperand(Idx).getReg(), MRI);
+    if (!VRegAndVal)
+      return None;
+
+    if (Idx == 1)
+      ImmVal = VRegAndVal->Value;
+    if (ImmVal != VRegAndVal->Value)
+      return None;
+  }
+
+  return ImmVal;
+}
+
+/// Matches and returns the shift immediate value for a SHL instruction given
+/// a shift operand.
+static Optional<int64_t> getVectorSHLImm(LLT SrcTy, Register Reg, MachineRegisterInfo &MRI) {
+  Optional<int64_t> ShiftImm = getVectorShiftImm(Reg, MRI);
+  if (!ShiftImm)
+    return None;
+  // Check the immediate is in range for a SHL.
+  int64_t Imm = *ShiftImm;
+  if (Imm < 0)
+    return None;
+  switch (SrcTy.getElementType().getSizeInBits()) {
+  case 8:
+    if (Imm > 7)
+      return None;
+    break;
+  case 16:
+    if (Imm > 15)
+      return None;
+    break;
+  case 32:
+    if (Imm > 31)
+      return None;
+    break;
+  case 64:
+    if (Imm > 63)
+      return None;
+    break;
+  }
+  return Imm;
+}
+
 bool AArch64InstructionSelector::selectVectorSHL(
     MachineInstr &I, MachineRegisterInfo &MRI) const {
   assert(I.getOpcode() == TargetOpcode::G_SHL);
@@ -1017,21 +1075,29 @@
   if (!Ty.isVector())
     return false;
 
+  // Check if we have a vector of constants on RHS that we can select as the
+  // immediate form.
+  Optional<int64_t> ImmVal = getVectorSHLImm(Ty, Src2Reg, MRI);
+
   unsigned Opc = 0;
   if (Ty == LLT::vector(2, 64)) {
-    Opc = AArch64::USHLv2i64;
+    Opc = ImmVal ? AArch64::SHLv2i64_shift : AArch64::USHLv2i64;
   } else if (Ty == LLT::vector(4, 32)) {
-    Opc = AArch64::USHLv4i32;
+    Opc = ImmVal ? AArch64::SHLv4i32_shift : AArch64::USHLv4i32;
   } else if (Ty == LLT::vector(2, 32)) {
-    Opc = AArch64::USHLv2i32;
+    Opc = ImmVal ? AArch64::SHLv2i32_shift : AArch64::USHLv2i32;
   } else {
     LLVM_DEBUG(dbgs() << "Unhandled G_SHL type");
     return false;
   }
 
   MachineIRBuilder MIB(I);
-  auto UShl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg, Src2Reg});
-  constrainSelectedInstRegOperands(*UShl, TII, TRI, RBI);
+  auto Shl = MIB.buildInstr(Opc, {DstReg}, {Src1Reg});
+  if (ImmVal)
+    Shl.addImm(*ImmVal);
+  else
+    Shl.addUse(Src2Reg);
+  constrainSelectedInstRegOperands(*Shl, TII, TRI, RBI);
   I.eraseFromParent();
   return true;
 }
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-icmp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-icmp.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-icmp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-icmp.mir
@@ -2293,17 +2293,17 @@
     ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[MOVi32imm]], %subreg.ssub
     ; CHECK: [[INSvi32gpr:%[0-9]+]]:fpr128 = INSvi32gpr [[INSERT_SUBREG]], 1, [[MOVi32imm]]
     ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY [[INSvi32gpr]].dsub
-    ; CHECK: [[USHLv2i32_:%[0-9]+]]:fpr64 = USHLv2i32 [[COPY]], [[COPY2]]
+    ; CHECK: [[SHLv2i32_shift:%[0-9]+]]:fpr64 = SHLv2i32_shift [[COPY]], 16
     ; CHECK: [[NEGv2i32_:%[0-9]+]]:fpr64 = NEGv2i32 [[COPY2]]
-    ; CHECK: [[SSHLv2i32_:%[0-9]+]]:fpr64 = SSHLv2i32 [[USHLv2i32_]], [[NEGv2i32_]]
+    ; CHECK: [[SSHLv2i32_:%[0-9]+]]:fpr64 = SSHLv2i32 [[SHLv2i32_shift]], [[NEGv2i32_]]
     ; CHECK: [[MOVi32imm1:%[0-9]+]]:gpr32 = MOVi32imm 16
     ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
     ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[MOVi32imm1]], %subreg.ssub
     ; CHECK: [[INSvi32gpr1:%[0-9]+]]:fpr128 = INSvi32gpr [[INSERT_SUBREG1]], 1, [[MOVi32imm1]]
     ; CHECK: [[COPY3:%[0-9]+]]:fpr64 = COPY [[INSvi32gpr1]].dsub
-    ; CHECK: [[USHLv2i32_1:%[0-9]+]]:fpr64 = USHLv2i32 [[COPY1]], [[COPY3]]
+    ; CHECK: [[SHLv2i32_shift1:%[0-9]+]]:fpr64 = SHLv2i32_shift [[COPY1]], 16
     ; CHECK: [[NEGv2i32_1:%[0-9]+]]:fpr64 = NEGv2i32 [[COPY3]]
-    ; CHECK: [[SSHLv2i32_1:%[0-9]+]]:fpr64 = SSHLv2i32 [[USHLv2i32_1]], [[NEGv2i32_1]]
+    ; CHECK: [[SSHLv2i32_1:%[0-9]+]]:fpr64 = SSHLv2i32 [[SHLv2i32_shift1]], [[NEGv2i32_1]]
     ; CHECK: [[CMGTv2i32_:%[0-9]+]]:fpr64 = CMGTv2i32 [[SSHLv2i32_]], [[SSHLv2i32_1]]
     ; CHECK: $d0 = COPY [[CMGTv2i32_]]
     ; CHECK: RET_ReallyLR implicit $d0
@@ -2591,17 +2591,17 @@
     ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[MOVi32imm]], %subreg.ssub
     ; CHECK: [[INSvi32gpr:%[0-9]+]]:fpr128 = INSvi32gpr [[INSERT_SUBREG]], 1, [[MOVi32imm]]
     ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY [[INSvi32gpr]].dsub
-    ; CHECK: [[USHLv2i32_:%[0-9]+]]:fpr64 = USHLv2i32 [[COPY]], [[COPY2]]
+    ; CHECK: [[SHLv2i32_shift:%[0-9]+]]:fpr64 = SHLv2i32_shift [[COPY]], 16
     ; CHECK: [[NEGv2i32_:%[0-9]+]]:fpr64 = NEGv2i32 [[COPY2]]
-    ; CHECK: [[SSHLv2i32_:%[0-9]+]]:fpr64 = SSHLv2i32 [[USHLv2i32_]], [[NEGv2i32_]]
+    ; CHECK: [[SSHLv2i32_:%[0-9]+]]:fpr64 = SSHLv2i32 [[SHLv2i32_shift]], [[NEGv2i32_]]
     ; CHECK: [[MOVi32imm1:%[0-9]+]]:gpr32 = MOVi32imm 16
     ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
     ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[MOVi32imm1]], %subreg.ssub
     ; CHECK: [[INSvi32gpr1:%[0-9]+]]:fpr128 = INSvi32gpr [[INSERT_SUBREG1]], 1, [[MOVi32imm1]]
     ; CHECK: [[COPY3:%[0-9]+]]:fpr64 = COPY [[INSvi32gpr1]].dsub
-    ; CHECK: [[USHLv2i32_1:%[0-9]+]]:fpr64 = USHLv2i32 [[COPY1]], [[COPY3]]
+    ; CHECK: [[SHLv2i32_shift1:%[0-9]+]]:fpr64 = SHLv2i32_shift [[COPY1]], 16
     ; CHECK: [[NEGv2i32_1:%[0-9]+]]:fpr64 = NEGv2i32 [[COPY3]]
-    ; CHECK: [[SSHLv2i32_1:%[0-9]+]]:fpr64 = SSHLv2i32 [[USHLv2i32_1]], [[NEGv2i32_1]]
+    ; CHECK: [[SSHLv2i32_1:%[0-9]+]]:fpr64 = SSHLv2i32 [[SHLv2i32_shift1]], [[NEGv2i32_1]]
     ; CHECK: [[CMGEv2i32_:%[0-9]+]]:fpr64 = CMGEv2i32 [[SSHLv2i32_]], [[SSHLv2i32_1]]
     ; CHECK: $d0 = COPY [[CMGEv2i32_]]
     ; CHECK: RET_ReallyLR implicit $d0
@@ -2889,17 +2889,17 @@
     ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[MOVi32imm]], %subreg.ssub
     ; CHECK: [[INSvi32gpr:%[0-9]+]]:fpr128 = INSvi32gpr [[INSERT_SUBREG]], 1, [[MOVi32imm]]
     ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY [[INSvi32gpr]].dsub
-    ; CHECK: [[USHLv2i32_:%[0-9]+]]:fpr64 = USHLv2i32 [[COPY]], [[COPY2]]
+    ; CHECK: [[SHLv2i32_shift:%[0-9]+]]:fpr64 = SHLv2i32_shift [[COPY]], 16
     ; CHECK: [[NEGv2i32_:%[0-9]+]]:fpr64 = NEGv2i32 [[COPY2]]
-    ; CHECK: [[SSHLv2i32_:%[0-9]+]]:fpr64 = SSHLv2i32 [[USHLv2i32_]], [[NEGv2i32_]]
+    ; CHECK: [[SSHLv2i32_:%[0-9]+]]:fpr64 = SSHLv2i32 [[SHLv2i32_shift]], [[NEGv2i32_]]
     ; CHECK: [[MOVi32imm1:%[0-9]+]]:gpr32 = MOVi32imm 16
     ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
     ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[MOVi32imm1]], %subreg.ssub
     ; CHECK: [[INSvi32gpr1:%[0-9]+]]:fpr128 = INSvi32gpr [[INSERT_SUBREG1]], 1, [[MOVi32imm1]]
     ; CHECK: [[COPY3:%[0-9]+]]:fpr64 = COPY [[INSvi32gpr1]].dsub
-    ; CHECK: [[USHLv2i32_1:%[0-9]+]]:fpr64 = USHLv2i32 [[COPY1]], [[COPY3]]
+    ; CHECK: [[SHLv2i32_shift1:%[0-9]+]]:fpr64 = SHLv2i32_shift [[COPY1]], 16
     ; CHECK: [[NEGv2i32_1:%[0-9]+]]:fpr64 = NEGv2i32 [[COPY3]]
-    ; CHECK: [[SSHLv2i32_1:%[0-9]+]]:fpr64 = SSHLv2i32 [[USHLv2i32_1]], [[NEGv2i32_1]]
+    ; CHECK: [[SSHLv2i32_1:%[0-9]+]]:fpr64 = SSHLv2i32 [[SHLv2i32_shift1]], [[NEGv2i32_1]]
     ; CHECK: [[CMGTv2i32_:%[0-9]+]]:fpr64 = CMGTv2i32 [[SSHLv2i32_1]], [[SSHLv2i32_]]
     ; CHECK: $d0 = COPY [[CMGTv2i32_]]
     ; CHECK: RET_ReallyLR implicit $d0
@@ -3187,17 +3187,17 @@
     ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[MOVi32imm]], %subreg.ssub
     ; CHECK: [[INSvi32gpr:%[0-9]+]]:fpr128 = INSvi32gpr [[INSERT_SUBREG]], 1, [[MOVi32imm]]
     ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY [[INSvi32gpr]].dsub
-    ; CHECK: [[USHLv2i32_:%[0-9]+]]:fpr64 = USHLv2i32 [[COPY]], [[COPY2]]
+    ; CHECK: [[SHLv2i32_shift:%[0-9]+]]:fpr64 = SHLv2i32_shift [[COPY]], 16
     ; CHECK: [[NEGv2i32_:%[0-9]+]]:fpr64 = NEGv2i32 [[COPY2]]
-    ; CHECK: [[SSHLv2i32_:%[0-9]+]]:fpr64 = SSHLv2i32 [[USHLv2i32_]], [[NEGv2i32_]]
+    ; CHECK: [[SSHLv2i32_:%[0-9]+]]:fpr64 = SSHLv2i32 [[SHLv2i32_shift]], [[NEGv2i32_]]
     ; CHECK: [[MOVi32imm1:%[0-9]+]]:gpr32 = MOVi32imm 16
     ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
     ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[MOVi32imm1]], %subreg.ssub
     ; CHECK: [[INSvi32gpr1:%[0-9]+]]:fpr128 = INSvi32gpr [[INSERT_SUBREG1]], 1, [[MOVi32imm1]]
     ; CHECK: [[COPY3:%[0-9]+]]:fpr64 = COPY [[INSvi32gpr1]].dsub
-    ; CHECK: [[USHLv2i32_1:%[0-9]+]]:fpr64 = USHLv2i32 [[COPY1]], [[COPY3]]
+    ; CHECK: [[SHLv2i32_shift1:%[0-9]+]]:fpr64 = SHLv2i32_shift [[COPY1]], 16
     ; CHECK: [[NEGv2i32_1:%[0-9]+]]:fpr64 = NEGv2i32 [[COPY3]]
-    ; CHECK: [[SSHLv2i32_1:%[0-9]+]]:fpr64 = SSHLv2i32 [[USHLv2i32_1]], [[NEGv2i32_1]]
+    ; CHECK: [[SSHLv2i32_1:%[0-9]+]]:fpr64 = SSHLv2i32 [[SHLv2i32_shift1]], [[NEGv2i32_1]]
     ; CHECK: [[CMGEv2i32_:%[0-9]+]]:fpr64 = CMGEv2i32 [[SSHLv2i32_1]], [[SSHLv2i32_]]
     ; CHECK: $d0 = COPY [[CMGEv2i32_]]
     ; CHECK: RET_ReallyLR implicit $d0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-shift.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-shift.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-shift.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-vector-shift.mir
@@ -28,6 +28,79 @@
     $d0 = COPY %2(<2 x s32>)
     RET_ReallyLR implicit $d0
 
+...
+---
+name:            shl_v2i32_imm
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: fpr }
+  - { id: 1, class: fpr }
+  - { id: 2, class: gpr }
+  - { id: 3, class: fpr }
+liveins:
+  - { reg: '$d0' }
+frameInfo:
+  maxAlignment:    1
+machineFunctionInfo: {}
+body:             |
+  bb.1:
+    liveins: $d0
+
+    ; CHECK-LABEL: name: shl_v2i32_imm
+    ; CHECK: liveins: $d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[SHLv2i32_shift:%[0-9]+]]:fpr64 = SHLv2i32_shift [[COPY]], 24
+    ; CHECK: $d0 = COPY [[SHLv2i32_shift]]
+    ; CHECK: RET_ReallyLR implicit $d0
+    %0:fpr(<2 x s32>) = COPY $d0
+    %2:gpr(s32) = G_CONSTANT i32 24
+    %1:fpr(<2 x s32>) = G_BUILD_VECTOR %2(s32), %2(s32)
+    %3:fpr(<2 x s32>) = G_SHL %0, %1(<2 x s32>)
+    $d0 = COPY %3(<2 x s32>)
+    RET_ReallyLR implicit $d0
+
+...
+---
+name:            shl_v2i32_imm_out_of_range
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: fpr }
+  - { id: 1, class: fpr }
+  - { id: 2, class: gpr }
+  - { id: 3, class: fpr }
+liveins:
+  - { reg: '$d0' }
+frameInfo:
+  maxAlignment:    1
+machineFunctionInfo: {}
+body:             |
+  bb.1:
+    liveins: $d0
+
+    ; CHECK-LABEL: name: shl_v2i32_imm_out_of_range
+    ; CHECK: liveins: $d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 40
+    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[MOVi32imm]], %subreg.ssub
+    ; CHECK: [[INSvi32gpr:%[0-9]+]]:fpr128 = INSvi32gpr [[INSERT_SUBREG]], 1, [[MOVi32imm]]
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY [[INSvi32gpr]].dsub
+    ; CHECK: [[USHLv2i32_:%[0-9]+]]:fpr64 = USHLv2i32 [[COPY]], [[COPY1]]
+    ; CHECK: $d0 = COPY [[USHLv2i32_]]
+    ; CHECK: RET_ReallyLR implicit $d0
+    %0:fpr(<2 x s32>) = COPY $d0
+    %2:gpr(s32) = G_CONSTANT i32 40
+    %1:fpr(<2 x s32>) = G_BUILD_VECTOR %2(s32), %2(s32)
+    %3:fpr(<2 x s32>) = G_SHL %0, %1(<2 x s32>)
+    $d0 = COPY %3(<2 x s32>)
+    RET_ReallyLR implicit $d0
+
 ...
 ---
 name:            shl_v4i32
@@ -57,6 +130,40 @@
     $q0 = COPY %2(<4 x s32>)
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            shl_v4i32_imm
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: fpr }
+  - { id: 1, class: fpr }
+  - { id: 2, class: gpr }
+  - { id: 3, class: fpr }
+liveins:
+  - { reg: '$q0' }
+frameInfo:
+  maxAlignment:    1
+machineFunctionInfo: {}
+body:             |
+  bb.1:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: shl_v4i32_imm
+    ; CHECK: liveins: $q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK: [[SHLv4i32_shift:%[0-9]+]]:fpr128 = SHLv4i32_shift [[COPY]], 24
+    ; CHECK: $q0 = COPY [[SHLv4i32_shift]]
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0:fpr(<4 x s32>) = COPY $q0
+    %2:gpr(s32) = G_CONSTANT i32 24
+    %1:fpr(<4 x s32>) = G_BUILD_VECTOR %2(s32), %2(s32), %2(s32), %2(s32)
+    %3:fpr(<4 x s32>) = G_SHL %0, %1(<4 x s32>)
+    $q0 = COPY %3(<4 x s32>)
+    RET_ReallyLR implicit $q0
+
 ...
 ---
 name:            shl_v2i64
@@ -86,6 +193,79 @@
     $q0 = COPY %2(<2 x s64>)
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            shl_v2i64_imm
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: fpr }
+  - { id: 1, class: fpr }
+  - { id: 2, class: gpr }
+  - { id: 3, class: fpr }
+liveins:
+  - { reg: '$q0' }
+frameInfo:
+  maxAlignment:    1
+machineFunctionInfo: {}
+body:             |
+  bb.1:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: shl_v2i64_imm
+    ; CHECK: liveins: $q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK: [[SHLv2i64_shift:%[0-9]+]]:fpr128 = SHLv2i64_shift [[COPY]], 24
+    ; CHECK: $q0 = COPY [[SHLv2i64_shift]]
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0:fpr(<2 x s64>) = COPY $q0
+    %2:gpr(s64) = G_CONSTANT i64 24
+    %1:fpr(<2 x s64>) = G_BUILD_VECTOR %2(s64), %2(s64)
+    %3:fpr(<2 x s64>) = G_SHL %0, %1(<2 x s64>)
+    $q0 = COPY %3(<2 x s64>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            shl_v2i64_imm_out_of_range
+alignment:       4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: fpr }
+  - { id: 1, class: fpr }
+  - { id: 2, class: gpr }
+  - { id: 3, class: fpr }
+liveins:
+  - { reg: '$q0' }
+frameInfo:
+  maxAlignment:    1
+machineFunctionInfo: {}
+body:             |
+  bb.1:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: shl_v2i64_imm_out_of_range
+    ; CHECK: liveins: $q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 70
+    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
+    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[SUBREG_TO_REG]], %subreg.dsub
+    ; CHECK: [[INSvi64gpr:%[0-9]+]]:fpr128 = INSvi64gpr [[INSERT_SUBREG]], 1, [[SUBREG_TO_REG]]
+    ; CHECK: [[USHLv2i64_:%[0-9]+]]:fpr128 = USHLv2i64 [[COPY]], [[INSvi64gpr]]
+    ; CHECK: $q0 = COPY [[USHLv2i64_]]
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0:fpr(<2 x s64>) = COPY $q0
+    %2:gpr(s64) = G_CONSTANT i64 70
+    %1:fpr(<2 x s64>) = G_BUILD_VECTOR %2(s64), %2(s64)
+    %3:fpr(<2 x s64>) = G_SHL %0, %1(<2 x s64>)
+    $q0 = COPY %3(<2 x s64>)
+    RET_ReallyLR implicit $q0
+
 ...
 ---
 name:            ashr_v2i32