Index: lib/Target/X86/X86InstructionSelector.cpp
===================================================================
--- lib/Target/X86/X86InstructionSelector.cpp
+++ lib/Target/X86/X86InstructionSelector.cpp
@@ -112,6 +112,8 @@
   bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
   bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
+  bool selectShift(MachineInstr &I, MachineRegisterInfo &MRI,
+                   MachineFunction &MF) const;
 
   // emit insert subreg instruction and insert it before MachineInstr &I
   bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
@@ -373,6 +375,10 @@
   case TargetOpcode::G_IMPLICIT_DEF:
   case TargetOpcode::G_PHI:
     return selectImplicitDefOrPHI(I, MRI);
+  case TargetOpcode::G_SHL:
+  case TargetOpcode::G_ASHR:
+  case TargetOpcode::G_LSHR:
+    return selectShift(I, MRI, MF);
   }
 
   return false;
@@ -1396,6 +1402,85 @@
   return true;
 }
 
+// Currently the GlobalISel TableGen backend generates patterns for
+// shift-by-immediate and shift-by-1, but only with an i8 shift count. In
+// G_LSHR/G_ASHR/G_SHL, as in LLVM IR, both operands have the same type, so for
+// now only i8 shifts can use the auto-generated TableGen patterns.
+bool X86InstructionSelector::selectShift(MachineInstr &I,
+                                         MachineRegisterInfo &MRI,
+                                         MachineFunction &MF) const {
+
+  assert((I.getOpcode() == TargetOpcode::G_SHL ||
+          I.getOpcode() == TargetOpcode::G_ASHR ||
+          I.getOpcode() == TargetOpcode::G_LSHR) &&
+         "unexpected instruction");
+
+  unsigned DstReg = I.getOperand(0).getReg();
+  const LLT DstTy = MRI.getType(DstReg);
+  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
+
+  const static struct ShiftEntry {
+    unsigned SizeInBits;
+    unsigned CReg;
+    unsigned OpLSHR;
+    unsigned OpASHR;
+    unsigned OpSHL;
+  } OpTable[] = {
+      {8, X86::CL, X86::SHR8rCL, X86::SAR8rCL, X86::SHL8rCL},      // i8
+      {16, X86::CX, X86::SHR16rCL, X86::SAR16rCL, X86::SHL16rCL},  // i16
+      {32, X86::ECX, X86::SHR32rCL, X86::SAR32rCL, X86::SHL32rCL}, // i32
+      {64, X86::RCX, X86::SHR64rCL, X86::SAR64rCL, X86::SHL64rCL}  // i64
+  };
+
+  if (DstRB.getID() != X86::GPRRegBankID)
+    return false;
+
+  auto ShiftEntryIt = std::find_if(
+      std::begin(OpTable), std::end(OpTable), [DstTy](const ShiftEntry &El) {
+        return El.SizeInBits == DstTy.getSizeInBits();
+      });
+  if (ShiftEntryIt == std::end(OpTable))
+    return false;
+
+  unsigned CReg = ShiftEntryIt->CReg;
+  unsigned Opcode = 0;
+  switch (I.getOpcode()) {
+  case TargetOpcode::G_SHL:
+    Opcode = ShiftEntryIt->OpSHL;
+    break;
+  case TargetOpcode::G_ASHR:
+    Opcode = ShiftEntryIt->OpASHR;
+    break;
+  case TargetOpcode::G_LSHR:
+    Opcode = ShiftEntryIt->OpLSHR;
+    break;
+  default:
+    return false;
+  }
+
+  unsigned Op0Reg = I.getOperand(1).getReg();
+  unsigned Op1Reg = I.getOperand(2).getReg();
+
+  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
+          ShiftEntryIt->CReg)
+      .addReg(Op1Reg);
+
+  // The shift instruction uses X86::CL. If we defined a super-register
+  // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
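+  // For example, for an i32 G_ASHR the selected sequence is
+  //   $ecx = COPY %shift_amount
+  //   $cl = KILL killed $ecx
+  //   %dst:gr32 = SAR32rCL %src, implicit-def $eflags, implicit $cl
+  // as checked by the select-ashr-scalar.mir test below.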
+ if (CReg != X86::CL) + BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::KILL), + X86::CL) + .addReg(CReg, RegState::Kill); + + MachineInstr &ShiftInst = + *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg) + .addReg(Op0Reg); + + constrainSelectedInstRegOperands(ShiftInst, TII, TRI, RBI); + I.eraseFromParent(); + return true; +} + InstructionSelector * llvm::createX86InstructionSelector(const X86TargetMachine &TM, X86Subtarget &Subtarget, Index: lib/Target/X86/X86LegalizerInfo.cpp =================================================================== --- lib/Target/X86/X86LegalizerInfo.cpp +++ lib/Target/X86/X86LegalizerInfo.cpp @@ -130,6 +130,11 @@ .maxScalar(0, s32) .widenScalarToNextPow2(0, /*Min*/ 8); getActionDefinitionsBuilder(G_INTTOPTR).legalFor({s32, p0}); + + // Shifts + getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR}) + .legalFor({s8, s16, s32}) + .clampScalar(0, s8, s32); } // Control-flow @@ -209,6 +214,11 @@ // Comparison setAction({G_ICMP, 1, s64}, Legal); + // Shifts + getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR}) + .legalFor({s8, s16, s32, s64}) + .clampScalar(0, s8, s64); + // Merge/Unmerge setAction({G_MERGE_VALUES, s128}, Legal); setAction({G_UNMERGE_VALUES, 1, s128}, Legal); Index: lib/Target/X86/X86RegisterBankInfo.cpp =================================================================== --- lib/Target/X86/X86RegisterBankInfo.cpp +++ lib/Target/X86/X86RegisterBankInfo.cpp @@ -173,6 +173,10 @@ switch (Opc) { case TargetOpcode::G_ADD: case TargetOpcode::G_SUB: + case TargetOpcode::G_MUL: + case TargetOpcode::G_SHL: + case TargetOpcode::G_LSHR: + case TargetOpcode::G_ASHR: return getSameOperandsMapping(MI, false); break; case TargetOpcode::G_FADD: Index: test/CodeGen/X86/GlobalISel/ashr-scalar.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/GlobalISel/ashr-scalar.ll @@ -0,0 +1,182 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64 + +define i64 @test_ashr_i64(i64 %arg1, i64 %arg2) { +; X64-LABEL: test_ashr_i64: +; X64: # %bb.0: +; X64-NEXT: movq %rsi, %rcx +; X64-NEXT: # kill: def $cl killed $rcx +; X64-NEXT: sarq %cl, %rdi +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: retq + %res = ashr i64 %arg1, %arg2 + ret i64 %res +} + +define i64 @test_ashr_i64_imm(i64 %arg1) { +; X64-LABEL: test_ashr_i64_imm: +; X64: # %bb.0: +; X64-NEXT: movq $5, %rcx +; X64-NEXT: # kill: def $cl killed $rcx +; X64-NEXT: sarq %cl, %rdi +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: retq + %res = ashr i64 %arg1, 5 + ret i64 %res +} + +define i64 @test_ashr_i64_imm1(i64 %arg1) { +; X64-LABEL: test_ashr_i64_imm1: +; X64: # %bb.0: +; X64-NEXT: movq $1, %rcx +; X64-NEXT: # kill: def $cl killed $rcx +; X64-NEXT: sarq %cl, %rdi +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: retq + %res = ashr i64 %arg1, 1 + ret i64 %res +} + +define i32 @test_ashr_i32(i32 %arg1, i32 %arg2) { +; X64-LABEL: test_ashr_i32: +; X64: # %bb.0: +; X64-NEXT: movl %esi, %ecx +; X64-NEXT: # kill: def $cl killed $ecx +; X64-NEXT: sarl %cl, %edi +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %res = ashr i32 %arg1, %arg2 + ret i32 %res +} + +define i32 @test_ashr_i32_imm(i32 %arg1) { +; X64-LABEL: test_ashr_i32_imm: +; X64: # %bb.0: +; X64-NEXT: movl $5, %ecx +; X64-NEXT: # kill: def $cl killed $ecx +; X64-NEXT: sarl %cl, %edi +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %res = ashr i32 %arg1, 5 + 
ret i32 %res +} + +define i32 @test_ashr_i32_imm1(i32 %arg1) { +; X64-LABEL: test_ashr_i32_imm1: +; X64: # %bb.0: +; X64-NEXT: movl $1, %ecx +; X64-NEXT: # kill: def $cl killed $ecx +; X64-NEXT: sarl %cl, %edi +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %res = ashr i32 %arg1, 1 + ret i32 %res +} + +define i16 @test_ashr_i16(i32 %arg1, i32 %arg2) { +; X64-LABEL: test_ashr_i16: +; X64: # %bb.0: +; X64-NEXT: movl %esi, %ecx +; X64-NEXT: # kill: def $cl killed $cx +; X64-NEXT: sarw %cl, %di +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %a = trunc i32 %arg1 to i16 + %a2 = trunc i32 %arg2 to i16 + %res = ashr i16 %a, %a2 + ret i16 %res +} + +define i16 @test_ashr_i16_imm(i32 %arg1) { +; X64-LABEL: test_ashr_i16_imm: +; X64: # %bb.0: +; X64-NEXT: movw $5, %cx +; X64-NEXT: # kill: def $cl killed $cx +; X64-NEXT: sarw %cl, %di +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %a = trunc i32 %arg1 to i16 + %res = ashr i16 %a, 5 + ret i16 %res +} + +define i16 @test_ashr_i16_imm1(i32 %arg1) { +; X64-LABEL: test_ashr_i16_imm1: +; X64: # %bb.0: +; X64-NEXT: movw $1, %cx +; X64-NEXT: # kill: def $cl killed $cx +; X64-NEXT: sarw %cl, %di +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %a = trunc i32 %arg1 to i16 + %res = ashr i16 %a, 1 + ret i16 %res +} + +define i8 @test_ashr_i8(i32 %arg1, i32 %arg2) { +; X64-LABEL: test_ashr_i8: +; X64: # %bb.0: +; X64-NEXT: movl %esi, %ecx +; X64-NEXT: sarb %cl, %dil +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %a = trunc i32 %arg1 to i8 + %a2 = trunc i32 %arg2 to i8 + %res = ashr i8 %a, %a2 + ret i8 %res +} + +define i8 @test_ashr_i8_imm(i32 %arg1) { +; X64-LABEL: test_ashr_i8_imm: +; X64: # %bb.0: +; X64-NEXT: sarb $5, %dil +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %a = trunc i32 %arg1 to i8 + %res = ashr i8 %a, 5 + ret i8 %res +} + +define i8 @test_ashr_i8_imm1(i32 %arg1) { +; X64-LABEL: test_ashr_i8_imm1: +; X64: # %bb.0: +; X64-NEXT: sarb %dil +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %a = trunc i32 %arg1 to i8 + %res = ashr i8 %a, 1 + ret i8 %res +} + +define i1 @test_ashr_i1(i32 %arg1, i32 %arg2) { +; X64-LABEL: test_ashr_i1: +; X64: # %bb.0: +; X64-NEXT: shlb $7, %dil +; X64-NEXT: sarb $7, %dil +; X64-NEXT: shlb $7, %sil +; X64-NEXT: sarb $7, %sil +; X64-NEXT: movl %esi, %ecx +; X64-NEXT: sarb %cl, %dil +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %a = trunc i32 %arg1 to i1 + %a2 = trunc i32 %arg2 to i1 + %res = ashr i1 %a, %a2 + ret i1 %res +} + +define i1 @test_ashr_i1_imm1(i32 %arg1) { +; X64-LABEL: test_ashr_i1_imm1: +; X64: # %bb.0: +; X64-NEXT: movb $-1, %cl +; X64-NEXT: shlb $7, %dil +; X64-NEXT: sarb $7, %dil +; X64-NEXT: shlb $7, %cl +; X64-NEXT: sarb $7, %cl +; X64-NEXT: sarb %cl, %dil +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %a = trunc i32 %arg1 to i1 + %res = ashr i1 %a, 1 + ret i1 %res +} Index: test/CodeGen/X86/GlobalISel/ext-x86-64.ll =================================================================== --- test/CodeGen/X86/GlobalISel/ext-x86-64.ll +++ test/CodeGen/X86/GlobalISel/ext-x86-64.ll @@ -18,7 +18,14 @@ define i64 @test_sext_i8(i8 %val) { ; X64-LABEL: test_sext_i8: ; X64: # %bb.0: -; X64-NEXT: movsbq %dil, %rax +; X64-NEXT: # kill: def $edi killed $edi def $rdi +; X64-NEXT: movq $56, %rcx +; X64-NEXT: # kill: def $cl killed $rcx +; X64-NEXT: shlq %cl, %rdi +; X64-NEXT: movq $56, %rcx +; X64-NEXT: # kill: def $cl killed $rcx +; X64-NEXT: sarq %cl, %rdi +; X64-NEXT: movq %rdi, %rax ; X64-NEXT: retq %r = sext i8 %val to i64 ret i64 %r @@ -27,7 +34,14 @@ define i64 @test_sext_i16(i16 %val) { ; X64-LABEL: 
test_sext_i16: ; X64: # %bb.0: -; X64-NEXT: movswq %di, %rax +; X64-NEXT: # kill: def $edi killed $edi def $rdi +; X64-NEXT: movq $48, %rcx +; X64-NEXT: # kill: def $cl killed $rcx +; X64-NEXT: shlq %cl, %rdi +; X64-NEXT: movq $48, %rcx +; X64-NEXT: # kill: def $cl killed $rcx +; X64-NEXT: sarq %cl, %rdi +; X64-NEXT: movq %rdi, %rax ; X64-NEXT: retq %r = sext i16 %val to i64 ret i64 %r Index: test/CodeGen/X86/GlobalISel/ext.ll =================================================================== --- test/CodeGen/X86/GlobalISel/ext.ll +++ test/CodeGen/X86/GlobalISel/ext.ll @@ -86,7 +86,13 @@ define i32 @test_sext_i8(i8 %val) { ; X64-LABEL: test_sext_i8: ; X64: # %bb.0: -; X64-NEXT: movsbl %dil, %eax +; X64-NEXT: movl $24, %ecx +; X64-NEXT: # kill: def $cl killed $ecx +; X64-NEXT: shll %cl, %edi +; X64-NEXT: movl $24, %ecx +; X64-NEXT: # kill: def $cl killed $ecx +; X64-NEXT: sarl %cl, %edi +; X64-NEXT: movl %edi, %eax ; X64-NEXT: retq ; ; X32-LABEL: test_sext_i8: @@ -100,7 +106,13 @@ define i32 @test_sext_i16(i16 %val) { ; X64-LABEL: test_sext_i16: ; X64: # %bb.0: -; X64-NEXT: movswl %di, %eax +; X64-NEXT: movl $16, %ecx +; X64-NEXT: # kill: def $cl killed $ecx +; X64-NEXT: shll %cl, %edi +; X64-NEXT: movl $16, %ecx +; X64-NEXT: # kill: def $cl killed $ecx +; X64-NEXT: sarl %cl, %edi +; X64-NEXT: movl %edi, %eax ; X64-NEXT: retq ; ; X32-LABEL: test_sext_i16: Index: test/CodeGen/X86/GlobalISel/gep.ll =================================================================== --- test/CodeGen/X86/GlobalISel/gep.ll +++ test/CodeGen/X86/GlobalISel/gep.ll @@ -5,10 +5,16 @@ define i32* @test_gep_i8(i32 *%arr, i8 %ind) { ; X64_GISEL-LABEL: test_gep_i8: ; X64_GISEL: # %bb.0: +; X64_GISEL-NEXT: # kill: def $esi killed $esi def $rsi ; X64_GISEL-NEXT: movq $4, %rax -; X64_GISEL-NEXT: movsbq %sil, %rcx -; X64_GISEL-NEXT: imulq %rax, %rcx -; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax +; X64_GISEL-NEXT: movq $56, %rcx +; X64_GISEL-NEXT: # kill: def $cl killed $rcx +; X64_GISEL-NEXT: shlq %cl, %rsi +; X64_GISEL-NEXT: movq $56, %rcx +; X64_GISEL-NEXT: # kill: def $cl killed $rcx +; X64_GISEL-NEXT: sarq %cl, %rsi +; X64_GISEL-NEXT: imulq %rax, %rsi +; X64_GISEL-NEXT: leaq (%rdi,%rsi), %rax ; X64_GISEL-NEXT: retq ; ; X64-LABEL: test_gep_i8: @@ -39,10 +45,16 @@ define i32* @test_gep_i16(i32 *%arr, i16 %ind) { ; X64_GISEL-LABEL: test_gep_i16: ; X64_GISEL: # %bb.0: +; X64_GISEL-NEXT: # kill: def $esi killed $esi def $rsi ; X64_GISEL-NEXT: movq $4, %rax -; X64_GISEL-NEXT: movswq %si, %rcx -; X64_GISEL-NEXT: imulq %rax, %rcx -; X64_GISEL-NEXT: leaq (%rdi,%rcx), %rax +; X64_GISEL-NEXT: movq $48, %rcx +; X64_GISEL-NEXT: # kill: def $cl killed $rcx +; X64_GISEL-NEXT: shlq %cl, %rsi +; X64_GISEL-NEXT: movq $48, %rcx +; X64_GISEL-NEXT: # kill: def $cl killed $rcx +; X64_GISEL-NEXT: sarq %cl, %rsi +; X64_GISEL-NEXT: imulq %rax, %rsi +; X64_GISEL-NEXT: leaq (%rdi,%rsi), %rax ; X64_GISEL-NEXT: retq ; ; X64-LABEL: test_gep_i16: Index: test/CodeGen/X86/GlobalISel/legalize-ashr-scalar.mir =================================================================== --- /dev/null +++ test/CodeGen/X86/GlobalISel/legalize-ashr-scalar.mir @@ -0,0 +1,84 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s +--- | + + define void @test_ashr() { ret void } + define void @test_ashr_i1() { ret void } +... 
+--- +name: test_ashr +alignment: 4 +legalized: false +regBankSelected: false +tracksRegLiveness: true +registers: + - { id: 0, class: _, preferred-register: '' } + - { id: 1, class: _, preferred-register: '' } + - { id: 2, class: _, preferred-register: '' } + - { id: 3, class: _, preferred-register: '' } + - { id: 4, class: _, preferred-register: '' } + - { id: 5, class: _, preferred-register: '' } + - { id: 6, class: _, preferred-register: '' } + - { id: 7, class: _, preferred-register: '' } + - { id: 8, class: _, preferred-register: '' } + - { id: 9, class: _, preferred-register: '' } + - { id: 10, class: _, preferred-register: '' } + - { id: 11, class: _, preferred-register: '' } +body: | + bb.1 (%ir-block.0): + liveins: $rdi, $rsi + + ; CHECK-LABEL: name: test_ashr + ; CHECK: liveins: $rdi, $rsi + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi + ; CHECK: RET 0 + %0(s64) = COPY $rdi + %1(s64) = COPY $rsi + %2(s64) = G_ASHR %0, %1 + + %3(s32) = G_TRUNC %0 + %4(s32) = G_TRUNC %1 + %5(s32) = G_ASHR %3, %4 + + %6(s16) = G_TRUNC %0 + %7(s16) = G_TRUNC %1 + %8(s16) = G_ASHR %6, %7 + + %9(s8) = G_TRUNC %0 + %10(s8) = G_TRUNC %1 + %11(s8) = G_ASHR %9, %10 + + RET 0 + +... +--- +name: test_ashr_i1 +alignment: 4 +legalized: false +regBankSelected: false +tracksRegLiveness: true +registers: + - { id: 0, class: _, preferred-register: '' } + - { id: 1, class: _, preferred-register: '' } + - { id: 2, class: _, preferred-register: '' } + - { id: 3, class: _, preferred-register: '' } + - { id: 4, class: _, preferred-register: '' } +body: | + bb.1 (%ir-block.0): + liveins: $rdi, $rsi + + ; CHECK-LABEL: name: test_ashr_i1 + ; CHECK: liveins: $rdi, $rsi + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi + ; CHECK: RET 0 + %0(s64) = COPY $rdi + %1(s64) = COPY $rsi + %2(s1) = G_TRUNC %0 + %3(s1) = G_TRUNC %1 + %4(s1) = G_ASHR %2, %3 + + RET 0 + +... Index: test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir +++ test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir @@ -77,9 +77,11 @@ ; CHECK-LABEL: name: test_sext_i1 ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil - ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s8) - ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[TRUNC]](s1) - ; CHECK: $rax = COPY [[SEXT]](s64) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63 + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s8) + ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C]] + ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]] + ; CHECK: $rax = COPY [[ASHR]](s64) ; CHECK: RET 0, implicit $rax %0(s8) = COPY $dil %1(s1) = G_TRUNC %0(s8) Index: test/CodeGen/X86/GlobalISel/legalize-lshr-scalar.mir =================================================================== --- /dev/null +++ test/CodeGen/X86/GlobalISel/legalize-lshr-scalar.mir @@ -0,0 +1,84 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s +--- | + + define void @test_lshr() { ret void } + define void @test_lshr_i1() { ret void } +... 
+--- +name: test_lshr +alignment: 4 +legalized: false +regBankSelected: false +tracksRegLiveness: true +registers: + - { id: 0, class: _, preferred-register: '' } + - { id: 1, class: _, preferred-register: '' } + - { id: 2, class: _, preferred-register: '' } + - { id: 3, class: _, preferred-register: '' } + - { id: 4, class: _, preferred-register: '' } + - { id: 5, class: _, preferred-register: '' } + - { id: 6, class: _, preferred-register: '' } + - { id: 7, class: _, preferred-register: '' } + - { id: 8, class: _, preferred-register: '' } + - { id: 9, class: _, preferred-register: '' } + - { id: 10, class: _, preferred-register: '' } + - { id: 11, class: _, preferred-register: '' } +body: | + bb.1 (%ir-block.0): + liveins: $rdi, $rsi + + ; CHECK-LABEL: name: test_lshr + ; CHECK: liveins: $rdi, $rsi + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi + ; CHECK: RET 0 + %0(s64) = COPY $rdi + %1(s64) = COPY $rsi + %2(s64) = G_LSHR %0, %1 + + %3(s32) = G_TRUNC %0 + %4(s32) = G_TRUNC %1 + %5(s32) = G_LSHR %3, %4 + + %6(s16) = G_TRUNC %0 + %7(s16) = G_TRUNC %1 + %8(s16) = G_LSHR %6, %7 + + %9(s8) = G_TRUNC %0 + %10(s8) = G_TRUNC %1 + %11(s8) = G_LSHR %9, %10 + + RET 0 + +... +--- +name: test_lshr_i1 +alignment: 4 +legalized: false +regBankSelected: false +tracksRegLiveness: true +registers: + - { id: 0, class: _, preferred-register: '' } + - { id: 1, class: _, preferred-register: '' } + - { id: 2, class: _, preferred-register: '' } + - { id: 3, class: _, preferred-register: '' } + - { id: 4, class: _, preferred-register: '' } +body: | + bb.1 (%ir-block.0): + liveins: $rdi, $rsi + + ; CHECK-LABEL: name: test_lshr_i1 + ; CHECK: liveins: $rdi, $rsi + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi + ; CHECK: RET 0 + %0(s64) = COPY $rdi + %1(s64) = COPY $rsi + %2(s1) = G_TRUNC %0 + %3(s1) = G_TRUNC %1 + %4(s1) = G_LSHR %2, %3 + + RET 0 + +... Index: test/CodeGen/X86/GlobalISel/legalize-shl-scalar.mir =================================================================== --- /dev/null +++ test/CodeGen/X86/GlobalISel/legalize-shl-scalar.mir @@ -0,0 +1,84 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s +--- | + + define void @test_shl() { ret void } + define void @test_shl_i1() { ret void } +... 
+--- +name: test_shl +alignment: 4 +legalized: false +regBankSelected: false +tracksRegLiveness: true +registers: + - { id: 0, class: _, preferred-register: '' } + - { id: 1, class: _, preferred-register: '' } + - { id: 2, class: _, preferred-register: '' } + - { id: 3, class: _, preferred-register: '' } + - { id: 4, class: _, preferred-register: '' } + - { id: 5, class: _, preferred-register: '' } + - { id: 6, class: _, preferred-register: '' } + - { id: 7, class: _, preferred-register: '' } + - { id: 8, class: _, preferred-register: '' } + - { id: 9, class: _, preferred-register: '' } + - { id: 10, class: _, preferred-register: '' } + - { id: 11, class: _, preferred-register: '' } +body: | + bb.1 (%ir-block.0): + liveins: $rdi, $rsi + + ; CHECK-LABEL: name: test_shl + ; CHECK: liveins: $rdi, $rsi + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi + ; CHECK: RET 0 + %0(s64) = COPY $rdi + %1(s64) = COPY $rsi + %2(s64) = G_SHL %0, %1 + + %3(s32) = G_TRUNC %0 + %4(s32) = G_TRUNC %1 + %5(s32) = G_SHL %3, %4 + + %6(s16) = G_TRUNC %0 + %7(s16) = G_TRUNC %1 + %8(s16) = G_SHL %6, %7 + + %9(s8) = G_TRUNC %0 + %10(s8) = G_TRUNC %1 + %11(s8) = G_SHL %9, %10 + + RET 0 + +... +--- +name: test_shl_i1 +alignment: 4 +legalized: false +regBankSelected: false +tracksRegLiveness: true +registers: + - { id: 0, class: _, preferred-register: '' } + - { id: 1, class: _, preferred-register: '' } + - { id: 2, class: _, preferred-register: '' } + - { id: 3, class: _, preferred-register: '' } + - { id: 4, class: _, preferred-register: '' } +body: | + bb.1 (%ir-block.0): + liveins: $rdi, $rsi + + ; CHECK-LABEL: name: test_shl_i1 + ; CHECK: liveins: $rdi, $rsi + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi + ; CHECK: RET 0 + %0(s64) = COPY $rdi + %1(s64) = COPY $rsi + %2(s1) = G_TRUNC %0 + %3(s1) = G_TRUNC %1 + %4(s1) = G_SHL %2, %3 + + RET 0 + +... 
Index: test/CodeGen/X86/GlobalISel/lshr-scalar.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/GlobalISel/lshr-scalar.ll @@ -0,0 +1,178 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64 + +define i64 @test_lshr_i64(i64 %arg1, i64 %arg2) { +; X64-LABEL: test_lshr_i64: +; X64: # %bb.0: +; X64-NEXT: movq %rsi, %rcx +; X64-NEXT: # kill: def $cl killed $rcx +; X64-NEXT: shrq %cl, %rdi +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: retq + %res = lshr i64 %arg1, %arg2 + ret i64 %res +} + +define i64 @test_lshr_i64_imm(i64 %arg1) { +; X64-LABEL: test_lshr_i64_imm: +; X64: # %bb.0: +; X64-NEXT: movq $5, %rcx +; X64-NEXT: # kill: def $cl killed $rcx +; X64-NEXT: shrq %cl, %rdi +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: retq + %res = lshr i64 %arg1, 5 + ret i64 %res +} + +define i64 @test_lshr_i64_imm1(i64 %arg1) { +; X64-LABEL: test_lshr_i64_imm1: +; X64: # %bb.0: +; X64-NEXT: movq $1, %rcx +; X64-NEXT: # kill: def $cl killed $rcx +; X64-NEXT: shrq %cl, %rdi +; X64-NEXT: movq %rdi, %rax +; X64-NEXT: retq + %res = lshr i64 %arg1, 1 + ret i64 %res +} + +define i32 @test_lshr_i32(i32 %arg1, i32 %arg2) { +; X64-LABEL: test_lshr_i32: +; X64: # %bb.0: +; X64-NEXT: movl %esi, %ecx +; X64-NEXT: # kill: def $cl killed $ecx +; X64-NEXT: shrl %cl, %edi +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %res = lshr i32 %arg1, %arg2 + ret i32 %res +} + +define i32 @test_lshr_i32_imm(i32 %arg1) { +; X64-LABEL: test_lshr_i32_imm: +; X64: # %bb.0: +; X64-NEXT: movl $5, %ecx +; X64-NEXT: # kill: def $cl killed $ecx +; X64-NEXT: shrl %cl, %edi +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %res = lshr i32 %arg1, 5 + ret i32 %res +} + +define i32 @test_lshr_i32_imm1(i32 %arg1) { +; X64-LABEL: test_lshr_i32_imm1: +; X64: # %bb.0: +; X64-NEXT: movl $1, %ecx +; X64-NEXT: # kill: def $cl killed $ecx +; X64-NEXT: shrl %cl, %edi +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %res = lshr i32 %arg1, 1 + ret i32 %res +} + +define i16 @test_lshr_i16(i32 %arg1, i32 %arg2) { +; X64-LABEL: test_lshr_i16: +; X64: # %bb.0: +; X64-NEXT: movl %esi, %ecx +; X64-NEXT: # kill: def $cl killed $cx +; X64-NEXT: shrw %cl, %di +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %a = trunc i32 %arg1 to i16 + %a2 = trunc i32 %arg2 to i16 + %res = lshr i16 %a, %a2 + ret i16 %res +} + +define i16 @test_lshr_i16_imm(i32 %arg1) { +; X64-LABEL: test_lshr_i16_imm: +; X64: # %bb.0: +; X64-NEXT: movw $5, %cx +; X64-NEXT: # kill: def $cl killed $cx +; X64-NEXT: shrw %cl, %di +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %a = trunc i32 %arg1 to i16 + %res = lshr i16 %a, 5 + ret i16 %res +} + +define i16 @test_lshr_i16_imm1(i32 %arg1) { +; X64-LABEL: test_lshr_i16_imm1: +; X64: # %bb.0: +; X64-NEXT: movw $1, %cx +; X64-NEXT: # kill: def $cl killed $cx +; X64-NEXT: shrw %cl, %di +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %a = trunc i32 %arg1 to i16 + %res = lshr i16 %a, 1 + ret i16 %res +} + +define i8 @test_lshr_i8(i32 %arg1, i32 %arg2) { +; X64-LABEL: test_lshr_i8: +; X64: # %bb.0: +; X64-NEXT: movl %esi, %ecx +; X64-NEXT: shrb %cl, %dil +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %a = trunc i32 %arg1 to i8 + %a2 = trunc i32 %arg2 to i8 + %res = lshr i8 %a, %a2 + ret i8 %res +} + +define i8 @test_lshr_i8_imm(i32 %arg1) { +; X64-LABEL: test_lshr_i8_imm: +; X64: # %bb.0: +; X64-NEXT: shrb $5, %dil +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + 
%a = trunc i32 %arg1 to i8 + %res = lshr i8 %a, 5 + ret i8 %res +} + +define i8 @test_lshr_i8_imm1(i32 %arg1) { +; X64-LABEL: test_lshr_i8_imm1: +; X64: # %bb.0: +; X64-NEXT: shrb %dil +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %a = trunc i32 %arg1 to i8 + %res = lshr i8 %a, 1 + ret i8 %res +} + +define i1 @test_lshr_i1(i32 %arg1, i32 %arg2) { +; X64-LABEL: test_lshr_i1: +; X64: # %bb.0: +; X64-NEXT: andb $1, %dil +; X64-NEXT: andb $1, %sil +; X64-NEXT: movl %esi, %ecx +; X64-NEXT: shrb %cl, %dil +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %a = trunc i32 %arg1 to i1 + %a2 = trunc i32 %arg2 to i1 + %res = lshr i1 %a, %a2 + ret i1 %res +} + +define i1 @test_lshr_i1_imm1(i32 %arg1) { +; X64-LABEL: test_lshr_i1_imm1: +; X64: # %bb.0: +; X64-NEXT: movb $-1, %cl +; X64-NEXT: andb $1, %dil +; X64-NEXT: andb $1, %cl +; X64-NEXT: shrb %cl, %dil +; X64-NEXT: movl %edi, %eax +; X64-NEXT: retq + %a = trunc i32 %arg1 to i1 + %res = lshr i1 %a, 1 + ret i1 %res +} Index: test/CodeGen/X86/GlobalISel/select-ashr-scalar.mir =================================================================== --- /dev/null +++ test/CodeGen/X86/GlobalISel/select-ashr-scalar.mir @@ -0,0 +1,497 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL +--- | + + define i64 @test_ashr_i64(i64 %arg1, i64 %arg2) { + %res = ashr i64 %arg1, %arg2 + ret i64 %res + } + + define i64 @test_ashr_i64_imm(i64 %arg1) { + %res = ashr i64 %arg1, 5 + ret i64 %res + } + + define i64 @test_ashr_i64_imm1(i64 %arg1) { + %res = ashr i64 %arg1, 1 + ret i64 %res + } + + define i32 @test_ashr_i32(i32 %arg1, i32 %arg2) { + %res = ashr i32 %arg1, %arg2 + ret i32 %res + } + + define i32 @test_ashr_i32_imm(i32 %arg1) { + %res = ashr i32 %arg1, 5 + ret i32 %res + } + + define i32 @test_ashr_i32_imm1(i32 %arg1) { + %res = ashr i32 %arg1, 1 + ret i32 %res + } + + define i16 @test_ashr_i16(i32 %arg1, i32 %arg2) { + %a = trunc i32 %arg1 to i16 + %a2 = trunc i32 %arg2 to i16 + %res = ashr i16 %a, %a2 + ret i16 %res + } + + define i16 @test_ashr_i16_imm(i32 %arg1) { + %a = trunc i32 %arg1 to i16 + %res = ashr i16 %a, 5 + ret i16 %res + } + + define i16 @test_ashr_i16_imm1(i32 %arg1) { + %a = trunc i32 %arg1 to i16 + %res = ashr i16 %a, 1 + ret i16 %res + } + + define i8 @test_ashr_i8(i32 %arg1, i32 %arg2) { + %a = trunc i32 %arg1 to i8 + %a2 = trunc i32 %arg2 to i8 + %res = ashr i8 %a, %a2 + ret i8 %res + } + + define i8 @test_ashr_i8_imm(i32 %arg1) { + %a = trunc i32 %arg1 to i8 + %res = ashr i8 %a, 5 + ret i8 %res + } + + define i8 @test_ashr_i8_imm1(i32 %arg1) { + %a = trunc i32 %arg1 to i8 + %res = ashr i8 %a, 1 + ret i8 %res + } +... 
+--- +name: test_ashr_i64 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $rdi, $rsi + + ; ALL-LABEL: name: test_ashr_i64 + ; ALL: liveins: $rdi, $rsi + ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi + ; ALL: $rcx = COPY [[COPY1]] + ; ALL: $cl = KILL killed $rcx + ; ALL: [[SAR64rCL:%[0-9]+]]:gr64 = SAR64rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $rax = COPY [[SAR64rCL]] + ; ALL: RET 0, implicit $rax + %0(s64) = COPY $rdi + %1(s64) = COPY $rsi + %2(s64) = G_ASHR %0, %1 + $rax = COPY %2(s64) + RET 0, implicit $rax + +... +--- +name: test_ashr_i64_imm +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $rdi + + ; ALL-LABEL: name: test_ashr_i64_imm + ; ALL: liveins: $rdi + ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; ALL: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 5 + ; ALL: $rcx = COPY [[MOV64ri32_]] + ; ALL: $cl = KILL killed $rcx + ; ALL: [[SAR64rCL:%[0-9]+]]:gr64 = SAR64rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $rax = COPY [[SAR64rCL]] + ; ALL: RET 0, implicit $rax + %0(s64) = COPY $rdi + %1(s64) = G_CONSTANT i64 5 + %2(s64) = G_ASHR %0, %1 + $rax = COPY %2(s64) + RET 0, implicit $rax + +... +--- +name: test_ashr_i64_imm1 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $rdi + + ; ALL-LABEL: name: test_ashr_i64_imm1 + ; ALL: liveins: $rdi + ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; ALL: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 1 + ; ALL: $rcx = COPY [[MOV64ri32_]] + ; ALL: $cl = KILL killed $rcx + ; ALL: [[SAR64rCL:%[0-9]+]]:gr64 = SAR64rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $rax = COPY [[SAR64rCL]] + ; ALL: RET 0, implicit $rax + %0(s64) = COPY $rdi + %1(s64) = G_CONSTANT i64 1 + %2(s64) = G_ASHR %0, %1 + $rax = COPY %2(s64) + RET 0, implicit $rax + +... +--- +name: test_ashr_i32 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi, $esi + + ; ALL-LABEL: name: test_ashr_i32 + ; ALL: liveins: $edi, $esi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; ALL: $ecx = COPY [[COPY1]] + ; ALL: $cl = KILL killed $ecx + ; ALL: [[SAR32rCL:%[0-9]+]]:gr32 = SAR32rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $eax = COPY [[SAR32rCL]] + ; ALL: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi + %2(s32) = G_ASHR %0, %1 + $eax = COPY %2(s32) + RET 0, implicit $eax + +... 
+--- +name: test_ashr_i32_imm +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi + + ; ALL-LABEL: name: test_ashr_i32_imm + ; ALL: liveins: $edi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 5 + ; ALL: $ecx = COPY [[MOV32ri]] + ; ALL: $cl = KILL killed $ecx + ; ALL: [[SAR32rCL:%[0-9]+]]:gr32 = SAR32rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $eax = COPY [[SAR32rCL]] + ; ALL: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = G_CONSTANT i32 5 + %2(s32) = G_ASHR %0, %1 + $eax = COPY %2(s32) + RET 0, implicit $eax + +... +--- +name: test_ashr_i32_imm1 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi + + ; ALL-LABEL: name: test_ashr_i32_imm1 + ; ALL: liveins: $edi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 1 + ; ALL: $ecx = COPY [[MOV32ri]] + ; ALL: $cl = KILL killed $ecx + ; ALL: [[SAR32rCL:%[0-9]+]]:gr32 = SAR32rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $eax = COPY [[SAR32rCL]] + ; ALL: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = G_CONSTANT i32 1 + %2(s32) = G_ASHR %0, %1 + $eax = COPY %2(s32) + RET 0, implicit $eax + +... +--- +name: test_ashr_i16 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } + - { id: 3, class: gpr, preferred-register: '' } + - { id: 4, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi, $esi + + ; ALL-LABEL: name: test_ashr_i16 + ; ALL: liveins: $edi, $esi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; ALL: [[COPY2:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit + ; ALL: [[COPY3:%[0-9]+]]:gr16 = COPY [[COPY1]].sub_16bit + ; ALL: $cx = COPY [[COPY3]] + ; ALL: $cl = KILL killed $cx + ; ALL: [[SAR16rCL:%[0-9]+]]:gr16 = SAR16rCL [[COPY2]], implicit-def $eflags, implicit $cl + ; ALL: $ax = COPY [[SAR16rCL]] + ; ALL: RET 0, implicit $ax + %0(s32) = COPY $edi + %1(s32) = COPY $esi + %2(s16) = G_TRUNC %0(s32) + %3(s16) = G_TRUNC %1(s32) + %4(s16) = G_ASHR %2, %3 + $ax = COPY %4(s16) + RET 0, implicit $ax + +... 
+--- +name: test_ashr_i16_imm +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } + - { id: 3, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi + + ; ALL-LABEL: name: test_ashr_i16_imm + ; ALL: liveins: $edi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 5 + ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit + ; ALL: $cx = COPY [[MOV16ri]] + ; ALL: $cl = KILL killed $cx + ; ALL: [[SAR16rCL:%[0-9]+]]:gr16 = SAR16rCL [[COPY1]], implicit-def $eflags, implicit $cl + ; ALL: $ax = COPY [[SAR16rCL]] + ; ALL: RET 0, implicit $ax + %0(s32) = COPY $edi + %2(s16) = G_CONSTANT i16 5 + %1(s16) = G_TRUNC %0(s32) + %3(s16) = G_ASHR %1, %2 + $ax = COPY %3(s16) + RET 0, implicit $ax + +... +--- +name: test_ashr_i16_imm1 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } + - { id: 3, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi + + ; ALL-LABEL: name: test_ashr_i16_imm1 + ; ALL: liveins: $edi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 1 + ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit + ; ALL: $cx = COPY [[MOV16ri]] + ; ALL: $cl = KILL killed $cx + ; ALL: [[SAR16rCL:%[0-9]+]]:gr16 = SAR16rCL [[COPY1]], implicit-def $eflags, implicit $cl + ; ALL: $ax = COPY [[SAR16rCL]] + ; ALL: RET 0, implicit $ax + %0(s32) = COPY $edi + %2(s16) = G_CONSTANT i16 1 + %1(s16) = G_TRUNC %0(s32) + %3(s16) = G_ASHR %1, %2 + $ax = COPY %3(s16) + RET 0, implicit $ax + +... +--- +name: test_ashr_i8 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } + - { id: 3, class: gpr, preferred-register: '' } + - { id: 4, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi, $esi + + ; ALL-LABEL: name: test_ashr_i8 + ; ALL: liveins: $edi, $esi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; ALL: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; ALL: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; ALL: $cl = COPY [[COPY3]] + ; ALL: [[SAR8rCL:%[0-9]+]]:gr8 = SAR8rCL [[COPY2]], implicit-def $eflags, implicit $cl + ; ALL: $al = COPY [[SAR8rCL]] + ; ALL: RET 0, implicit $al + %0(s32) = COPY $edi + %1(s32) = COPY $esi + %2(s8) = G_TRUNC %0(s32) + %3(s8) = G_TRUNC %1(s32) + %4(s8) = G_ASHR %2, %3 + $al = COPY %4(s8) + RET 0, implicit $al + +... 
+--- +name: test_ashr_i8_imm +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } + - { id: 3, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi + + ; ALL-LABEL: name: test_ashr_i8_imm + ; ALL: liveins: $edi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; ALL: [[SAR8ri:%[0-9]+]]:gr8 = SAR8ri [[COPY1]], 5, implicit-def $eflags + ; ALL: $al = COPY [[SAR8ri]] + ; ALL: RET 0, implicit $al + %0(s32) = COPY $edi + %2(s8) = G_CONSTANT i8 5 + %1(s8) = G_TRUNC %0(s32) + %3(s8) = G_ASHR %1, %2 + $al = COPY %3(s8) + RET 0, implicit $al + +... +--- +name: test_ashr_i8_imm1 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } + - { id: 3, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi + + ; ALL-LABEL: name: test_ashr_i8_imm1 + ; ALL: liveins: $edi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; ALL: [[SAR8r1_:%[0-9]+]]:gr8 = SAR8r1 [[COPY1]], implicit-def $eflags + ; ALL: $al = COPY [[SAR8r1_]] + ; ALL: RET 0, implicit $al + %0(s32) = COPY $edi + %2(s8) = G_CONSTANT i8 1 + %1(s8) = G_TRUNC %0(s32) + %3(s8) = G_ASHR %1, %2 + $al = COPY %3(s8) + RET 0, implicit $al + +... Index: test/CodeGen/X86/GlobalISel/select-lshr-scalar.mir =================================================================== --- /dev/null +++ test/CodeGen/X86/GlobalISel/select-lshr-scalar.mir @@ -0,0 +1,497 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL +--- | + + define i64 @test_lshr_i64(i64 %arg1, i64 %arg2) { + %res = lshr i64 %arg1, %arg2 + ret i64 %res + } + + define i64 @test_lshr_i64_imm(i64 %arg1) { + %res = lshr i64 %arg1, 5 + ret i64 %res + } + + define i64 @test_lshr_i64_imm1(i64 %arg1) { + %res = lshr i64 %arg1, 1 + ret i64 %res + } + + define i32 @test_lshr_i32(i32 %arg1, i32 %arg2) { + %res = lshr i32 %arg1, %arg2 + ret i32 %res + } + + define i32 @test_lshr_i32_imm(i32 %arg1) { + %res = lshr i32 %arg1, 5 + ret i32 %res + } + + define i32 @test_lshr_i32_imm1(i32 %arg1) { + %res = lshr i32 %arg1, 1 + ret i32 %res + } + + define i16 @test_lshr_i16(i32 %arg1, i32 %arg2) { + %a = trunc i32 %arg1 to i16 + %a2 = trunc i32 %arg2 to i16 + %res = lshr i16 %a, %a2 + ret i16 %res + } + + define i16 @test_lshr_i16_imm(i32 %arg1) { + %a = trunc i32 %arg1 to i16 + %res = lshr i16 %a, 5 + ret i16 %res + } + + define i16 @test_lshr_i16_imm1(i32 %arg1) { + %a = trunc i32 %arg1 to i16 + %res = lshr i16 %a, 1 + ret i16 %res + } + + define i8 @test_lshr_i8(i32 %arg1, i32 %arg2) { + %a = trunc i32 %arg1 to i8 + %a2 = trunc i32 %arg2 to i8 + %res = lshr i8 %a, %a2 + ret i8 %res + } + + define i8 @test_lshr_i8_imm(i32 %arg1) { + %a = trunc i32 %arg1 to i8 + %res = lshr i8 %a, 5 + ret i8 %res + } + + define i8 @test_lshr_i8_imm1(i32 %arg1) { + %a = trunc i32 %arg1 to i8 + %res = lshr i8 %a, 1 + ret i8 %res + } +... 
+--- +name: test_lshr_i64 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $rdi, $rsi + + ; ALL-LABEL: name: test_lshr_i64 + ; ALL: liveins: $rdi, $rsi + ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi + ; ALL: $rcx = COPY [[COPY1]] + ; ALL: $cl = KILL killed $rcx + ; ALL: [[SHR64rCL:%[0-9]+]]:gr64 = SHR64rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $rax = COPY [[SHR64rCL]] + ; ALL: RET 0, implicit $rax + %0(s64) = COPY $rdi + %1(s64) = COPY $rsi + %2(s64) = G_LSHR %0, %1 + $rax = COPY %2(s64) + RET 0, implicit $rax + +... +--- +name: test_lshr_i64_imm +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $rdi + + ; ALL-LABEL: name: test_lshr_i64_imm + ; ALL: liveins: $rdi + ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; ALL: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 5 + ; ALL: $rcx = COPY [[MOV64ri32_]] + ; ALL: $cl = KILL killed $rcx + ; ALL: [[SHR64rCL:%[0-9]+]]:gr64 = SHR64rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $rax = COPY [[SHR64rCL]] + ; ALL: RET 0, implicit $rax + %0(s64) = COPY $rdi + %1(s64) = G_CONSTANT i64 5 + %2(s64) = G_LSHR %0, %1 + $rax = COPY %2(s64) + RET 0, implicit $rax + +... +--- +name: test_lshr_i64_imm1 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $rdi + + ; ALL-LABEL: name: test_lshr_i64_imm1 + ; ALL: liveins: $rdi + ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; ALL: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 1 + ; ALL: $rcx = COPY [[MOV64ri32_]] + ; ALL: $cl = KILL killed $rcx + ; ALL: [[SHR64rCL:%[0-9]+]]:gr64 = SHR64rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $rax = COPY [[SHR64rCL]] + ; ALL: RET 0, implicit $rax + %0(s64) = COPY $rdi + %1(s64) = G_CONSTANT i64 1 + %2(s64) = G_LSHR %0, %1 + $rax = COPY %2(s64) + RET 0, implicit $rax + +... +--- +name: test_lshr_i32 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi, $esi + + ; ALL-LABEL: name: test_lshr_i32 + ; ALL: liveins: $edi, $esi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; ALL: $ecx = COPY [[COPY1]] + ; ALL: $cl = KILL killed $ecx + ; ALL: [[SHR32rCL:%[0-9]+]]:gr32 = SHR32rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $eax = COPY [[SHR32rCL]] + ; ALL: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi + %2(s32) = G_LSHR %0, %1 + $eax = COPY %2(s32) + RET 0, implicit $eax + +... 
+--- +name: test_lshr_i32_imm +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi + + ; ALL-LABEL: name: test_lshr_i32_imm + ; ALL: liveins: $edi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 5 + ; ALL: $ecx = COPY [[MOV32ri]] + ; ALL: $cl = KILL killed $ecx + ; ALL: [[SHR32rCL:%[0-9]+]]:gr32 = SHR32rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $eax = COPY [[SHR32rCL]] + ; ALL: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = G_CONSTANT i32 5 + %2(s32) = G_LSHR %0, %1 + $eax = COPY %2(s32) + RET 0, implicit $eax + +... +--- +name: test_lshr_i32_imm1 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi + + ; ALL-LABEL: name: test_lshr_i32_imm1 + ; ALL: liveins: $edi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 1 + ; ALL: $ecx = COPY [[MOV32ri]] + ; ALL: $cl = KILL killed $ecx + ; ALL: [[SHR32rCL:%[0-9]+]]:gr32 = SHR32rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $eax = COPY [[SHR32rCL]] + ; ALL: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = G_CONSTANT i32 1 + %2(s32) = G_LSHR %0, %1 + $eax = COPY %2(s32) + RET 0, implicit $eax + +... +--- +name: test_lshr_i16 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } + - { id: 3, class: gpr, preferred-register: '' } + - { id: 4, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi, $esi + + ; ALL-LABEL: name: test_lshr_i16 + ; ALL: liveins: $edi, $esi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; ALL: [[COPY2:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit + ; ALL: [[COPY3:%[0-9]+]]:gr16 = COPY [[COPY1]].sub_16bit + ; ALL: $cx = COPY [[COPY3]] + ; ALL: $cl = KILL killed $cx + ; ALL: [[SHR16rCL:%[0-9]+]]:gr16 = SHR16rCL [[COPY2]], implicit-def $eflags, implicit $cl + ; ALL: $ax = COPY [[SHR16rCL]] + ; ALL: RET 0, implicit $ax + %0(s32) = COPY $edi + %1(s32) = COPY $esi + %2(s16) = G_TRUNC %0(s32) + %3(s16) = G_TRUNC %1(s32) + %4(s16) = G_LSHR %2, %3 + $ax = COPY %4(s16) + RET 0, implicit $ax + +... 
+--- +name: test_lshr_i16_imm +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } + - { id: 3, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi + + ; ALL-LABEL: name: test_lshr_i16_imm + ; ALL: liveins: $edi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 5 + ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit + ; ALL: $cx = COPY [[MOV16ri]] + ; ALL: $cl = KILL killed $cx + ; ALL: [[SHR16rCL:%[0-9]+]]:gr16 = SHR16rCL [[COPY1]], implicit-def $eflags, implicit $cl + ; ALL: $ax = COPY [[SHR16rCL]] + ; ALL: RET 0, implicit $ax + %0(s32) = COPY $edi + %2(s16) = G_CONSTANT i16 5 + %1(s16) = G_TRUNC %0(s32) + %3(s16) = G_LSHR %1, %2 + $ax = COPY %3(s16) + RET 0, implicit $ax + +... +--- +name: test_lshr_i16_imm1 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } + - { id: 3, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi + + ; ALL-LABEL: name: test_lshr_i16_imm1 + ; ALL: liveins: $edi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 1 + ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit + ; ALL: $cx = COPY [[MOV16ri]] + ; ALL: $cl = KILL killed $cx + ; ALL: [[SHR16rCL:%[0-9]+]]:gr16 = SHR16rCL [[COPY1]], implicit-def $eflags, implicit $cl + ; ALL: $ax = COPY [[SHR16rCL]] + ; ALL: RET 0, implicit $ax + %0(s32) = COPY $edi + %2(s16) = G_CONSTANT i16 1 + %1(s16) = G_TRUNC %0(s32) + %3(s16) = G_LSHR %1, %2 + $ax = COPY %3(s16) + RET 0, implicit $ax + +... +--- +name: test_lshr_i8 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } + - { id: 3, class: gpr, preferred-register: '' } + - { id: 4, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi, $esi + + ; ALL-LABEL: name: test_lshr_i8 + ; ALL: liveins: $edi, $esi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; ALL: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; ALL: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit + ; ALL: $cl = COPY [[COPY3]] + ; ALL: [[SHR8rCL:%[0-9]+]]:gr8 = SHR8rCL [[COPY2]], implicit-def $eflags, implicit $cl + ; ALL: $al = COPY [[SHR8rCL]] + ; ALL: RET 0, implicit $al + %0(s32) = COPY $edi + %1(s32) = COPY $esi + %2(s8) = G_TRUNC %0(s32) + %3(s8) = G_TRUNC %1(s32) + %4(s8) = G_LSHR %2, %3 + $al = COPY %4(s8) + RET 0, implicit $al + +... 
+--- +name: test_lshr_i8_imm +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } + - { id: 3, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi + + ; ALL-LABEL: name: test_lshr_i8_imm + ; ALL: liveins: $edi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; ALL: [[SHR8ri:%[0-9]+]]:gr8 = SHR8ri [[COPY1]], 5, implicit-def $eflags + ; ALL: $al = COPY [[SHR8ri]] + ; ALL: RET 0, implicit $al + %0(s32) = COPY $edi + %2(s8) = G_CONSTANT i8 5 + %1(s8) = G_TRUNC %0(s32) + %3(s8) = G_LSHR %1, %2 + $al = COPY %3(s8) + RET 0, implicit $al + +... +--- +name: test_lshr_i8_imm1 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } + - { id: 3, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi + + ; ALL-LABEL: name: test_lshr_i8_imm1 + ; ALL: liveins: $edi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit + ; ALL: [[SHR8r1_:%[0-9]+]]:gr8 = SHR8r1 [[COPY1]], implicit-def $eflags + ; ALL: $al = COPY [[SHR8r1_]] + ; ALL: RET 0, implicit $al + %0(s32) = COPY $edi + %2(s8) = G_CONSTANT i8 1 + %1(s8) = G_TRUNC %0(s32) + %3(s8) = G_LSHR %1, %2 + $al = COPY %3(s8) + RET 0, implicit $al + +... Index: test/CodeGen/X86/GlobalISel/select-shl-scalar.mir =================================================================== --- /dev/null +++ test/CodeGen/X86/GlobalISel/select-shl-scalar.mir @@ -0,0 +1,498 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL +--- | + + define i64 @test_shl_i64(i64 %arg1, i64 %arg2) { + %res = shl i64 %arg1, %arg2 + ret i64 %res + } + + define i64 @test_shl_i64_imm(i64 %arg1) { + %res = shl i64 %arg1, 5 + ret i64 %res + } + + define i64 @test_shl_i64_imm1(i64 %arg1) { + %res = shl i64 %arg1, 1 + ret i64 %res + } + + define i32 @test_shl_i32(i32 %arg1, i32 %arg2) { + %res = shl i32 %arg1, %arg2 + ret i32 %res + } + + define i32 @test_shl_i32_imm(i32 %arg1) { + %res = shl i32 %arg1, 5 + ret i32 %res + } + + define i32 @test_shl_i32_imm1(i32 %arg1) { + %res = shl i32 %arg1, 1 + ret i32 %res + } + + define i16 @test_shl_i16(i32 %arg1, i32 %arg2) { + %a = trunc i32 %arg1 to i16 + %a2 = trunc i32 %arg2 to i16 + %res = shl i16 %a, %a2 + ret i16 %res + } + + define i16 @test_shl_i16_imm(i32 %arg1) { + %a = trunc i32 %arg1 to i16 + %res = shl i16 %a, 5 + ret i16 %res + } + + define i16 @test_shl_i16_imm1(i32 %arg1) { + %a = trunc i32 %arg1 to i16 + %res = shl i16 %a, 1 + ret i16 %res + } + + define i8 @test_shl_i8(i32 %arg1, i32 %arg2) { + %a = trunc i32 %arg1 to i8 + %a2 = trunc i32 %arg2 to i8 + %res = shl i8 %a, %a2 + ret i8 %res + } + + define i8 @test_shl_i8_imm(i32 %arg1) { + %a = trunc i32 %arg1 to i8 + %res = shl i8 %a, 5 + ret i8 %res + } + + define i8 @test_shl_i8_imm1(i32 %arg1) { + %a = trunc i32 %arg1 to i8 + %res = shl i8 %a, 1 + ret i8 %res + } + +... 
+--- +name: test_shl_i64 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $rdi, $rsi + + ; ALL-LABEL: name: test_shl_i64 + ; ALL: liveins: $rdi, $rsi + ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi + ; ALL: $rcx = COPY [[COPY1]] + ; ALL: $cl = KILL killed $rcx + ; ALL: [[SHL64rCL:%[0-9]+]]:gr64 = SHL64rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $rax = COPY [[SHL64rCL]] + ; ALL: RET 0, implicit $rax + %0(s64) = COPY $rdi + %1(s64) = COPY $rsi + %2(s64) = G_SHL %0, %1 + $rax = COPY %2(s64) + RET 0, implicit $rax + +... +--- +name: test_shl_i64_imm +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $rdi + + ; ALL-LABEL: name: test_shl_i64_imm + ; ALL: liveins: $rdi + ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; ALL: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 5 + ; ALL: $rcx = COPY [[MOV64ri32_]] + ; ALL: $cl = KILL killed $rcx + ; ALL: [[SHL64rCL:%[0-9]+]]:gr64 = SHL64rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $rax = COPY [[SHL64rCL]] + ; ALL: RET 0, implicit $rax + %0(s64) = COPY $rdi + %1(s64) = G_CONSTANT i64 5 + %2(s64) = G_SHL %0, %1 + $rax = COPY %2(s64) + RET 0, implicit $rax + +... +--- +name: test_shl_i64_imm1 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $rdi + + ; ALL-LABEL: name: test_shl_i64_imm1 + ; ALL: liveins: $rdi + ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi + ; ALL: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 1 + ; ALL: $rcx = COPY [[MOV64ri32_]] + ; ALL: $cl = KILL killed $rcx + ; ALL: [[SHL64rCL:%[0-9]+]]:gr64 = SHL64rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $rax = COPY [[SHL64rCL]] + ; ALL: RET 0, implicit $rax + %0(s64) = COPY $rdi + %1(s64) = G_CONSTANT i64 1 + %2(s64) = G_SHL %0, %1 + $rax = COPY %2(s64) + RET 0, implicit $rax + +... +--- +name: test_shl_i32 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi, $esi + + ; ALL-LABEL: name: test_shl_i32 + ; ALL: liveins: $edi, $esi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; ALL: $ecx = COPY [[COPY1]] + ; ALL: $cl = KILL killed $ecx + ; ALL: [[SHL32rCL:%[0-9]+]]:gr32 = SHL32rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $eax = COPY [[SHL32rCL]] + ; ALL: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = COPY $esi + %2(s32) = G_SHL %0, %1 + $eax = COPY %2(s32) + RET 0, implicit $eax + +... 
+--- +name: test_shl_i32_imm +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi + + ; ALL-LABEL: name: test_shl_i32_imm + ; ALL: liveins: $edi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 5 + ; ALL: $ecx = COPY [[MOV32ri]] + ; ALL: $cl = KILL killed $ecx + ; ALL: [[SHL32rCL:%[0-9]+]]:gr32 = SHL32rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $eax = COPY [[SHL32rCL]] + ; ALL: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = G_CONSTANT i32 5 + %2(s32) = G_SHL %0, %1 + $eax = COPY %2(s32) + RET 0, implicit $eax + +... +--- +name: test_shl_i32_imm1 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi + + ; ALL-LABEL: name: test_shl_i32_imm1 + ; ALL: liveins: $edi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 1 + ; ALL: $ecx = COPY [[MOV32ri]] + ; ALL: $cl = KILL killed $ecx + ; ALL: [[SHL32rCL:%[0-9]+]]:gr32 = SHL32rCL [[COPY]], implicit-def $eflags, implicit $cl + ; ALL: $eax = COPY [[SHL32rCL]] + ; ALL: RET 0, implicit $eax + %0(s32) = COPY $edi + %1(s32) = G_CONSTANT i32 1 + %2(s32) = G_SHL %0, %1 + $eax = COPY %2(s32) + RET 0, implicit $eax + +... +--- +name: test_shl_i16 +alignment: 4 +legalized: true +regBankSelected: true +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } + - { id: 3, class: gpr, preferred-register: '' } + - { id: 4, class: gpr, preferred-register: '' } +liveins: +fixedStack: +stack: +constants: +body: | + bb.1 (%ir-block.0): + liveins: $edi, $esi + + ; ALL-LABEL: name: test_shl_i16 + ; ALL: liveins: $edi, $esi + ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi + ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi + ; ALL: [[COPY2:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit + ; ALL: [[COPY3:%[0-9]+]]:gr16 = COPY [[COPY1]].sub_16bit + ; ALL: $cx = COPY [[COPY3]] + ; ALL: $cl = KILL killed $cx + ; ALL: [[SHL16rCL:%[0-9]+]]:gr16 = SHL16rCL [[COPY2]], implicit-def $eflags, implicit $cl + ; ALL: $ax = COPY [[SHL16rCL]] + ; ALL: RET 0, implicit $ax + %0(s32) = COPY $edi + %1(s32) = COPY $esi + %2(s16) = G_TRUNC %0(s32) + %3(s16) = G_TRUNC %1(s32) + %4(s16) = G_SHL %2, %3 + $ax = COPY %4(s16) + RET 0, implicit $ax + +... 
+---
+name: test_shl_i16_imm
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: gpr, preferred-register: '' }
+  - { id: 1, class: gpr, preferred-register: '' }
+  - { id: 2, class: gpr, preferred-register: '' }
+  - { id: 3, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+  bb.1 (%ir-block.0):
+    liveins: $edi
+
+    ; ALL-LABEL: name: test_shl_i16_imm
+    ; ALL: liveins: $edi
+    ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+    ; ALL: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 5
+    ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
+    ; ALL: $cx = COPY [[MOV16ri]]
+    ; ALL: $cl = KILL killed $cx
+    ; ALL: [[SHL16rCL:%[0-9]+]]:gr16 = SHL16rCL [[COPY1]], implicit-def $eflags, implicit $cl
+    ; ALL: $ax = COPY [[SHL16rCL]]
+    ; ALL: RET 0, implicit $ax
+    %0(s32) = COPY $edi
+    %2(s16) = G_CONSTANT i16 5
+    %1(s16) = G_TRUNC %0(s32)
+    %3(s16) = G_SHL %1, %2
+    $ax = COPY %3(s16)
+    RET 0, implicit $ax
+
+...
+---
+name: test_shl_i16_imm1
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: gpr, preferred-register: '' }
+  - { id: 1, class: gpr, preferred-register: '' }
+  - { id: 2, class: gpr, preferred-register: '' }
+  - { id: 3, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+  bb.1 (%ir-block.0):
+    liveins: $edi
+
+    ; ALL-LABEL: name: test_shl_i16_imm1
+    ; ALL: liveins: $edi
+    ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+    ; ALL: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 1
+    ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
+    ; ALL: $cx = COPY [[MOV16ri]]
+    ; ALL: $cl = KILL killed $cx
+    ; ALL: [[SHL16rCL:%[0-9]+]]:gr16 = SHL16rCL [[COPY1]], implicit-def $eflags, implicit $cl
+    ; ALL: $ax = COPY [[SHL16rCL]]
+    ; ALL: RET 0, implicit $ax
+    %0(s32) = COPY $edi
+    %2(s16) = G_CONSTANT i16 1
+    %1(s16) = G_TRUNC %0(s32)
+    %3(s16) = G_SHL %1, %2
+    $ax = COPY %3(s16)
+    RET 0, implicit $ax
+
+...
+---
+name: test_shl_i8
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: gpr, preferred-register: '' }
+  - { id: 1, class: gpr, preferred-register: '' }
+  - { id: 2, class: gpr, preferred-register: '' }
+  - { id: 3, class: gpr, preferred-register: '' }
+  - { id: 4, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+  bb.1 (%ir-block.0):
+    liveins: $edi, $esi
+
+    ; ALL-LABEL: name: test_shl_i8
+    ; ALL: liveins: $edi, $esi
+    ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+    ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+    ; ALL: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
+    ; ALL: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit
+    ; ALL: $cl = COPY [[COPY3]]
+    ; ALL: [[SHL8rCL:%[0-9]+]]:gr8 = SHL8rCL [[COPY2]], implicit-def $eflags, implicit $cl
+    ; ALL: $al = COPY [[SHL8rCL]]
+    ; ALL: RET 0, implicit $al
+    %0(s32) = COPY $edi
+    %1(s32) = COPY $esi
+    %2(s8) = G_TRUNC %0(s32)
+    %3(s8) = G_TRUNC %1(s32)
+    %4(s8) = G_SHL %2, %3
+    $al = COPY %4(s8)
+    RET 0, implicit $al
+
+...
+---
+name: test_shl_i8_imm
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: gpr, preferred-register: '' }
+  - { id: 1, class: gpr, preferred-register: '' }
+  - { id: 2, class: gpr, preferred-register: '' }
+  - { id: 3, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+  bb.1 (%ir-block.0):
+    liveins: $edi
+
+    ; ALL-LABEL: name: test_shl_i8_imm
+    ; ALL: liveins: $edi
+    ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+    ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
+    ; ALL: [[SHL8ri:%[0-9]+]]:gr8 = SHL8ri [[COPY1]], 5, implicit-def $eflags
+    ; ALL: $al = COPY [[SHL8ri]]
+    ; ALL: RET 0, implicit $al
+    %0(s32) = COPY $edi
+    %2(s8) = G_CONSTANT i8 5
+    %1(s8) = G_TRUNC %0(s32)
+    %3(s8) = G_SHL %1, %2
+    $al = COPY %3(s8)
+    RET 0, implicit $al
+
+...
+---
+name: test_shl_i8_imm1
+alignment: 4
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: gpr, preferred-register: '' }
+  - { id: 1, class: gpr, preferred-register: '' }
+  - { id: 2, class: gpr, preferred-register: '' }
+  - { id: 3, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+  bb.1 (%ir-block.0):
+    liveins: $edi
+
+    ; ALL-LABEL: name: test_shl_i8_imm1
+    ; ALL: liveins: $edi
+    ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+    ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
+    ; ALL: [[ADD8rr:%[0-9]+]]:gr8 = ADD8rr [[COPY1]], [[COPY1]], implicit-def $eflags
+    ; ALL: $al = COPY [[ADD8rr]]
+    ; ALL: RET 0, implicit $al
+    %0(s32) = COPY $edi
+    %2(s8) = G_CONSTANT i8 1
+    %1(s8) = G_TRUNC %0(s32)
+    %3(s8) = G_SHL %1, %2
+    $al = COPY %3(s8)
+    RET 0, implicit $al
+
+...
Index: test/CodeGen/X86/GlobalISel/shl-scalar.ll
===================================================================
--- /dev/null
+++ test/CodeGen/X86/GlobalISel/shl-scalar.ll
@@ -0,0 +1,174 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
+
+define i64 @test_shl_i64(i64 %arg1, i64 %arg2) {
+; X64-LABEL: test_shl_i64:
+; X64: # %bb.0:
+; X64-NEXT: movq %rsi, %rcx
+; X64-NEXT: # kill: def $cl killed $rcx
+; X64-NEXT: shlq %cl, %rdi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: retq
+  %res = shl i64 %arg1, %arg2
+  ret i64 %res
+}
+
+define i64 @test_shl_i64_imm(i64 %arg1) {
+; X64-LABEL: test_shl_i64_imm:
+; X64: # %bb.0:
+; X64-NEXT: movq $5, %rcx
+; X64-NEXT: # kill: def $cl killed $rcx
+; X64-NEXT: shlq %cl, %rdi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: retq
+  %res = shl i64 %arg1, 5
+  ret i64 %res
+}
+
+define i64 @test_shl_i64_imm1(i64 %arg1) {
+; X64-LABEL: test_shl_i64_imm1:
+; X64: # %bb.0:
+; X64-NEXT: movq $1, %rcx
+; X64-NEXT: # kill: def $cl killed $rcx
+; X64-NEXT: shlq %cl, %rdi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: retq
+  %res = shl i64 %arg1, 1
+  ret i64 %res
+}
+
+define i32 @test_shl_i32(i32 %arg1, i32 %arg2) {
+; X64-LABEL: test_shl_i32:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $ecx
+; X64-NEXT: shll %cl, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+  %res = shl i32 %arg1, %arg2
+  ret i32 %res
+}
+
+define i32 @test_shl_i32_imm(i32 %arg1) {
+; X64-LABEL: test_shl_i32_imm:
+; X64: # %bb.0:
+; X64-NEXT: movl $5, %ecx
+; X64-NEXT: # kill: def $cl killed $ecx
+; X64-NEXT: shll %cl, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+  %res = shl i32 %arg1, 5
+  ret i32 %res
+}
+
+define i32 @test_shl_i32_imm1(i32 %arg1) {
+; X64-LABEL: test_shl_i32_imm1:
+; X64: # %bb.0:
+; X64-NEXT: movl $1, %ecx
+; X64-NEXT: # kill: def $cl killed $ecx
+; X64-NEXT: shll %cl, %edi
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+  %res = shl i32 %arg1, 1
+  ret i32 %res
+}
+
+define i16 @test_shl_i16(i32 %arg1, i32 %arg2) {
+; X64-LABEL: test_shl_i16:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: # kill: def $cl killed $cx
+; X64-NEXT: shlw %cl, %di
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+  %a = trunc i32 %arg1 to i16
+  %a2 = trunc i32 %arg2 to i16
+  %res = shl i16 %a, %a2
+  ret i16 %res
+}
+
+define i16 @test_shl_i16_imm(i32 %arg1) {
+; X64-LABEL: test_shl_i16_imm:
+; X64: # %bb.0:
+; X64-NEXT: movw $5, %cx
+; X64-NEXT: # kill: def $cl killed $cx
+; X64-NEXT: shlw %cl, %di
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+  %a = trunc i32 %arg1 to i16
+  %res = shl i16 %a, 5
+  ret i16 %res
+}
+
+define i16 @test_shl_i16_imm1(i32 %arg1) {
+; X64-LABEL: test_shl_i16_imm1:
+; X64: # %bb.0:
+; X64-NEXT: movw $1, %cx
+; X64-NEXT: # kill: def $cl killed $cx
+; X64-NEXT: shlw %cl, %di
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+  %a = trunc i32 %arg1 to i16
+  %res = shl i16 %a, 1
+  ret i16 %res
+}
+
+define i8 @test_shl_i8(i32 %arg1, i32 %arg2) {
+; X64-LABEL: test_shl_i8:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: shlb %cl, %dil
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+  %a = trunc i32 %arg1 to i8
+  %a2 = trunc i32 %arg2 to i8
+  %res = shl i8 %a, %a2
+  ret i8 %res
+}
+
+define i8 @test_shl_i8_imm(i32 %arg1) {
+; X64-LABEL: test_shl_i8_imm:
+; X64: # %bb.0:
+; X64-NEXT: shlb $5, %dil
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+  %a = trunc i32 %arg1 to i8
+  %res = shl i8 %a, 5
+  ret i8 %res
+}
+
+define i8 @test_shl_i8_imm1(i32 %arg1) {
+; X64-LABEL: test_shl_i8_imm1:
+; X64: # %bb.0:
+; X64-NEXT: addb %dil, %dil
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+  %a = trunc i32 %arg1 to i8
+  %res = shl i8 %a, 1
+  ret i8 %res
+}
+
+define i1 @test_shl_i1(i32 %arg1, i32 %arg2) {
+; X64-LABEL: test_shl_i1:
+; X64: # %bb.0:
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: shlb %cl, %dil
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+  %a = trunc i32 %arg1 to i1
+  %a2 = trunc i32 %arg2 to i1
+  %res = shl i1 %a, %a2
+  ret i1 %res
+}
+
+define i1 @test_shl_i1_imm1(i32 %arg1) {
+; X64-LABEL: test_shl_i1_imm1:
+; X64: # %bb.0:
+; X64-NEXT: movb $-1, %cl
+; X64-NEXT: shlb %cl, %dil
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: retq
+  %a = trunc i32 %arg1 to i1
+  %res = shl i1 %a, 1
+  ret i1 %res
+}