Index: lib/CodeGen/GlobalISel/Utils.cpp =================================================================== --- lib/CodeGen/GlobalISel/Utils.cpp +++ lib/CodeGen/GlobalISel/Utils.cpp @@ -183,18 +183,51 @@ Optional<int64_t> llvm::getConstantVRegVal(unsigned VReg, const MachineRegisterInfo &MRI) { - MachineInstr *MI = MRI.getVRegDef(VReg); - if (MI->getOpcode() != TargetOpcode::G_CONSTANT) + SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes; + MachineInstr *MI; + while ((MI = MRI.getVRegDef(VReg)) && + MI->getOpcode() != TargetOpcode::G_CONSTANT) { + switch (MI->getOpcode()) { + case TargetOpcode::G_TRUNC: + case TargetOpcode::G_SEXT: + case TargetOpcode::G_ZEXT: + SeenOpcodes.push_back(std::make_pair( + MI->getOpcode(), + MRI.getType(MI->getOperand(0).getReg()).getSizeInBits())); + VReg = MI->getOperand(1).getReg(); + break; + default: + return None; + } + } + if (!MI || (!MI->getOperand(1).isImm() && !MI->getOperand(1).isCImm())) return None; - if (MI->getOperand(1).isImm()) - return MI->getOperand(1).getImm(); + const MachineOperand &CstVal = MI->getOperand(1); + unsigned BitWidth = MRI.getType(MI->getOperand(0).getReg()).getSizeInBits(); + APInt Val = CstVal.isImm() ? 
APInt(BitWidth, CstVal.getImm()) + : CstVal.getCImm()->getValue(); + assert(Val.getBitWidth() == BitWidth && + "Value bitwidth doesn't match definition type"); + while (!SeenOpcodes.empty()) { + std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val(); + switch (OpcodeAndSize.first) { + case TargetOpcode::G_TRUNC: + Val = Val.trunc(OpcodeAndSize.second); + break; + case TargetOpcode::G_SEXT: + Val = Val.sext(OpcodeAndSize.second); + break; + case TargetOpcode::G_ZEXT: + Val = Val.zext(OpcodeAndSize.second); + break; + } + } - if (MI->getOperand(1).isCImm() && - MI->getOperand(1).getCImm()->getBitWidth() <= 64) - return MI->getOperand(1).getCImm()->getSExtValue(); + if (Val.getBitWidth() > 64) + return None; - return None; + return Val.getSExtValue(); } const llvm::ConstantFP* llvm::getConstantFPVRegVal(unsigned VReg, Index: test/CodeGen/X86/GlobalISel/ashr-scalar.ll =================================================================== --- test/CodeGen/X86/GlobalISel/ashr-scalar.ll +++ test/CodeGen/X86/GlobalISel/ashr-scalar.ll @@ -28,8 +28,7 @@ ; X64-LABEL: test_ashr_i64_imm1: ; X64: # %bb.0: ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq $1, %rcx -; X64-NEXT: sarq %cl, %rax +; X64-NEXT: sarq %rax ; X64-NEXT: retq %res = ashr i64 %arg1, 1 ret i64 %res @@ -62,8 +61,7 @@ ; X64-LABEL: test_ashr_i32_imm1: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movl $1, %ecx -; X64-NEXT: sarl %cl, %eax +; X64-NEXT: sarl %eax ; X64-NEXT: retq %res = ashr i32 %arg1, 1 ret i32 %res @@ -101,8 +99,7 @@ ; X64-LABEL: test_ashr_i16_imm1: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movw $1, %cx -; X64-NEXT: sarw %cl, %ax +; X64-NEXT: sarw %ax ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %a = trunc i32 %arg1 to i16 Index: test/CodeGen/X86/GlobalISel/lshr-scalar.ll =================================================================== --- test/CodeGen/X86/GlobalISel/lshr-scalar.ll +++ test/CodeGen/X86/GlobalISel/lshr-scalar.ll @@ -28,8 +28,7 @@ ; 
X64-LABEL: test_lshr_i64_imm1: ; X64: # %bb.0: ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq $1, %rcx -; X64-NEXT: shrq %cl, %rax +; X64-NEXT: shrq %rax ; X64-NEXT: retq %res = lshr i64 %arg1, 1 ret i64 %res @@ -62,8 +61,7 @@ ; X64-LABEL: test_lshr_i32_imm1: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movl $1, %ecx -; X64-NEXT: shrl %cl, %eax +; X64-NEXT: shrl %eax ; X64-NEXT: retq %res = lshr i32 %arg1, 1 ret i32 %res @@ -101,8 +99,7 @@ ; X64-LABEL: test_lshr_i16_imm1: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movw $1, %cx -; X64-NEXT: shrw %cl, %ax +; X64-NEXT: shrw %ax ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %a = trunc i32 %arg1 to i16 Index: test/CodeGen/X86/GlobalISel/shl-scalar.ll =================================================================== --- test/CodeGen/X86/GlobalISel/shl-scalar.ll +++ test/CodeGen/X86/GlobalISel/shl-scalar.ll @@ -27,9 +27,7 @@ define i64 @test_shl_i64_imm1(i64 %arg1) { ; X64-LABEL: test_shl_i64_imm1: ; X64: # %bb.0: -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq $1, %rcx -; X64-NEXT: shlq %cl, %rax +; X64-NEXT: leaq (%rdi,%rdi), %rax ; X64-NEXT: retq %res = shl i64 %arg1, 1 ret i64 %res @@ -61,9 +59,8 @@ define i32 @test_shl_i32_imm1(i32 %arg1) { ; X64-LABEL: test_shl_i32_imm1: ; X64: # %bb.0: -; X64-NEXT: movl %edi, %eax -; X64-NEXT: movl $1, %ecx -; X64-NEXT: shll %cl, %eax +; X64-NEXT: # kill: def $edi killed $edi def $rdi +; X64-NEXT: leal (%rdi,%rdi), %eax ; X64-NEXT: retq %res = shl i32 %arg1, 1 ret i32 %res @@ -100,9 +97,8 @@ define i16 @test_shl_i16_imm1(i32 %arg1) { ; X64-LABEL: test_shl_i16_imm1: ; X64: # %bb.0: -; X64-NEXT: movl %edi, %eax -; X64-NEXT: movw $1, %cx -; X64-NEXT: shlw %cl, %ax +; X64-NEXT: # kill: def $edi killed $edi def $rdi +; X64-NEXT: leal (%rdi,%rdi), %eax ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %a = trunc i32 %arg1 to i16