Index: include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h =================================================================== --- include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h +++ include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h @@ -129,6 +129,30 @@ return tryFoldImplicitDef(MI, DeadInsts); } + bool tryCombineTrunc(MachineInstr &MI, + SmallVectorImpl<MachineInstr *> &DeadInsts) { + if (MI.getOpcode() != TargetOpcode::G_TRUNC) + return false; + + Builder.setInstr(MI); + + // t1 = trunc (g_constant t0) -> g_constant t1 + int64_t Cst; + unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg()); + + if (mi_match(SrcReg, MRI, m_ICst(Cst))) { + unsigned DstReg = MI.getOperand(0).getReg(); + if (isInstUnsupported({TargetOpcode::G_CONSTANT, {MRI.getType(DstReg)}})) + return false; + + Builder.buildConstant(DstReg, Cst); + markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts); + return true; + } + + return false; + } + /// Try to fold G_[ASZ]EXT (G_IMPLICIT_DEF). 
bool tryFoldImplicitDef(MachineInstr &MI, SmallVectorImpl<MachineInstr *> &DeadInsts) { @@ -271,7 +295,10 @@ bool Changed = false; for (auto &Use : MRI.use_instructions(MI.getOperand(0).getReg())) Changed |= tryCombineInstruction(Use, DeadInsts); - return Changed; + + if (Changed) + return true; + return tryCombineTrunc(MI, DeadInsts); } } } Index: test/CodeGen/AMDGPU/GlobalISel/combine-ext-legalizer.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/combine-ext-legalizer.mir +++ test/CodeGen/AMDGPU/GlobalISel/combine-ext-legalizer.mir @@ -9,12 +9,11 @@ ; CHECK-LABEL: name: test_sext_trunc_i64_i32_i64 ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 - ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) - ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64) - ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY1]], [[TRUNC]](s32) - ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64) - ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[TRUNC1]](s32) + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 + ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY1]], [[C]](s32) + ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32 + ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C1]](s32) ; CHECK: $vgpr0_vgpr1 = COPY [[ASHR]](s64) %0:_(s64) = COPY $vgpr0_vgpr1 %1:_(s32) = G_TRUNC %0 Index: test/CodeGen/X86/GlobalISel/add-ext.ll =================================================================== --- test/CodeGen/X86/GlobalISel/add-ext.ll +++ test/CodeGen/X86/GlobalISel/add-ext.ll @@ -45,8 +45,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addl $-5, %edi ; CHECK-NEXT: movslq %edi, %rax -; CHECK-NEXT: movq $3, %rcx -; CHECK: retq +; CHECK-NEXT: shlq $3, %rax +; CHECK-NEXT: addq %rsi, %rax +; CHECK-NEXT: retq %add = add nsw i32 %i, -5 %ext = sext i32 %add to i64 @@ -159,21 +160,22 @@ define void @PR20134(i32* %a, i32 %i) { ; CHECK-LABEL: PR20134: ; CHECK: # %bb.0: -; CHECK: movq $4, %rax -; 
CHECK-NEXT: leal 1(%rsi), %ecx -; CHECK-NEXT: movslq %ecx, %rcx -; CHECK-NEXT: imulq %rax, %rcx -; CHECK-NEXT: leaq (%rdi,%rcx), %rcx -; CHECK-NEXT: leal 2(%rsi), %edx -; CHECK-NEXT: movslq %edx, %rdx -; CHECK-NEXT: imulq %rax, %rdx -; CHECK-NEXT: leaq (%rdi,%rdx), %rdx -; CHECK-NEXT: movl (%rdx), %edx -; CHECK-NEXT: addl (%rcx), %edx -; CHECK-NEXT: movslq %esi, %rcx -; CHECK-NEXT: imulq %rax, %rcx -; CHECK-NEXT: leaq (%rdi,%rcx), %rax -; CHECK-NEXT: movl %edx, (%rax) +; CHECK-NEXT: # kill: def $esi killed $esi def $rsi +; CHECK-NEXT: movq $4, %rax +; CHECK-NEXT: leal 1(%rsi), %ecx +; CHECK-NEXT: movslq %ecx, %rcx +; CHECK-NEXT: imulq %rax, %rcx +; CHECK-NEXT: leaq (%rdi,%rcx), %rcx +; CHECK-NEXT: leal 2(%rsi), %edx +; CHECK-NEXT: movslq %edx, %rdx +; CHECK-NEXT: imulq %rax, %rdx +; CHECK-NEXT: leaq (%rdi,%rdx), %rdx +; CHECK-NEXT: movl (%rdx), %edx +; CHECK-NEXT: addl (%rcx), %edx +; CHECK-NEXT: movslq %esi, %rcx +; CHECK-NEXT: imulq %rax, %rcx +; CHECK-NEXT: leaq (%rdi,%rcx), %rax +; CHECK-NEXT: movl %edx, (%rax) ; CHECK-NEXT: retq %add1 = add nsw i32 %i, 1 @@ -195,19 +197,21 @@ ; The same as @PR20134 but sign extension is replaced with zero extension define void @PR20134_zext(i32* %a, i32 %i) { -; CHECK: # %bb.0: -; CHECK: movq $4, %rax -; CHECK-NEXT: leal 1(%rsi), %ecx -; CHECK-NEXT: imulq %rax, %rcx -; CHECK-NEXT: leaq (%rdi,%rcx), %rcx -; CHECK-NEXT: leal 2(%rsi), %edx -; CHECK-NEXT: imulq %rax, %rdx -; CHECK-NEXT: leaq (%rdi,%rdx), %rdx -; CHECK-NEXT: movl (%rdx), %edx -; CHECK-NEXT: addl (%rcx), %edx -; CHECK-NEXT: imulq %rax, %rsi -; CHECK-NEXT: leaq (%rdi,%rsi), %rax -; CHECK-NEXT: movl %edx, (%rax) +; CHECK-LABEL: PR20134_zext: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $esi killed $esi def $rsi +; CHECK-NEXT: movq $4, %rax +; CHECK-NEXT: leal 1(%rsi), %ecx +; CHECK-NEXT: imulq %rax, %rcx +; CHECK-NEXT: leaq (%rdi,%rcx), %rcx +; CHECK-NEXT: leal 2(%rsi), %edx +; CHECK-NEXT: imulq %rax, %rdx +; CHECK-NEXT: leaq (%rdi,%rdx), %rdx +; CHECK-NEXT: movl 
(%rdx), %edx +; CHECK-NEXT: addl (%rcx), %edx +; CHECK-NEXT: imulq %rax, %rsi +; CHECK-NEXT: leaq (%rdi,%rsi), %rax +; CHECK-NEXT: movl %edx, (%rax) ; CHECK-NEXT: retq %add1 = add nuw i32 %i, 1 Index: test/CodeGen/X86/GlobalISel/ashr-scalar.ll =================================================================== --- test/CodeGen/X86/GlobalISel/ashr-scalar.ll +++ test/CodeGen/X86/GlobalISel/ashr-scalar.ll @@ -17,8 +17,7 @@ ; X64-LABEL: test_ashr_i64_imm: ; X64: # %bb.0: ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq $5, %rcx -; X64-NEXT: sarq %cl, %rax +; X64-NEXT: sarq $5, %rax ; X64-NEXT: retq %res = ashr i64 %arg1, 5 ret i64 %res @@ -28,8 +27,7 @@ ; X64-LABEL: test_ashr_i64_imm1: ; X64: # %bb.0: ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq $1, %rcx -; X64-NEXT: sarq %cl, %rax +; X64-NEXT: sarq %rax ; X64-NEXT: retq %res = ashr i64 %arg1, 1 ret i64 %res @@ -51,8 +49,7 @@ ; X64-LABEL: test_ashr_i32_imm: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movl $5, %ecx -; X64-NEXT: sarl %cl, %eax +; X64-NEXT: sarl $5, %eax ; X64-NEXT: retq %res = ashr i32 %arg1, 5 ret i32 %res @@ -62,8 +59,7 @@ ; X64-LABEL: test_ashr_i32_imm1: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movl $1, %ecx -; X64-NEXT: sarl %cl, %eax +; X64-NEXT: sarl %eax ; X64-NEXT: retq %res = ashr i32 %arg1, 1 ret i32 %res @@ -88,8 +84,7 @@ ; X64-LABEL: test_ashr_i16_imm: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movw $5, %cx -; X64-NEXT: sarw %cl, %ax +; X64-NEXT: sarw $5, %ax ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %a = trunc i32 %arg1 to i16 @@ -101,8 +96,7 @@ ; X64-LABEL: test_ashr_i16_imm1: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movw $1, %cx -; X64-NEXT: sarw %cl, %ax +; X64-NEXT: sarw %ax ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %a = trunc i32 %arg1 to i16 Index: test/CodeGen/X86/GlobalISel/ext-x86-64.ll =================================================================== --- 
test/CodeGen/X86/GlobalISel/ext-x86-64.ll +++ test/CodeGen/X86/GlobalISel/ext-x86-64.ll @@ -18,10 +18,8 @@ ; X64-LABEL: test_sext_i8: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movq $56, %rcx -; X64-NEXT: shlq %cl, %rax -; X64-NEXT: movq $56, %rcx -; X64-NEXT: sarq %cl, %rax +; X64-NEXT: shlq $56, %rax +; X64-NEXT: sarq $56, %rax ; X64-NEXT: retq %r = sext i8 %val to i64 ret i64 %r @@ -31,10 +29,8 @@ ; X64-LABEL: test_sext_i16: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movq $48, %rcx -; X64-NEXT: shlq %cl, %rax -; X64-NEXT: movq $48, %rcx -; X64-NEXT: sarq %cl, %rax +; X64-NEXT: shlq $48, %rax +; X64-NEXT: sarq $48, %rax ; X64-NEXT: retq %r = sext i16 %val to i64 ret i64 %r Index: test/CodeGen/X86/GlobalISel/ext.ll =================================================================== --- test/CodeGen/X86/GlobalISel/ext.ll +++ test/CodeGen/X86/GlobalISel/ext.ll @@ -89,10 +89,8 @@ ; X64-LABEL: test_sext_i8: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movl $24, %ecx -; X64-NEXT: shll %cl, %eax -; X64-NEXT: movl $24, %ecx -; X64-NEXT: sarl %cl, %eax +; X64-NEXT: shll $24, %eax +; X64-NEXT: sarl $24, %eax ; X64-NEXT: retq ; ; X32-LABEL: test_sext_i8: @@ -107,10 +105,8 @@ ; X64-LABEL: test_sext_i16: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movl $16, %ecx -; X64-NEXT: shll %cl, %eax -; X64-NEXT: movl $16, %ecx -; X64-NEXT: sarl %cl, %eax +; X64-NEXT: shll $16, %eax +; X64-NEXT: sarl $16, %eax ; X64-NEXT: retq ; ; X32-LABEL: test_sext_i16: Index: test/CodeGen/X86/GlobalISel/gep.ll =================================================================== --- test/CodeGen/X86/GlobalISel/gep.ll +++ test/CodeGen/X86/GlobalISel/gep.ll @@ -7,10 +7,8 @@ ; X64_GISEL: # %bb.0: ; X64_GISEL-NEXT: # kill: def $esi killed $esi def $rsi ; X64_GISEL-NEXT: movq $4, %rax -; X64_GISEL-NEXT: movq $56, %rcx -; X64_GISEL-NEXT: shlq %cl, %rsi -; X64_GISEL-NEXT: movq $56, %rcx -; X64_GISEL-NEXT: sarq %cl, %rsi +; X64_GISEL-NEXT: shlq $56, %rsi +; 
X64_GISEL-NEXT: sarq $56, %rsi ; X64_GISEL-NEXT: imulq %rax, %rsi ; X64_GISEL-NEXT: leaq (%rdi,%rsi), %rax ; X64_GISEL-NEXT: retq @@ -45,10 +43,8 @@ ; X64_GISEL: # %bb.0: ; X64_GISEL-NEXT: # kill: def $esi killed $esi def $rsi ; X64_GISEL-NEXT: movq $4, %rax -; X64_GISEL-NEXT: movq $48, %rcx -; X64_GISEL-NEXT: shlq %cl, %rsi -; X64_GISEL-NEXT: movq $48, %rcx -; X64_GISEL-NEXT: sarq %cl, %rsi +; X64_GISEL-NEXT: shlq $48, %rsi +; X64_GISEL-NEXT: sarq $48, %rsi ; X64_GISEL-NEXT: imulq %rax, %rsi ; X64_GISEL-NEXT: leaq (%rdi,%rsi), %rax ; X64_GISEL-NEXT: retq Index: test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir +++ test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir @@ -77,12 +77,11 @@ ; CHECK-LABEL: name: test_sext_i1 ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil - ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s8) - ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s64) - ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[TRUNC]](s8) - ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s64) - ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[TRUNC1]](s8) + ; CHECK: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 63 + ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C]](s8) + ; CHECK: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 63 + ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C1]](s8) ; CHECK: $rax = COPY [[ASHR]](s64) ; CHECK: RET 0, implicit $rax %0(s8) = COPY $dil Index: test/CodeGen/X86/GlobalISel/lshr-scalar.ll =================================================================== --- test/CodeGen/X86/GlobalISel/lshr-scalar.ll +++ test/CodeGen/X86/GlobalISel/lshr-scalar.ll @@ -17,8 +17,7 @@ ; X64-LABEL: test_lshr_i64_imm: ; X64: # %bb.0: ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq $5, %rcx -; X64-NEXT: shrq %cl, %rax +; X64-NEXT: shrq $5, %rax ; X64-NEXT: retq %res = lshr i64 %arg1, 
5 ret i64 %res @@ -28,8 +27,7 @@ ; X64-LABEL: test_lshr_i64_imm1: ; X64: # %bb.0: ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq $1, %rcx -; X64-NEXT: shrq %cl, %rax +; X64-NEXT: shrq %rax ; X64-NEXT: retq %res = lshr i64 %arg1, 1 ret i64 %res @@ -51,8 +49,7 @@ ; X64-LABEL: test_lshr_i32_imm: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movl $5, %ecx -; X64-NEXT: shrl %cl, %eax +; X64-NEXT: shrl $5, %eax ; X64-NEXT: retq %res = lshr i32 %arg1, 5 ret i32 %res @@ -62,8 +59,7 @@ ; X64-LABEL: test_lshr_i32_imm1: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movl $1, %ecx -; X64-NEXT: shrl %cl, %eax +; X64-NEXT: shrl %eax ; X64-NEXT: retq %res = lshr i32 %arg1, 1 ret i32 %res @@ -88,8 +84,7 @@ ; X64-LABEL: test_lshr_i16_imm: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movw $5, %cx -; X64-NEXT: shrw %cl, %ax +; X64-NEXT: shrw $5, %ax ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %a = trunc i32 %arg1 to i16 @@ -101,8 +96,7 @@ ; X64-LABEL: test_lshr_i16_imm1: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movw $1, %cx -; X64-NEXT: shrw %cl, %ax +; X64-NEXT: shrw %ax ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %a = trunc i32 %arg1 to i16 Index: test/CodeGen/X86/GlobalISel/shl-scalar.ll =================================================================== --- test/CodeGen/X86/GlobalISel/shl-scalar.ll +++ test/CodeGen/X86/GlobalISel/shl-scalar.ll @@ -17,8 +17,7 @@ ; X64-LABEL: test_shl_i64_imm: ; X64: # %bb.0: ; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq $5, %rcx -; X64-NEXT: shlq %cl, %rax +; X64-NEXT: shlq $5, %rax ; X64-NEXT: retq %res = shl i64 %arg1, 5 ret i64 %res @@ -27,9 +26,7 @@ define i64 @test_shl_i64_imm1(i64 %arg1) { ; X64-LABEL: test_shl_i64_imm1: ; X64: # %bb.0: -; X64-NEXT: movq %rdi, %rax -; X64-NEXT: movq $1, %rcx -; X64-NEXT: shlq %cl, %rax +; X64-NEXT: leaq (%rdi,%rdi), %rax ; X64-NEXT: retq %res = shl i64 %arg1, 1 ret i64 %res @@ -51,8 +48,7 @@ ; X64-LABEL: 
test_shl_i32_imm: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movl $5, %ecx -; X64-NEXT: shll %cl, %eax +; X64-NEXT: shll $5, %eax ; X64-NEXT: retq %res = shl i32 %arg1, 5 ret i32 %res @@ -61,9 +57,8 @@ define i32 @test_shl_i32_imm1(i32 %arg1) { ; X64-LABEL: test_shl_i32_imm1: ; X64: # %bb.0: -; X64-NEXT: movl %edi, %eax -; X64-NEXT: movl $1, %ecx -; X64-NEXT: shll %cl, %eax +; X64-NEXT: # kill: def $edi killed $edi def $rdi +; X64-NEXT: leal (%rdi,%rdi), %eax ; X64-NEXT: retq %res = shl i32 %arg1, 1 ret i32 %res @@ -88,8 +83,7 @@ ; X64-LABEL: test_shl_i16_imm: ; X64: # %bb.0: ; X64-NEXT: movl %edi, %eax -; X64-NEXT: movw $5, %cx -; X64-NEXT: shlw %cl, %ax +; X64-NEXT: shlw $5, %ax ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %a = trunc i32 %arg1 to i16 @@ -100,9 +94,8 @@ define i16 @test_shl_i16_imm1(i32 %arg1) { ; X64-LABEL: test_shl_i16_imm1: ; X64: # %bb.0: -; X64-NEXT: movl %edi, %eax -; X64-NEXT: movw $1, %cx -; X64-NEXT: shlw %cl, %ax +; X64-NEXT: # kill: def $edi killed $edi def $rdi +; X64-NEXT: leal (%rdi,%rdi), %eax ; X64-NEXT: # kill: def $ax killed $ax killed $eax ; X64-NEXT: retq %a = trunc i32 %arg1 to i16 Index: test/CodeGen/X86/GlobalISel/x86_64-legalize-sitofp.mir =================================================================== --- test/CodeGen/X86/GlobalISel/x86_64-legalize-sitofp.mir +++ test/CodeGen/X86/GlobalISel/x86_64-legalize-sitofp.mir @@ -88,12 +88,11 @@ ; CHECK-LABEL: name: int8_to_float ; CHECK: liveins: $edi ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi - ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) - ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32) - ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[TRUNC]](s8) - ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32) - ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[TRUNC1]](s8) + ; CHECK: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 24 + ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL 
[[COPY1]], [[C]](s8) + ; CHECK: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 24 + ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s8) ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32) ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[SITOFP]](s32) ; CHECK: $xmm0 = COPY [[ANYEXT]](s128) @@ -122,12 +121,11 @@ ; CHECK-LABEL: name: int16_to_float ; CHECK: liveins: $edi ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi - ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) - ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32) - ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[TRUNC]](s8) - ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32) - ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[TRUNC1]](s8) + ; CHECK: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 16 + ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s8) + ; CHECK: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 16 + ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s8) ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32) ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[SITOFP]](s32) ; CHECK: $xmm0 = COPY [[ANYEXT]](s128) @@ -208,12 +206,11 @@ ; CHECK-LABEL: name: int8_to_double ; CHECK: liveins: $edi ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi - ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) - ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32) - ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[TRUNC]](s8) - ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32) - ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[TRUNC1]](s8) + ; CHECK: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 24 + ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s8) + ; CHECK: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 24 + ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s8) ; CHECK: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[ASHR]](s32) ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT 
[[SITOFP]](s64) ; CHECK: $xmm0 = COPY [[ANYEXT]](s128) @@ -242,12 +239,11 @@ ; CHECK-LABEL: name: int16_to_double ; CHECK: liveins: $edi ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi - ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) - ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32) - ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[TRUNC]](s8) - ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32) - ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[TRUNC1]](s8) + ; CHECK: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 16 + ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s8) + ; CHECK: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 16 + ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s8) ; CHECK: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[ASHR]](s32) ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[SITOFP]](s64) ; CHECK: $xmm0 = COPY [[ANYEXT]](s128)