Index: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -2926,10 +2926,27 @@
     Results.push_back(Tmp1);
     break;
   case ISD::SIGN_EXTEND_INREG: {
-    // NOTE: we could fall back on load/store here too for targets without
-    // SAR. However, it is doubtful that any exist.
    EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
    EVT VT = Node->getValueType(0);
+
+    // An in-register sign-extend of a boolean is a negation:
+    // 'true' (1) sign-extended is -1.
+    // 'false' (0) sign-extended is 0.
+    // However, we must mask the high bits of the source operand because the
+    // SIGN_EXTEND_INREG does not guarantee that the high bits are already zero.
+
+    // TODO: Do this for vectors too?
+    if (ExtraVT.getSizeInBits() == 1) {
+      SDValue One = DAG.getConstant(1, dl, VT);
+      SDValue And = DAG.getNode(ISD::AND, dl, VT, Node->getOperand(0), One);
+      SDValue Zero = DAG.getConstant(0, dl, VT);
+      SDValue Neg = DAG.getNode(ISD::SUB, dl, VT, Zero, And);
+      Results.push_back(Neg);
+      break;
+    }
+
+    // NOTE: we could fall back on load/store here too for targets without
+    // SRA. However, it is doubtful that any exist.
    EVT ShiftAmountTy = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
    if (VT.isVector())
      ShiftAmountTy = VT;
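
For context, the rewrite relies on the identity that sign-extending a 1-bit value equals negating its low bit. Below is a minimal standalone C++ sketch (illustrative only, not part of the patch; the function names are invented) that exhaustively compares the old shift-pair expansion against the new mask-and-negate expansion:

// Exhaustively check that both expansions of SIGN_EXTEND_INREG(x, i1) agree,
// including for inputs whose high bits are garbage (which is why the AND is
// needed). Assumes arithmetic right shift of a negative value, which all
// mainstream compilers provide and which C++20 guarantees.
#include <cassert>
#include <cstdint>

int32_t viaShifts(uint32_t X) {
  return static_cast<int32_t>(X << 31) >> 31; // old: sll 31 then sra 31
}

int32_t viaMaskAndNeg(uint32_t X) {
  return -static_cast<int32_t>(X & 1);        // new: and 1 then negate
}

int main() {
  // Roughly four billion iterations; a few seconds on a modern machine.
  for (uint64_t I = 0; I <= UINT32_MAX; ++I) {
    uint32_t X = static_cast<uint32_t>(I);
    assert(viaShifts(X) == viaMaskAndNeg(X));
  }
}
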
Index: llvm/trunk/test/CodeGen/ARM/negate-i1.ll
===================================================================
--- llvm/trunk/test/CodeGen/ARM/negate-i1.ll
+++ llvm/trunk/test/CodeGen/ARM/negate-i1.ll
@@ -5,8 +5,8 @@
 define i32 @select_i32_neg1_or_0(i1 %a) {
 ; CHECK-LABEL: select_i32_neg1_or_0:
 ; CHECK-NEXT: @ BB#0:
-; CHECK-NEXT:    lsl r0, r0, #31
-; CHECK-NEXT:    asr r0, r0, #31
+; CHECK-NEXT:    and r0, r0, #1
+; CHECK-NEXT:    rsb r0, r0, #0
 ; CHECK-NEXT:    mov pc, lr
 ;
   %b = sext i1 %a to i32
@@ -16,8 +16,7 @@
 define i32 @select_i32_neg1_or_0_zeroext(i1 zeroext %a) {
 ; CHECK-LABEL: select_i32_neg1_or_0_zeroext:
 ; CHECK-NEXT: @ BB#0:
-; CHECK-NEXT:    lsl r0, r0, #31
-; CHECK-NEXT:    asr r0, r0, #31
+; CHECK-NEXT:    rsb r0, r0, #0
 ; CHECK-NEXT:    mov pc, lr
 ;
   %b = sext i1 %a to i32
Index: llvm/trunk/test/CodeGen/Mips/llvm-ir/add.ll
===================================================================
--- llvm/trunk/test/CodeGen/Mips/llvm-ir/add.ll
+++ llvm/trunk/test/CodeGen/Mips/llvm-ir/add.ll
@@ -31,21 +31,27 @@
 ; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 -mattr=+micromips -O2 | FileCheck %s \
 ; RUN:    -check-prefixes=ALL,MMR6,MM64
 
+
+; FIXME: This code sequence is inefficient; it should be 'subu $[[T0]], $zero, $[[T0]]'.
+; That sequence is even better as it's a single instruction. See D25485 for the rest of
+; the cases where this sequence occurs.
+
 define signext i1 @add_i1(i1 signext %a, i1 signext %b) {
 entry:
 ; ALL-LABEL: add_i1:
 
-  ; NOT-R2-R6:  addu   $[[T0:[0-9]+]], $4, $5
-  ; NOT-R2-R6:  sll    $[[T0]], $[[T0]], 31
-  ; NOT-R2-R6:  sra    $2, $[[T0]], 31
-
-  ; R2-R6:      addu   $[[T0:[0-9]+]], $4, $5
-  ; R2-R6:      sll    $[[T0]], $[[T0]], 31
-  ; R2-R6:      sra    $2, $[[T0]], 31
+  ; NOT-R2-R6:  addu   $[[T0:[0-9]+]], $4, $5
+  ; NOT-R2-R6:  andi   $[[T0]], $[[T0]], 1
+  ; NOT-R2-R6:  negu   $2, $[[T0]]
+
+  ; R2-R6:      addu   $[[T0:[0-9]+]], $4, $5
+  ; R2-R6:      andi   $[[T0]], $[[T0]], 1
+  ; R2-R6:      negu   $2, $[[T0]]
 
   ; MMR6:       addu16  $[[T0:[0-9]+]], $4, $5
-  ; MMR6:       sll     $[[T1:[0-9]+]], $[[T0]], 31
-  ; MMR6:       sra     $2, $[[T1]], 31
+  ; MMR6:       andi16  $[[T0]], $[[T0]], 1
+  ; MMR6:       li16    $[[T1:[0-9]+]], 0
+  ; MMR6:       subu16  $[[T0]], $[[T1]], $[[T0]]
 
   %r = add i1 %a, %b
   ret i1 %r
@@ -303,18 +309,18 @@
 
 define signext i1 @add_i1_3(i1 signext %a) {
 ; ALL-LABEL: add_i1_3:
-
-  ; ALL:        sll     $[[T0:[0-9]+]], $4, 31
-  ; ALL:        lui     $[[T1:[0-9]+]], 32768
-
-  ; GP32:       addu    $[[T0]], $[[T0]], $[[T1]]
-  ; GP32:       sra     $[[T1]], $[[T0]], 31
-
-  ; GP64:       addu    $[[T0]], $[[T0]], $[[T1]]
-  ; GP64:       sra     $[[T1]], $[[T0]], 31
-
-  ; MMR6:       addu16  $[[T0]], $[[T0]], $[[T1]]
-  ; MMR6:       sra     $[[T0]], $[[T0]], 31
+  ; GP32:       addiu   $[[T0:[0-9]+]], $4, 1
+  ; GP32:       andi    $[[T0]], $[[T0]], 1
+  ; GP32:       negu    $2, $[[T0]]
+
+  ; GP64:       addiu   $[[T0:[0-9]+]], $4, 1
+  ; GP64:       andi    $[[T0]], $[[T0]], 1
+  ; GP64:       negu    $2, $[[T0]]
+
+  ; MMR6:       addiur2 $[[T0:[0-9]+]], $4, 1
+  ; MMR6:       andi16  $[[T0]], $[[T0]], 1
+  ; MMR6:       li16    $[[T1:[0-9]+]], 0
+  ; MMR6:       subu16  $2, $[[T1]], $[[T0]]
 
   %r = add i1 3, %a
   ret i1 %r
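
A note on why the 'andi' is still needed here even though both inputs arrive sign-extended: i1 addition is addition modulo 2, and the 32-bit sum of two values in {0, -1} can be -2, whose low bit (the real i1 result) disagrees with its sign bit. A small self-contained C++ model of add_i1 (hypothetical names, not from the patch):

// Model of the lowered add_i1: the mask truncates the sum back to i1
// before the negation rebuilds the sign extension.
#include <cassert>
#include <cstdint>

int32_t addI1Signext(int32_t A, int32_t B) { // A and B are 0 or -1
  int32_t Sum = A + B;   // 0, -1, or -2
  int32_t Bit = Sum & 1; // truncate to i1: the only defined bit
  return -Bit;           // sign-extend i1 back to i32
}

int main() {
  assert(addI1Signext( 0,  0) ==  0); // false + false = false
  assert(addI1Signext( 0, -1) == -1); // false + true  = true
  assert(addI1Signext(-1, -1) ==  0); // true + true = false (mod 2)
  // Negating the unmasked sum -2 would give +2, which is not a valid
  // sign-extended i1, hence the 'andi'.
}
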
Index: llvm/trunk/test/CodeGen/Mips/llvm-ir/mul.ll
===================================================================
--- llvm/trunk/test/CodeGen/Mips/llvm-ir/mul.ll
+++ llvm/trunk/test/CodeGen/Mips/llvm-ir/mul.ll
@@ -27,7 +27,7 @@
 ; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -relocation-model=pic | \
 ; RUN:    FileCheck %s -check-prefixes=MM32,MM32R6
 ; RUN: llc < %s -march=mips -mcpu=mips64r6 -mattr=+micromips -target-abi n64 -relocation-model=pic | \
-; RUN:    FileCheck %s -check-prefix=64R6
+; RUN:    FileCheck %s -check-prefix=MM64R6
 
 define signext i1 @mul_i1(i1 signext %a, i1 signext %b) {
 entry:
@@ -35,33 +35,39 @@
 
   ; M2:         mult    $4, $5
   ; M2:         mflo    $[[T0:[0-9]+]]
-  ; M2:         sll     $[[T0]], $[[T0]], 31
-  ; M2:         sra     $2, $[[T0]], 31
+  ; M2:         andi    $[[T0]], $[[T0]], 1
+  ; M2:         negu    $2, $[[T0]]
 
   ; 32R1-R5:    mul     $[[T0:[0-9]+]], $4, $5
-  ; 32R1-R5:    sll     $[[T0]], $[[T0]], 31
-  ; 32R1-R5:    sra     $2, $[[T0]], 31
+  ; 32R1-R5:    andi    $[[T0]], $[[T0]], 1
+  ; 32R1-R5:    negu    $2, $[[T0]]
 
   ; 32R6:       mul     $[[T0:[0-9]+]], $4, $5
-  ; 32R6:       sll     $[[T0]], $[[T0]], 31
-  ; 32R6:       sra     $2, $[[T0]], 31
+  ; 32R6:       andi    $[[T0]], $[[T0]], 1
+  ; 32R6:       negu    $2, $[[T0]]
 
   ; M4:         mult    $4, $5
   ; M4:         mflo    $[[T0:[0-9]+]]
-  ; M4:         sll     $[[T0]], $[[T0]], 31
-  ; M4:         sra     $2, $[[T0]], 31
+  ; M4:         andi    $[[T0]], $[[T0]], 1
+  ; M4:         negu    $2, $[[T0]]
 
   ; 64R1-R5:    mul     $[[T0:[0-9]+]], $4, $5
-  ; 64R1-R5:    sll     $[[T0]], $[[T0]], 31
-  ; 64R1-R5:    sra     $2, $[[T0]], 31
+  ; 64R1-R5:    andi    $[[T0]], $[[T0]], 1
+  ; 64R1-R5:    negu    $2, $[[T0]]
 
   ; 64R6:       mul     $[[T0:[0-9]+]], $4, $5
-  ; 64R6:       sll     $[[T0]], $[[T0]], 31
-  ; 64R6:       sra     $2, $[[T0]], 31
+  ; 64R6:       andi    $[[T0]], $[[T0]], 1
+  ; 64R6:       negu    $2, $[[T0]]
+
+  ; MM64R6:     mul     $[[T0:[0-9]+]], $4, $5
+  ; MM64R6:     andi16  $[[T0]], $[[T0]], 1
+  ; MM64R6:     li16    $[[T1:[0-9]+]], 0
+  ; MM64R6:     subu16  $2, $[[T1]], $[[T0]]
 
   ; MM32:       mul     $[[T0:[0-9]+]], $4, $5
-  ; MM32:       sll     $[[T0]], $[[T0]], 31
-  ; MM32:       sra     $2, $[[T0]], 31
+  ; MM32:       andi16  $[[T0]], $[[T0]], 1
+  ; MM32:       li16    $[[T1:[0-9]+]], 0
+  ; MM32:       subu16  $2, $[[T1]], $[[T0]]
 
   %r = mul i1 %a, %b
   ret i1 %r
@@ -101,6 +107,9 @@
   ; 64R6:       mul     $[[T0:[0-9]+]], $4, $5
   ; 64R6:       seb     $2, $[[T0]]
 
+  ; MM64R6:     mul     $[[T0:[0-9]+]], $4, $5
+  ; MM64R6:     seb     $2, $[[T0]]
+
   ; MM32:       mul     $[[T0:[0-9]+]], $4, $5
   ; MM32:       seb     $2, $[[T0]]
 
@@ -142,6 +151,9 @@
   ; 64R6:       mul     $[[T0:[0-9]+]], $4, $5
   ; 64R6:       seh     $2, $[[T0]]
 
+  ; MM64R6:     mul     $[[T0:[0-9]+]], $4, $5
+  ; MM64R6:     seh     $2, $[[T0]]
+
   ; MM32:       mul     $[[T0:[0-9]+]], $4, $5
   ; MM32:       seh     $2, $[[T0]]
 
@@ -161,6 +173,7 @@
   ; 64R1-R5:    mul     $2, $4, $5
 
   ; 64R6:       mul     $2, $4, $5
+  ; MM64R6:     mul     $2, $4, $5
 
   ; MM32:       mul     $2, $4, $5
 
@@ -204,6 +217,7 @@
   ; 64R1-R5:    mflo    $2
 
   ; 64R6:       dmul    $2, $4, $5
+  ; MM64R6:     dmul    $2, $4, $5
 
   ; MM32R3:     multu   $[[T0:[0-9]+]], $7
   ; MM32R3:     mflo    $[[T1:[0-9]+]]
@@ -247,6 +261,13 @@
   ; 64R6:       daddu   $2, $[[T1]], $[[T0]]
   ; 64R6-DAG:   dmul    $3, $5, $7
 
+  ; MM64R6-DAG: dmul    $[[T1:[0-9]+]], $5, $6
+  ; MM64R6:     dmuhu   $[[T2:[0-9]+]], $5, $7
+  ; MM64R6:     daddu   $[[T3:[0-9]+]], $[[T2]], $[[T1]]
+  ; MM64R6-DAG: dmul    $[[T0:[0-9]+]], $4, $7
+  ; MM64R6:     daddu   $2, $[[T1]], $[[T0]]
+  ; MM64R6-DAG: dmul    $3, $5, $7
+
   ; MM32:       lw      $25, %call16(__multi3)($16)
 
   %r = mul i128 %a, %b
Index: llvm/trunk/test/CodeGen/Mips/llvm-ir/sdiv.ll
===================================================================
--- llvm/trunk/test/CodeGen/Mips/llvm-ir/sdiv.ll
+++ llvm/trunk/test/CodeGen/Mips/llvm-ir/sdiv.ll
@@ -40,26 +40,28 @@
   ; NOT-R6:     div     $zero, $4, $5
   ; NOT-R6:     teq     $5, $zero, 7
   ; NOT-R6:     mflo    $[[T0:[0-9]+]]
-  ; FIXME: The sll/sra instructions are redundant since div is signed.
-  ; NOT-R6:     sll     $[[T1:[0-9]+]], $[[T0]], 31
-  ; NOT-R6:     sra     $2, $[[T1]], 31
+  ; FIXME: The andi/negu instructions are redundant since div is signed.
+  ; NOT-R6:     andi    $[[T0]], $[[T0]], 1
+  ; NOT-R6:     negu    $2, $[[T0]]
 
   ; R6:         div     $[[T0:[0-9]+]], $4, $5
   ; R6:         teq     $5, $zero, 7
-  ; FIXME: The sll/sra instructions are redundant since div is signed.
-  ; R6:         sll     $[[T1:[0-9]+]], $[[T0]], 31
-  ; R6:         sra     $2, $[[T1]], 31
+  ; FIXME: The andi/negu instructions are redundant since div is signed.
+  ; R6:         andi    $[[T0]], $[[T0]], 1
+  ; R6:         negu    $2, $[[T0]]
 
   ; MMR3:       div     $zero, $4, $5
   ; MMR3:       teq     $5, $zero, 7
   ; MMR3:       mflo    $[[T0:[0-9]+]]
-  ; MMR3:       sll     $[[T1:[0-9]+]], $[[T0]], 31
-  ; MMR3:       sra     $2, $[[T1]], 31
+  ; MMR3:       andi16  $[[T0]], $[[T0]], 1
+  ; MMR3:       li16    $[[T1:[0-9]+]], 0
+  ; MMR3:       subu16  $2, $[[T1]], $[[T0]]
 
   ; MMR6:       div     $[[T0:[0-9]+]], $4, $5
   ; MMR6:       teq     $5, $zero, 7
-  ; MMR6:       sll     $[[T1:[0-9]+]], $[[T0]], 31
-  ; MMR6:       sra     $2, $[[T1]], 31
+  ; MMR6:       andi16  $[[T0]], $[[T0]], 1
+  ; MMR6:       li16    $[[T1:[0-9]+]], 0
+  ; MMR6:       subu16  $2, $[[T1]], $[[T0]]
 
   %r = sdiv i1 %a, %b
   ret i1 %r
Index: llvm/trunk/test/CodeGen/Mips/llvm-ir/srem.ll
===================================================================
--- llvm/trunk/test/CodeGen/Mips/llvm-ir/srem.ll
+++ llvm/trunk/test/CodeGen/Mips/llvm-ir/srem.ll
@@ -40,24 +40,26 @@
   ; NOT-R6:     div     $zero, $4, $5
   ; NOT-R6:     teq     $5, $zero, 7
   ; NOT-R6:     mfhi    $[[T0:[0-9]+]]
-  ; NOT-R6:     sll     $[[T1:[0-9]+]], $[[T0]], 31
-  ; NOT-R6:     sra     $2, $[[T1]], 31
+  ; NOT-R6:     andi    $[[T0]], $[[T0]], 1
+  ; NOT-R6:     negu    $2, $[[T0]]
 
   ; R6:         mod     $[[T0:[0-9]+]], $4, $5
   ; R6:         teq     $5, $zero, 7
-  ; R6:         sll     $[[T3:[0-9]+]], $[[T0]], 31
-  ; R6:         sra     $2, $[[T3]], 31
+  ; R6:         andi    $[[T0]], $[[T0]], 1
+  ; R6:         negu    $2, $[[T0]]
 
   ; MMR3:       div     $zero, $4, $5
   ; MMR3:       teq     $5, $zero, 7
   ; MMR3:       mfhi    $[[T0:[0-9]+]]
-  ; MMR3:       sll     $[[T1:[0-9]+]], $[[T0]], 31
-  ; MMR3:       sra     $2, $[[T1]], 31
+  ; MMR3:       andi16  $[[T0]], $[[T0]], 1
+  ; MMR3:       li16    $[[T1:[0-9]+]], 0
+  ; MMR3:       subu16  $2, $[[T1]], $[[T0]]
 
   ; MMR6:       mod     $[[T0:[0-9]+]], $4, $5
   ; MMR6:       teq     $5, $zero, 7
-  ; MMR6:       sll     $[[T1:[0-9]+]], $[[T0]], 31
-  ; MMR6:       sra     $2, $[[T1]], 31
+  ; MMR6:       andi16  $[[T0]], $[[T0]], 1
+  ; MMR6:       li16    $[[T1:[0-9]+]], 0
+  ; MMR6:       subu16  $2, $[[T1]], $[[T0]]
 
   %r = srem i1 %a, %b
   ret i1 %r
Index: llvm/trunk/test/CodeGen/Mips/llvm-ir/sub.ll
===================================================================
--- llvm/trunk/test/CodeGen/Mips/llvm-ir/sub.ll
+++ llvm/trunk/test/CodeGen/Mips/llvm-ir/sub.ll
@@ -36,12 +36,13 @@
 ; ALL-LABEL: sub_i1:
 
   ; NOT-MM:     subu    $[[T0:[0-9]+]], $4, $5
-  ; NOT-MM:     sll     $[[T0]], $[[T0]], 31
-  ; NOT-MM:     sra     $2, $[[T0]], 31
+  ; NOT-MM:     andi    $[[T0]], $[[T0]], 1
+  ; NOT-MM:     negu    $2, $[[T0]]
 
   ; MM:         subu16  $[[T0:[0-9]+]], $4, $5
-  ; MM:         sll     $[[T1:[0-9]+]], $[[T0]], 31
-  ; MM:         sra     $[[T0]], $[[T1]], 31
+  ; MM:         andi16  $[[T0]], $[[T0]], 1
+  ; MM:         li16    $[[T1:[0-9]+]], 0
+  ; MM:         subu16  $2, $[[T1]], $[[T0]]
 
   %r = sub i1 %a, %b
   ret i1 %r
Index: llvm/trunk/test/CodeGen/Mips/llvm-ir/urem.ll
===================================================================
--- llvm/trunk/test/CodeGen/Mips/llvm-ir/urem.ll
+++ llvm/trunk/test/CodeGen/Mips/llvm-ir/urem.ll
@@ -42,30 +42,30 @@
   ; NOT-R6:     divu    $zero, $[[T1]], $[[T0]]
   ; NOT-R6:     teq     $[[T0]], $zero, 7
   ; NOT-R6:     mfhi    $[[T2:[0-9]+]]
-  ; NOT-R6:     sll     $[[T3:[0-9]+]], $[[T2]], 31
-  ; NOT-R6:     sra     $2, $[[T3]], 31
+  ; NOT-R6:     andi    $[[T0]], $[[T0]], 1
+  ; NOT-R6:     negu    $2, $[[T0]]
 
   ; R6:         andi    $[[T0:[0-9]+]], $5, 1
   ; R6:         andi    $[[T1:[0-9]+]], $4, 1
   ; R6:         modu    $[[T2:[0-9]+]], $[[T1]], $[[T0]]
   ; R6:         teq     $[[T0]], $zero, 7
-  ; R6:         sll     $[[T3:[0-9]+]], $[[T2]], 31
-  ; R6:         sra     $2, $[[T3]], 31
+  ; R6:         negu    $2, $[[T2]]
 
   ; MMR3:       andi16  $[[T0:[0-9]+]], $5, 1
   ; MMR3:       andi16  $[[T1:[0-9]+]], $4, 1
   ; MMR3:       divu    $zero, $[[T1]], $[[T0]]
   ; MMR3:       teq     $[[T0]], $zero, 7
   ; MMR3:       mfhi    $[[T2:[0-9]+]]
-  ; MMR3:       sll     $[[T3:[0-9]+]], $[[T2]], 31
-  ; MMR3:       sra     $2, $[[T3]], 31
+  ; MMR3:       andi16  $[[T0]], $[[T0]], 1
+  ; MMR3:       li16    $[[T1:[0-9]+]], 0
+  ; MMR3:       subu16  $2, $[[T1]], $[[T0]]
 
   ; MMR6:       andi16  $[[T0:[0-9]+]], $5, 1
   ; MMR6:       andi16  $[[T1:[0-9]+]], $4, 1
   ; MMR6:       modu    $[[T2:[0-9]+]], $[[T1]], $[[T0]]
   ; MMR6:       teq     $[[T0]], $zero, 7
-  ; MMR6:       sll     $[[T3:[0-9]+]], $[[T2]], 31
-  ; MMR6:       sra     $2, $[[T3]], 31
+  ; MMR6:       li16    $[[T3:[0-9]+]], 0
+  ; MMR6:       subu16  $2, $[[T3]], $[[T2]]
 
   %r = urem i1 %a, %b
   ret i1 %r
Index: llvm/trunk/test/CodeGen/Mips/select.ll
===================================================================
--- llvm/trunk/test/CodeGen/Mips/select.ll
+++ llvm/trunk/test/CodeGen/Mips/select.ll
@@ -140,9 +140,10 @@
 ; 32R2-DAG:    mtc1    $6, $[[F1:f0]]
 ; 32R2:        movn.s  $[[F1]], $[[F0]], $4
 
+; 32R6:        sltu    $[[T0:[0-9]+]], $zero, $4
+; 32R6:        negu    $[[T0]], $[[T0]]
 ; 32R6-DAG:    mtc1    $5, $[[F0:f[0-9]+]]
 ; 32R6-DAG:    mtc1    $6, $[[F1:f[0-9]+]]
-; 32R6:        sltu    $[[T0:[0-9]+]], $zero, $4
 ; 32R6:        mtc1    $[[T0]], $[[CC:f0]]
 ; 32R6:        sel.s   $[[CC]], $[[F1]], $[[F0]]
 
Index: llvm/trunk/test/CodeGen/PowerPC/negate-i1.ll
===================================================================
--- llvm/trunk/test/CodeGen/PowerPC/negate-i1.ll
+++ llvm/trunk/test/CodeGen/PowerPC/negate-i1.ll
@@ -1,12 +1,12 @@
-; RUN: llc < %s -mtriple=powerpc64-apple-darwin | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s
 ; PR30661 - https://llvm.org/bugs/show_bug.cgi?id=30661
 
 define i32 @select_i32_neg1_or_0(i1 %a) {
 ; CHECK-LABEL: select_i32_neg1_or_0:
-; CHECK: ; BB#0:
-; CHECK-NEXT: sldi r2, r3, 63
-; CHECK-NEXT: sradi r3, r2, 63
+; CHECK: # BB#0:
+; CHECK-NEXT: clrldi 3, 3, 63
+; CHECK-NEXT: neg 3, 3
 ; CHECK-NEXT: blr
 ;
   %b = sext i1 %a to i32
@@ -15,9 +15,8 @@
 define i32 @select_i32_neg1_or_0_zeroext(i1 zeroext %a) {
 ; CHECK-LABEL: select_i32_neg1_or_0_zeroext:
-; CHECK: ; BB#0:
-; CHECK-NEXT: sldi r2, r3, 63
-; CHECK-NEXT: sradi r3, r2, 63
+; CHECK: # BB#0:
+; CHECK-NEXT: neg 3, 3
 ; CHECK-NEXT: blr
 ;
   %b = sext i1 %a to i32
Index: llvm/trunk/test/CodeGen/SystemZ/branch-07.ll
===================================================================
--- llvm/trunk/test/CodeGen/SystemZ/branch-07.ll
+++ llvm/trunk/test/CodeGen/SystemZ/branch-07.ll
@@ -129,8 +129,8 @@
 ; CHECK-LABEL: f9:
 ; CHECK: ipm [[REG:%r[0-5]]]
 ; CHECK: afi [[REG]], -268435456
-; CHECK: sllg [[REG2:%r[0-5]]], [[REG]], 32
-; CHECK: srag {{%r[0-5]}}, [[REG2]], 63
+; CHECK: risbg [[REG2:%r[0-5]]], [[REG]], 63, 191, 33
+; CHECK: lcgr {{%r[0-5]}}, [[REG2]]
 ; CHECK: br %r14
   %avec = bitcast i64 %a to <2 x i32>
   %bvec = bitcast i64 %b to <2 x i32>
@@ -145,8 +145,8 @@
 ; CHECK-LABEL: f10:
 ; CHECK: ipm [[REG:%r[0-5]]]
 ; CHECK: afi [[REG]], 1879048192
-; CHECK: sllg [[REG2:%r[0-5]]], [[REG]], 32
-; CHECK: srag {{%r[0-5]}}, [[REG2]], 63
+; CHECK: risbg [[REG2:%r[0-5]]], [[REG]], 63, 191, 33
+; CHECK: lcgr {{%r[0-5]}}, [[REG2]]
 ; CHECK: br %r14
   %avec = bitcast i64 %a to <2 x i32>
   %bvec = bitcast i64 %b to <2 x i32>
Index: llvm/trunk/test/CodeGen/SystemZ/risbg-01.ll
===================================================================
--- llvm/trunk/test/CodeGen/SystemZ/risbg-01.ll
+++ llvm/trunk/test/CodeGen/SystemZ/risbg-01.ll
@@ -472,9 +472,9 @@
 ; when testing whether the shifted-in bits of the shift right were significant.
 define i64 @f42(i1 %x) {
 ; CHECK-LABEL: f42:
-; CHECK: sll %r2, 31
-; CHECK: sra %r2, 31
-; CHECK: llgcr %r2, %r2
+; CHECK: nilf %r2, 1
+; CHECK: lcr %r0, %r2
+; CHECK: llgcr %r2, %r0
 ; CHECK: br %r14
   %ext = sext i1 %x to i8
   %ext2 = zext i8 %ext to i64
Index: llvm/trunk/test/CodeGen/SystemZ/shift-10.ll
===================================================================
--- llvm/trunk/test/CodeGen/SystemZ/shift-10.ll
+++ llvm/trunk/test/CodeGen/SystemZ/shift-10.ll
@@ -5,8 +5,8 @@
 ; Test a shift right followed by a sign extension. This can use two shifts.
 define i64 @f1(i32 %a) {
 ; CHECK-LABEL: f1:
-; CHECK: sllg [[REG:%r[0-5]]], %r2, 62
-; CHECK: srag %r2, [[REG]], 63
+; CHECK: risbg %r0, %r2, 63, 191, 63
+; CHECK: lcgr %r2, %r0
 ; CHECK: br %r14
   %shr = lshr i32 %a, 1
   %trunc = trunc i32 %shr to i1
@@ -18,8 +18,8 @@
 ; ashr/sext pair.
 define i64 @f2(i32 %a) {
 ; CHECK-LABEL: f2:
-; CHECK: sllg [[REG:%r[0-5]]], %r2, 33
-; CHECK: srag %r2, [[REG]], 63
+; CHECK: risbg %r0, %r2, 63, 191, 34
+; CHECK: lcgr %r2, %r0
 ; CHECK: br %r14
   %shr = lshr i32 %a, 30
   %trunc = trunc i32 %shr to i1
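
The shift-10.ll changes are the same expansion reached through an lshr/trunc/sext chain: extract a single bit of a word and sign-extend it, which SystemZ now folds into risbg (rotate then insert selected bits) followed by lcgr (load complement). A C++ sketch of what f1 and f2 compute (invented names, not LLVM API):

// Take bit N of a 32-bit value and sign-extend it to 64 bits.
#include <cstdint>

int64_t signExtendBit(uint32_t A, unsigned N) {
  uint32_t Bit = (A >> N) & 1;       // lshr + trunc to i1
  return -static_cast<int64_t>(Bit); // sext i1 to i64: 0 or -1
}
// f1 corresponds to signExtendBit(a, 1); f2 to signExtendBit(a, 30).
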
Index: llvm/trunk/test/CodeGen/X86/negate-i1.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/negate-i1.ll
+++ llvm/trunk/test/CodeGen/X86/negate-i1.ll
@@ -5,16 +5,16 @@
 define i8 @select_i8_neg1_or_0(i1 %a) {
 ; X64-LABEL: select_i8_neg1_or_0:
 ; X64:       # BB#0:
-; X64-NEXT:    shlb $7, %dil
-; X64-NEXT:    sarb $7, %dil
+; X64-NEXT:    andb $1, %dil
+; X64-NEXT:    negb %dil
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: select_i8_neg1_or_0:
 ; X32:       # BB#0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    shlb $7, %al
-; X32-NEXT:    sarb $7, %al
+; X32-NEXT:    andb $1, %al
+; X32-NEXT:    negb %al
 ; X32-NEXT:    retl
 ;
   %b = sext i1 %a to i8
@@ -24,16 +24,14 @@
 define i8 @select_i8_neg1_or_0_zeroext(i1 zeroext %a) {
 ; X64-LABEL: select_i8_neg1_or_0_zeroext:
 ; X64:       # BB#0:
-; X64-NEXT:    shlb $7, %dil
-; X64-NEXT:    sarb $7, %dil
+; X64-NEXT:    negb %dil
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: select_i8_neg1_or_0_zeroext:
 ; X32:       # BB#0:
 ; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    shlb $7, %al
-; X32-NEXT:    sarb $7, %al
+; X32-NEXT:    negb %al
 ; X32-NEXT:    retl
 ;
   %b = sext i1 %a to i8
@@ -43,16 +41,16 @@
 define i16 @select_i16_neg1_or_0(i1 %a) {
 ; X64-LABEL: select_i16_neg1_or_0:
 ; X64:       # BB#0:
-; X64-NEXT:    shll $15, %edi
-; X64-NEXT:    sarw $15, %di
+; X64-NEXT:    andl $1, %edi
+; X64-NEXT:    negl %edi
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: select_i16_neg1_or_0:
 ; X32:       # BB#0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    shll $15, %eax
-; X32-NEXT:    sarw $15, %ax
+; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    andl $1, %eax
+; X32-NEXT:    negl %eax
 ; X32-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
 ; X32-NEXT:    retl
 ;
@@ -64,16 +62,14 @@
 define i16 @select_i16_neg1_or_0_zeroext(i1 zeroext %a) {
 ; X64-LABEL: select_i16_neg1_or_0_zeroext:
 ; X64:       # BB#0:
 ; X64-NEXT:    movzbl %dil, %eax
-; X64-NEXT:    shll $15, %eax
-; X64-NEXT:    sarw $15, %ax
+; X64-NEXT:    negl %eax
 ; X64-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: select_i16_neg1_or_0_zeroext:
 ; X32:       # BB#0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    shll $15, %eax
-; X32-NEXT:    sarw $15, %ax
+; X32-NEXT:    negl %eax
 ; X32-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
 ; X32-NEXT:    retl
 ;
@@ -84,16 +80,16 @@
 define i32 @select_i32_neg1_or_0(i1 %a) {
 ; X64-LABEL: select_i32_neg1_or_0:
 ; X64:       # BB#0:
-; X64-NEXT:    shll $31, %edi
-; X64-NEXT:    sarl $31, %edi
+; X64-NEXT:    andl $1, %edi
+; X64-NEXT:    negl %edi
 ; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: select_i32_neg1_or_0:
 ; X32:       # BB#0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    shll $31, %eax
-; X32-NEXT:    sarl $31, %eax
+; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    andl $1, %eax
+; X32-NEXT:    negl %eax
 ; X32-NEXT:    retl
 ;
   %b = sext i1 %a to i32
@@ -104,15 +100,13 @@
 define i32 @select_i32_neg1_or_0_zeroext(i1 zeroext %a) {
 ; X64-LABEL: select_i32_neg1_or_0_zeroext:
 ; X64:       # BB#0:
 ; X64-NEXT:    movzbl %dil, %eax
-; X64-NEXT:    shll $31, %eax
-; X64-NEXT:    sarl $31, %eax
+; X64-NEXT:    negl %eax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: select_i32_neg1_or_0_zeroext:
 ; X32:       # BB#0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    shll $31, %eax
-; X32-NEXT:    sarl $31, %eax
+; X32-NEXT:    negl %eax
 ; X32-NEXT:    retl
 ;
   %b = sext i1 %a to i32
@@ -123,16 +117,16 @@
 define i64 @select_i64_neg1_or_0(i1 %a) {
 ; X64-LABEL: select_i64_neg1_or_0:
 ; X64:       # BB#0:
 ; X64-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
-; X64-NEXT:    shlq $63, %rdi
-; X64-NEXT:    sarq $63, %rdi
+; X64-NEXT:    andl $1, %edi
+; X64-NEXT:    negq %rdi
 ; X64-NEXT:    movq %rdi, %rax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: select_i64_neg1_or_0:
 ; X32:       # BB#0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    shll $31, %eax
-; X32-NEXT:    sarl $31, %eax
+; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    andl $1, %eax
+; X32-NEXT:    negl %eax
 ; X32-NEXT:    movl %eax, %edx
 ; X32-NEXT:    retl
 ;
@@ -144,15 +138,13 @@
 define i64 @select_i64_neg1_or_0_zeroext(i1 zeroext %a) {
 ; X64-LABEL: select_i64_neg1_or_0_zeroext:
 ; X64:       # BB#0:
 ; X64-NEXT:    movzbl %dil, %eax
-; X64-NEXT:    shlq $63, %rax
-; X64-NEXT:    sarq $63, %rax
+; X64-NEXT:    negq %rax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: select_i64_neg1_or_0_zeroext:
 ; X32:       # BB#0:
 ; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    shll $31, %eax
-; X32-NEXT:    sarl $31, %eax
+; X32-NEXT:    negl %eax
 ; X32-NEXT:    movl %eax, %edx
 ; X32-NEXT:    retl
 ;
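
One pattern worth noting across the ARM, PowerPC, and X86 tests: in the *_zeroext variants the mask disappears entirely, because a zero-extended i1 argument is already known to be 0 or 1, so the AND with 1 is a no-op (presumably folded away by the DAG's known-bits simplification) and only the negate survives. A minimal sketch of that precondition:

// If X is known to be 0 or 1, then (X & 1) == X, so -(X & 1) simplifies
// to -X and the mask instruction drops out of the final code.
#include <cassert>
#include <cstdint>

int32_t negateBool(uint32_t X) { // precondition: X is 0 or 1 (zeroext i1)
  return -static_cast<int32_t>(X);
}

int main() {
  assert(negateBool(0) ==  0);
  assert(negateBool(1) == -1);
}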