Index: lib/Target/Mips/MipsCallingConv.td =================================================================== --- lib/Target/Mips/MipsCallingConv.td +++ lib/Target/Mips/MipsCallingConv.td @@ -180,6 +180,9 @@ ]>; def RetCC_MipsN : CallingConv<[ + // Promote i1/i8/i16/i32 return values to i64 + CCIfType<[i1, i8, i16, i32], CCPromoteToType<i64>>, + // f128 needs to be handled similarly to f32 and f64. However, f128 is not // legal and is lowered to i128 which is further lowered to a pair of i64's. // This presents us with a problem for the calling convention since hard-float Index: lib/Target/Mips/MipsISelLowering.cpp =================================================================== --- lib/Target/Mips/MipsISelLowering.cpp +++ lib/Target/Mips/MipsISelLowering.cpp @@ -430,6 +430,7 @@ setTargetDAGCombine(ISD::AND); setTargetDAGCombine(ISD::OR); setTargetDAGCombine(ISD::ADD); + setTargetDAGCombine(ISD::AssertSext); setMinFunctionAlignment(Subtarget.isGP64bit() ? 3 : 2); @@ -815,6 +816,33 @@ return DAG.getNode(ISD::ADD, DL, ValTy, Add1, Lo); } +static SDValue performAssertSextCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const MipsSubtarget &Subtarget) { + SDValue N0 = N->getOperand(0); + EVT NewVT = cast<VTSDNode>(N->getOperand(1))->getVT(); + + // fold (AssertSext (trunc (AssertSext x) -> (trunc (AssertSext x)) + // if the type of the extension of the innermost AssertSext node is + // smaller than that of the outermost node, eg: + // (AssertSext (trunc:i32 (AssertSext:i64 x, i8)), i32) -> + // (trunc (AssertSext x, i8)) + if (N0.getOpcode() == ISD::TRUNCATE && + N0.getOperand(0).getOpcode() == ISD::AssertSext) { + SDValue OuterAssertSext = N0.getOperand(0); + EVT OuterVT = cast<VTSDNode>(OuterAssertSext->getOperand(1))->getVT(); + + if (NewVT.bitsLT(OuterVT)) { + SDValue NewAssertSext = + DAG.getNode(ISD::AssertSext, SDLoc(N), OuterAssertSext.getValueType(), + OuterAssertSext.getOperand(0), DAG.getValueType(NewVT)); + return DAG.getNode(ISD::TRUNCATE, SDLoc(N), 
N->getValueType(0), NewAssertSext); + } + } + + return SDValue(); +} + SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; @@ -836,6 +864,8 @@ return performORCombine(N, DAG, DCI, Subtarget); case ISD::ADD: return performADDCombine(N, DAG, DCI, Subtarget); + case ISD::AssertSext: + return performAssertSextCombine(N, DAG, DCI, Subtarget); } return SDValue(); Index: test/CodeGen/Mips/atomic.ll =================================================================== --- test/CodeGen/Mips/atomic.ll +++ test/CodeGen/Mips/atomic.ll @@ -97,7 +97,7 @@ @y = common global i8 0, align 1 -define signext i8 @AtomicLoadAdd8(i8 signext %incr) nounwind { +define i8 @AtomicLoadAdd8(i8 signext %incr) nounwind { entry: %0 = atomicrmw add i8* @y, i8 %incr monotonic ret i8 %0 @@ -137,7 +137,7 @@ ; HAS-SEB-SEH: seb $2, $[[R16]] } -define signext i8 @AtomicLoadSub8(i8 signext %incr) nounwind { +define i8 @AtomicLoadSub8(i8 signext %incr) nounwind { entry: %0 = atomicrmw sub i8* @y, i8 %incr monotonic ret i8 %0 @@ -177,7 +177,7 @@ ; HAS-SEB-SEH:seb $2, $[[R16]] } -define signext i8 @AtomicLoadNand8(i8 signext %incr) nounwind { +define i8 @AtomicLoadNand8(i8 signext %incr) nounwind { entry: %0 = atomicrmw nand i8* @y, i8 %incr monotonic ret i8 %0 @@ -218,7 +218,7 @@ ; HAS-SEB-SEH: seb $2, $[[R16]] } -define signext i8 @AtomicSwap8(i8 signext %newval) nounwind { +define i8 @AtomicSwap8(i8 signext %newval) nounwind { entry: %0 = atomicrmw xchg i8* @y, i8 %newval monotonic ret i8 %0 @@ -258,7 +258,7 @@ } -define signext i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind { +define i8 @AtomicCmpSwap8(i8 signext %oldval, i8 signext %newval) nounwind { entry: %pair0 = cmpxchg i8* @y, i8 %oldval, i8 %newval monotonic monotonic %0 = extractvalue { i8, i1 } %pair0, 0 @@ -350,7 +350,7 @@ ; Check one i16 so that we cover the seh sign extend @z = common global i16 0, align 1 -define signext i16 @AtomicLoadAdd16(i16 signext 
%incr) nounwind { +define i16 @AtomicLoadAdd16(i16 signext %incr) nounwind { entry: %0 = atomicrmw add i16* @z, i16 %incr monotonic ret i16 %0 Index: test/CodeGen/Mips/cconv/return.ll =================================================================== --- test/CodeGen/Mips/cconv/return.ll +++ test/CodeGen/Mips/cconv/return.ll @@ -22,7 +22,7 @@ @float = global float zeroinitializer @double = global double zeroinitializer -define i8 @reti8() nounwind { +define signext i8 @reti8() nounwind { entry: %0 = load volatile i8, i8* @byte ret i8 %0 @@ -30,13 +30,13 @@ ; ALL-LABEL: reti8: ; O32-DAG: lui [[R1:\$[0-9]+]], %hi(byte) -; O32-DAG: lbu $2, %lo(byte)([[R1]]) +; O32-DAG: lb $2, %lo(byte)([[R1]]) ; N32-DAG: lui [[R1:\$[0-9]+]], %hi(byte) -; N32-DAG: lbu $2, %lo(byte)([[R1]]) +; N32-DAG: lb $2, %lo(byte)([[R1]]) ; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(byte)( -; N64-DAG: lbu $2, 0([[R1]]) +; N64-DAG: lb $2, 0([[R1]]) -define i32 @reti32() nounwind { +define signext i32 @reti32() nounwind { entry: %0 = load volatile i32, i32* @word ret i32 %0 @@ -46,11 +46,13 @@ ; O32-DAG: lui [[R1:\$[0-9]+]], %hi(word) ; O32-DAG: lw $2, %lo(word)([[R1]]) ; N32-DAG: lui [[R1:\$[0-9]+]], %hi(word) -; N32-DAG: lw $2, %lo(word)([[R1]]) +; N32-DAG: lw [[R2:\$[0-9]+]], %lo(word)([[R1]]) +; N32-DAG: sll $2, [[R2]], 0 ; N64-DAG: ld [[R1:\$[0-9]+]], %got_disp(word)( -; N64-DAG: lw $2, 0([[R1]]) +; N64-DAG: lw [[R2:\$[0-9]+]], 0([[R1]]) +; N64-DAG: sll $2, [[R2]], 0 -define i64 @reti64() nounwind { +define signext i64 @reti64() nounwind { entry: %0 = load volatile i64, i64* @dword ret i64 %0 Index: test/CodeGen/Mips/delay-slot-kill.ll =================================================================== --- test/CodeGen/Mips/delay-slot-kill.ll +++ test/CodeGen/Mips/delay-slot-kill.ll @@ -1,6 +1,4 @@ ; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s -; We have to XFAIL this temporarily because of the reversion of r229675. 
-; XFAIL: * ; Currently, the following IR assembly generates a KILL instruction between ; the bitwise-and instruction and the return instruction. We verify that the Index: test/CodeGen/Mips/fcmp.ll =================================================================== --- test/CodeGen/Mips/fcmp.ll +++ test/CodeGen/Mips/fcmp.ll @@ -29,21 +29,17 @@ ; 32-C-DAG: c.eq.s $f12, $f14 ; 32-C: movf $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.eq.s $f12, $f13 -; 64-C-DAG: movf $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] ; 32-CMP-DAG: andi $2, $[[T1]], 1 -; FIXME: The sign extension below is redundant. ; 64-CMP-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f13 ; 64-CMP-DAG: dmfc1 $[[T1:[0-9]+]], $[[T0]] -; 64-CMP-DAG: sll $[[T2:[0-9]+]], $[[T1]], 0 -; 64-CMP-DAG: andi $2, $[[T2]], 1 +; 64-CMP-DAG: andi $2, $[[T1]], 1 %1 = fcmp oeq float %a, %b %2 = zext i1 %1 to i32 @@ -57,11 +53,9 @@ ; 32-C-DAG: c.ule.s $f12, $f14 ; 32-C: movt $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ule.s $f12, $f13 -; 64-C-DAG: movt $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -83,11 +77,9 @@ ; 32-C-DAG: c.ult.s $f12, $f14 ; 32-C: movt $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. 
-; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ult.s $f12, $f13 -; 64-C-DAG: movt $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.le.s $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -109,11 +101,9 @@ ; 32-C-DAG: c.olt.s $f12, $f14 ; 32-C: movf $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.olt.s $f12, $f13 -; 64-C-DAG: movf $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.lt.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -135,11 +125,9 @@ ; 32-C-DAG: c.ole.s $f12, $f14 ; 32-C: movf $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ole.s $f12, $f13 -; 64-C-DAG: movf $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.le.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -161,11 +149,9 @@ ; 32-C-DAG: c.ueq.s $f12, $f14 ; 32-C: movt $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ueq.s $f12, $f13 -; 64-C-DAG: movt $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ueq.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -189,11 +175,9 @@ ; 32-C-DAG: c.un.s $f12, $f14 ; 32-C: movt $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. 
-; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.un.s $f12, $f13 -; 64-C-DAG: movt $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -217,11 +201,9 @@ ; 32-C-DAG: c.ueq.s $f12, $f14 ; 32-C: movf $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ueq.s $f12, $f13 -; 64-C-DAG: movf $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ueq.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -243,11 +225,9 @@ ; 32-C-DAG: c.ole.s $f12, $f14 ; 32-C: movt $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ole.s $f12, $f13 -; 64-C-DAG: movt $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -269,11 +249,9 @@ ; 32-C-DAG: c.olt.s $f12, $f14 ; 32-C: movt $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.olt.s $f12, $f13 -; 64-C-DAG: movt $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -295,11 +273,9 @@ ; 32-C-DAG: c.ult.s $f12, $f14 ; 32-C: movf $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. 
-; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ult.s $f12, $f13 -; 64-C-DAG: movf $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ult.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -322,11 +298,9 @@ ; 32-C-DAG: c.ule.s $f12, $f14 ; 32-C: movf $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ule.s $f12, $f13 -; 64-C-DAG: movf $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ule.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -348,11 +322,9 @@ ; 32-C-DAG: c.eq.s $f12, $f14 ; 32-C: movt $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.eq.s $f12, $f13 -; 64-C-DAG: movt $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.eq.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -376,11 +348,9 @@ ; 32-C-DAG: c.un.s $f12, $f14 ; 32-C: movf $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.un.s $f12, $f13 -; 64-C-DAG: movf $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.un.s $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -420,11 +390,9 @@ ; 32-C-DAG: c.eq.d $f12, $f14 ; 32-C: movf $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. 
-; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.eq.d $f12, $f13 -; 64-C-DAG: movf $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -446,11 +414,9 @@ ; 32-C-DAG: c.ule.d $f12, $f14 ; 32-C: movt $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ule.d $f12, $f13 -; 64-C-DAG: movt $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -472,11 +438,9 @@ ; 32-C-DAG: c.ult.d $f12, $f14 ; 32-C: movt $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ult.d $f12, $f13 -; 64-C-DAG: movt $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.le.d $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -498,11 +462,9 @@ ; 32-C-DAG: c.olt.d $f12, $f14 ; 32-C: movf $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.olt.d $f12, $f13 -; 64-C-DAG: movf $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.lt.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -524,11 +486,9 @@ ; 32-C-DAG: c.ole.d $f12, $f14 ; 32-C: movf $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. 
-; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ole.d $f12, $f13 -; 64-C-DAG: movf $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.le.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -550,11 +510,9 @@ ; 32-C-DAG: c.ueq.d $f12, $f14 ; 32-C: movt $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ueq.d $f12, $f13 -; 64-C-DAG: movt $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -578,11 +536,9 @@ ; 32-C-DAG: c.un.d $f12, $f14 ; 32-C: movt $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.un.d $f12, $f13 -; 64-C-DAG: movt $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -606,11 +562,9 @@ ; 32-C-DAG: c.ueq.d $f12, $f14 ; 32-C: movf $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ueq.d $f12, $f13 -; 64-C-DAG: movf $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ueq.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -632,11 +586,9 @@ ; 32-C-DAG: c.ole.d $f12, $f14 ; 32-C: movt $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. 
-; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ole.d $f12, $f13 -; 64-C-DAG: movt $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -658,11 +610,9 @@ ; 32-C-DAG: c.olt.d $f12, $f14 ; 32-C: movt $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.olt.d $f12, $f13 -; 64-C-DAG: movt $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f14, $f12 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -684,11 +634,9 @@ ; 32-C-DAG: c.ult.d $f12, $f14 ; 32-C: movf $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ult.d $f12, $f13 -; 64-C-DAG: movf $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ult.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -710,11 +658,9 @@ ; 32-C-DAG: c.ule.d $f12, $f14 ; 32-C: movf $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.ule.d $f12, $f13 -; 64-C-DAG: movf $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.ule.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -736,11 +682,9 @@ ; 32-C-DAG: c.eq.d $f12, $f14 ; 32-C: movt $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. 
-; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.eq.d $f12, $f13 -; 64-C-DAG: movt $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movt $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.eq.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] @@ -764,11 +708,9 @@ ; 32-C-DAG: c.un.d $f12, $f14 ; 32-C: movf $2, $zero, $fcc0 -; FIXME: Remove redundant sign extension. -; 64-C-DAG: daddiu $[[T0:[0-9]+]], $zero, 1 +; 64-C-DAG: daddiu $2, $zero, 1 ; 64-C-DAG: c.un.d $f12, $f13 -; 64-C-DAG: movf $[[T0]], $zero, $fcc0 -; 64-C: sll $2, $[[T0]], 0 +; 64-C: movf $2, $zero, $fcc0 ; 32-CMP-DAG: cmp.un.d $[[T0:f[0-9]+]], $f12, $f14 ; 32-CMP-DAG: mfc1 $[[T1:[0-9]+]], $[[T0]] Index: test/CodeGen/Mips/llvm-ir/add.ll =================================================================== --- test/CodeGen/Mips/llvm-ir/add.ll +++ test/CodeGen/Mips/llvm-ir/add.ll @@ -1,7 +1,7 @@ ; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP32 +; RUN: -check-prefix=ALL -check-prefix=GP32-NOT-R2-R6 -check-prefix=GP32 ; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP32 +; RUN: -check-prefix=ALL -check-prefix=GP32-NOT-R2-R6 -check-prefix=GP32 ; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \ ; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32 ; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \ @@ -10,12 +10,13 @@ ; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32 ; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \ ; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP32 + ; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64 +; RUN: -check-prefix=ALL -check-prefix=GP64-NOT-R2-R6 -check-prefix=GP64 ; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \ -; RUN: 
-check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64 +; RUN: -check-prefix=ALL -check-prefix=GP64-NOT-R2-R6 -check-prefix=GP64 ; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=NOT-R2-R6 -check-prefix=GP64 +; RUN: -check-prefix=ALL -check-prefix=GP64-NOT-R2-R6 -check-prefix=GP64 ; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \ ; RUN: -check-prefix=ALL -check-prefix=R2-R6 -check-prefix=GP64 ; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \ @@ -29,9 +30,13 @@ entry: ; ALL-LABEL: add_i1: - ; ALL: addu $[[T0:[0-9]+]], $4, $5 - ; ALL: sll $[[T0]], $[[T0]], 31 - ; ALL: sra $2, $[[T0]], 31 + ; GP32: addu $[[T0:[0-9]+]], $4, $5 + ; GP32: sll $[[T0]], $[[T0]], 31 + ; GP32: sra $2, $[[T0]], 31 + + ; GP64: addu $[[T0:[0-9]+]], $4, $5 + ; GP64: dsll $[[T0]], $[[T0]], 63 + ; GP64: dsra $2, $[[T0]], 63 %r = add i1 %a, %b ret i1 %r @@ -41,12 +46,16 @@ entry: ; ALL-LABEL: add_i8: - ; NOT-R2-R6: addu $[[T0:[0-9]+]], $4, $5 - ; NOT-R2-R6: sll $[[T0]], $[[T0]], 24 - ; NOT-R2-R6: sra $2, $[[T0]], 24 + ; GP32-NOT-R2-R6: addu $[[T0:[0-9]+]], $4, $5 + ; GP32-NOT-R2-R6: sll $[[T0]], $[[T0]], 24 + ; GP32-NOT-R2-R6: sra $2, $[[T0]], 24 + + ; GP64-NOT-R2-R6: addu $[[T0:[0-9]+]], $4, $5 + ; GP64-NOT-R2-R6: dsll $[[T0]], $[[T0]], 56 + ; GP64-NOT-R2-R6: dsra $2, $[[T0]], 56 - ; R2-R6: addu $[[T0:[0-9]+]], $4, $5 - ; R2-R6: seb $2, $[[T0:[0-9]+]] + ; R2-R6: addu $[[T0:[0-9]+]], $4, $5 + ; R2-R6: seb $2, $[[T0:[0-9]+]] %r = add i8 %a, %b ret i8 %r @@ -56,12 +65,16 @@ entry: ; ALL-LABEL: add_i16: - ; NOT-R2-R6: addu $[[T0:[0-9]+]], $4, $5 - ; NOT-R2-R6: sll $[[T0]], $[[T0]], 16 - ; NOT-R2-R6: sra $2, $[[T0]], 16 + ; GP32-NOT-R2-R6: addu $[[T0:[0-9]+]], $4, $5 + ; GP32-NOT-R2-R6: sll $[[T0]], $[[T0]], 16 + ; GP32-NOT-R2-R6: sra $2, $[[T0]], 16 - ; R2-R6: addu $[[T0:[0-9]+]], $4, $5 - ; R2-R6: seh $2, $[[T0:[0-9]+]] + ; GP64-NOT-R2-R6: addu $[[T0:[0-9]+]], $4, $5 + ; GP64-NOT-R2-R6: dsll $[[T0]], $[[T0]], 48 + ; 
GP64-NOT-R2-R6: dsra $2, $[[T0]], 48 + + ; R2-R6: addu $[[T0:[0-9]+]], $4, $5 + ; R2-R6: seh $2, $[[T0:[0-9]+]] %r = add i16 %a, %b ret i16 %r @@ -71,7 +84,10 @@ entry: ; ALL-LABEL: add_i32: - ; ALL: addu $2, $4, $5 + ; GP32: addu $2, $4, $5 + + ; GP64: addu $[[T0:[0-9]+]], $4, $5 + ; GP64: sll $2, $[[T0]], 0 %r = add i32 %a, %b ret i32 %r Index: test/CodeGen/Mips/llvm-ir/and.ll =================================================================== --- test/CodeGen/Mips/llvm-ir/and.ll +++ test/CodeGen/Mips/llvm-ir/and.ll @@ -59,10 +59,7 @@ entry: ; ALL-LABEL: and_i32: - ; GP32: and $2, $4, $5 - - ; GP64: and $[[T0:[0-9]+]], $4, $5 - ; GP64: sll $2, $[[T0]], 0 + ; ALL: and $2, $4, $5 %r = and i32 %a, %b ret i32 %r Index: test/CodeGen/Mips/llvm-ir/ashr.ll =================================================================== --- test/CodeGen/Mips/llvm-ir/ashr.ll +++ test/CodeGen/Mips/llvm-ir/ashr.ll @@ -1,48 +1,36 @@ -; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=GP32 \ -; RUN: -check-prefix=M2 -; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=GP32 \ -; RUN: -check-prefix=32R1-R5 -; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=GP32 \ -; RUN: -check-prefix=32R1-R5 -; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=GP32 \ -; RUN: -check-prefix=32R1-R5 -; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=GP32 \ -; RUN: -check-prefix=32R1-R5 -; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=GP32 \ -; RUN: -check-prefix=32R6 -; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=GP64 \ -; RUN: -check-prefix=M3 -; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=GP64 \ -; RUN: 
-check-prefix=GP64-NOT-R6 -; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=GP64 \ -; RUN: -check-prefix=GP64-NOT-R6 -; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=GP64 \ -; RUN: -check-prefix=GP64-NOT-R6 -; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=GP64 \ -; RUN: -check-prefix=GP64-NOT-R6 -; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=GP64 \ -; RUN: -check-prefix=GP64-NOT-R6 -; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \ -; RUN: -check-prefix=ALL -check-prefix=GP64 \ -; RUN: -check-prefix=64R6 +; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s -check-prefix=ALL \ +; RUN: -check-prefix=M2 -check-prefix=GP32-NOT-R2-R6 +; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s -check-prefix=ALL \ +; RUN: -check-prefix=M32R1-R5 -check-prefix=GP32-NOT-R2-R6 +; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s -check-prefix=ALL \ +; RUN: -check-prefix=M32R1-R5 -check-prefix=GP32-R2-R6 +; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s -check-prefix=ALL \ +; RUN: -check-prefix=M32R1-R5 -check-prefix=GP32-R2-R6 +; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s -check-prefix=ALL \ +; RUN: -check-prefix=M32R1-R5 -check-prefix=GP32-R2-R6 +; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL \ +; RUN: -check-prefix=GP32-R2-R6 + +; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s -check-prefix=ALL \ +; RUN: -check-prefix=M3 -check-prefix=GP64-NOT-R2-R6 +; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s -check-prefix=ALL \ +; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=GP64-NOT-R2-R6 +; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s -check-prefix=ALL \ +; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=GP64-NOT-R2-R6 +; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s 
-check-prefix=ALL \ +; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=GP64-R2-R6 +; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s -check-prefix=ALL \ +; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=GP64-R2-R6 +; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s -check-prefix=ALL \ +; RUN: -check-prefix=GP64-NOT-R6 -check-prefix=GP64-R2-R6 +; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s -check-prefix=ALL \ +; RUN: -check-prefix=M64R6 -check-prefix=GP64-R2-R6 define signext i1 @ashr_i1(i1 signext %a, i1 signext %b) { entry: ; ALL-LABEL: ashr_i1: - ; ALL: move $2, $4 + ; ALL: move $2, $4 %r = ashr i1 %a, %b ret i1 %r @@ -52,9 +40,22 @@ entry: ; ALL-LABEL: ashr_i8: - ; FIXME: The andi instruction is redundant. - ; ALL: andi $[[T0:[0-9]+]], $5, 255 - ; ALL: srav $2, $4, $[[T0]] + ; FIXME: The andi instruction in the following cases is redundant. + + ; GP32-NOT-R2-R6: andi $[[T0:[0-9]+]], $5, 255 + ; GP32-NOT-R2-R6: srav $2, $4, $[[T0]] + + ; GP32-R2-R6: andi $[[T0:[0-9]+]], $5, 255 + ; GP32-R2-R6: srav $2, $4, $[[T0]] + + ; GP64-NOT-R2-R6: andi $[[T0:[0-9]+]], $5, 255 + ; GP64-NOT-R2-R6: srav $[[T1:[0-9]+]], $4, $[[T0]] + ; GP64-NOT-R2-R6: dsll $[[T2:[0-9]+]], $[[T1]], 56 + ; GP64-NOT-R2-R6: dsra $2, $[[T2]], 56 + + ; GP64-R2-R6: andi $[[T0:[0-9]+]], $5, 255 + ; GP64-R2-R6: srav $[[T1:[0-9]+]], $4, $[[T0]] + ; GP64-R2-R6: seb $2, $[[T1]] %r = ashr i8 %a, %b ret i8 %r @@ -64,9 +65,21 @@ entry: ; ALL-LABEL: ashr_i16: - ; FIXME: The andi instruction is redundant. - ; ALL: andi $[[T0:[0-9]+]], $5, 65535 - ; ALL: srav $2, $4, $[[T0]] + ; FIXME: The andi instruction in the following cases is redundant. 
+ + ; GP32-NOT-R2-R6: andi $[[T0:[0-9]+]], $5, 65535 + ; GP32-NOT-R2-R6: srav $2, $4, $[[T0]] + + ; GP32-R2-R6: andi $[[T0:[0-9]+]], $5, 65535 + ; GP32-R2-R6: srav $2, $4, $[[T0]] + + ; GP64-NOT-R2-R6: andi $[[T0:[0-9]+]], $5, 65535 + ; GP64-NOT-R2-R6: srav $[[T1:[0-9]+]], $4, $[[T0]] + ; GP64-NOT-R2-R6: dsll $[[T2:[0-9]+]], $[[T1]], 48 + ; GP64-NOT-R2-R6: dsra $2, $[[T2]], 48 + + ; GP64-R2-R6: andi $[[T0:[0-9]+]], $5, 65535 + ; GP64-R2-R6: srav $[[T1:[0-9]+]], $4, $[[T0]] %r = ashr i16 %a, %b ret i16 %r @@ -76,7 +89,15 @@ entry: ; ALL-LABEL: ashr_i32: - ; ALL: srav $2, $4, $5 + ; GP32-NOT-R2-R6: srav $2, $4, $5 + + ; GP32-R2-R6: srav $2, $4, $5 + + ; GP64-NOT-R2-R6: srav $[[T0:[0-9]+]], $4, $5 + ; GP64-NOT-R2-R6: sll $2, $[[T0]], 0 + + ; GP64-R2-R6: srav $[[T0:[0-9]+]], $4, $5 + ; GP64-R2-R6: sll $2, $[[T0]], 0 %r = ashr i32 %a, %b ret i32 %r @@ -86,52 +107,53 @@ entry: ; ALL-LABEL: ashr_i64: - ; M2: srav $[[T0:[0-9]+]], $4, $7 - ; M2: andi $[[T1:[0-9]+]], $7, 32 - ; M2: bnez $[[T1]], $[[BB0:BB[0-9_]+]] - ; M2: move $3, $[[T0]] - ; M2: srlv $[[T2:[0-9]+]], $5, $7 - ; M2: not $[[T3:[0-9]+]], $7 - ; M2: sll $[[T4:[0-9]+]], $4, 1 - ; M2: sllv $[[T5:[0-9]+]], $[[T4]], $[[T3]] - ; M2: or $3, $[[T3]], $[[T2]] - ; M2: $[[BB0]]: - ; M2: beqz $[[T1]], $[[BB1:BB[0-9_]+]] - ; M2: nop - ; M2: sra $2, $4, 31 - ; M2: $[[BB1]]: - ; M2: jr $ra - ; M2: nop - - ; 32R1-R5: srlv $[[T0:[0-9]+]], $5, $7 - ; 32R1-R5: not $[[T1:[0-9]+]], $7 - ; 32R1-R5: sll $[[T2:[0-9]+]], $4, 1 - ; 32R1-R5: sllv $[[T3:[0-9]+]], $[[T2]], $[[T1]] - ; 32R1-R5: or $3, $[[T3]], $[[T0]] - ; 32R1-R5: srav $[[T4:[0-9]+]], $4, $7 - ; 32R1-R5: andi $[[T5:[0-9]+]], $7, 32 - ; 32R1-R5: movn $3, $[[T4]], $[[T5]] - ; 32R1-R5: sra $4, $4, 31 - ; 32R1-R5: jr $ra - ; 32R1-R5: movn $2, $4, $[[T5]] - - ; 32R6: srav $[[T0:[0-9]+]], $4, $7 - ; 32R6: andi $[[T1:[0-9]+]], $7, 32 - ; 32R6: seleqz $[[T2:[0-9]+]], $[[T0]], $[[T1]] - ; 32R6: sra $[[T3:[0-9]+]], $4, 31 - ; 32R6: selnez $[[T4:[0-9]+]], $[[T3]], $[[T1]] - ; 32R6: or 
$[[T5:[0-9]+]], $[[T4]], $[[T2]] - ; 32R6: srlv $[[T6:[0-9]+]], $5, $7 - ; 32R6: not $[[T7:[0-9]+]], $7 - ; 32R6: sll $[[T8:[0-9]+]], $4, 1 - ; 32R6: sllv $[[T9:[0-9]+]], $[[T8]], $[[T7]] - ; 32R6: or $[[T10:[0-9]+]], $[[T9]], $[[T6]] - ; 32R6: seleqz $[[T11:[0-9]+]], $[[T10]], $[[T1]] - ; 32R6: selnez $[[T12:[0-9]+]], $[[T0]], $[[T1]] - ; 32R6: jr $ra - ; 32R6: or $3, $[[T0]], $[[T11]] - - ; GP64: dsrav $2, $4, $5 + ; M2: srav $[[T0:[0-9]+]], $4, $7 + ; M2: andi $[[T1:[0-9]+]], $7, 32 + ; M2: bnez $[[T1]], $[[BB0:BB[0-9_]+]] + ; M2: move $3, $[[T0]] + ; M2: srlv $[[T2:[0-9]+]], $5, $7 + ; M2: not $[[T3:[0-9]+]], $7 + ; M2: sll $[[T4:[0-9]+]], $4, 1 + ; M2: sllv $[[T5:[0-9]+]], $[[T4]], $[[T3]] + ; M2: or $3, $[[T5]], $[[T2]] + ; M2: $[[BB0]]: + ; M2: beqz $[[T1]], $[[BB1:BB[0-9_]+]] + ; M2: nop + ; M2: sra $2, $4, 31 + ; M2: $[[BB1]]: + ; M2: jr $ra + + ; M32R1-R5: srlv $[[T0:[0-9]+]], $5, $7 + ; M32R1-R5: not $[[T1:[0-9]+]], $7 + ; M32R1-R5: sll $[[T2:[0-9]+]], $4, 1 + ; M32R1-R5: sllv $[[T3:[0-9]+]], $[[T2]], $[[T1]] + ; M32R1-R5: or $3, $[[T3]], $[[T0]] + ; M32R1-R5: srav $[[T4:[0-9]+]], $4, $7 + ; M32R1-R5: andi $[[T5:[0-9]+]], $7, 32 + ; M32R1-R5: movn $3, $[[T4]], $[[T5]] + ; M32R1-R5: sra $[[T6:[0-9]+]], $4, 31 + ; M32R1-R5: jr $ra + ; M32R1-R5: movn $2, $[[T6]], $[[T5]] + + ; 32R6: srav $[[T0:[0-9]+]], $4, $7 + ; 32R6: andi $[[T1:[0-9]+]], $7, 32 + ; 32R6: seleqz $[[T2:[0-9]+]], $[[T0]], $[[T1]] + ; 32R6: sra $[[T3:[0-9]+]], $4, 31 + ; 32R6: selnez $[[T4:[0-9]+]], $[[T3]], $[[T1]] + ; 32R6: or $[[T5:[0-9]+]], $[[T4]], $[[T2]] + ; 32R6: srlv $[[T6:[0-9]+]], $5, $7 + ; 32R6: not $[[T7:[0-9]+]], $7 + ; 32R6: sll $[[T8:[0-9]+]], $4, 1 + ; 32R6: sllv $[[T9:[0-9]+]], $[[T8]], $[[T7]] + ; 32R6: or $[[T10:[0-9]+]], $[[T9]], $[[T6]] + ; 32R6: seleqz $[[T11:[0-9]+]], $[[T10]], $[[T1]] + ; 32R6: selnez $[[T12:[0-9]+]], $[[T0]], $[[T1]] + ; 32R6: jr $ra + ; 32R6: or $3, $[[T0]], $[[T11]] + + ; GP64-NOT-R2-R6: dsrav $2, $4, $5 + + ; GP64-R2-R6: dsrav $2, $4, $5 %r = 
ashr i64 %a, %b ret i64 %r @@ -141,56 +163,56 @@ entry: ; ALL-LABEL: ashr_i128: - ; GP32: lw $25, %call16(__ashrti3)($gp) - - ; M3: sll $[[T0:[0-9]+]], $7, 0 - ; M3: dsrav $[[T1:[0-9]+]], $4, $7 - ; M3: andi $[[T2:[0-9]+]], $[[T0]], 64 - ; M3: bnez $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]] - ; M3: move $3, $[[T1]] - ; M3: dsrlv $[[T4:[0-9]+]], $5, $7 - ; M3: dsll $[[T5:[0-9]+]], $4, 1 - ; M3: not $[[T6:[0-9]+]], $[[T0]] - ; M3: dsllv $[[T7:[0-9]+]], $[[T5]], $[[T6]] - ; M3: or $3, $[[T7]], $[[T4]] - ; M3: $[[BB0]]: - ; M3: beqz $[[T3]], $[[BB1:BB[0-9_]+]] - ; M3: nop - ; M3: dsra $2, $4, 63 - ; M3: $[[BB1]]: - ; M3: jr $ra - ; M3: nop - - ; GP64-NOT-R6: dsrlv $[[T0:[0-9]+]], $5, $7 - ; GP64-NOT-R6: dsll $[[T1:[0-9]+]], $4, 1 - ; GP64-NOT-R6: sll $[[T2:[0-9]+]], $7, 0 - ; GP64-NOT-R6: not $[[T3:[0-9]+]], $[[T2]] - ; GP64-NOT-R6: dsllv $[[T4:[0-9]+]], $[[T1]], $[[T3]] - ; GP64-NOT-R6: or $3, $[[T4]], $[[T0]] - ; GP64-NOT-R6: dsrav $2, $4, $7 - ; GP64-NOT-R6: andi $[[T5:[0-9]+]], $[[T2]], 64 - ; GP64-NOT-R6: movn $3, $2, $[[T5]] - ; GP64-NOT-R6: dsra $[[T6:[0-9]+]], $4, 63 - ; GP64-NOT-R6: jr $ra - ; GP64-NOT-R6: movn $2, $[[T6]], $[[T5]] - - ; 64R6: dsrav $[[T0:[0-9]+]], $4, $7 - ; 64R6: sll $[[T1:[0-9]+]], $7, 0 - ; 64R6: andi $[[T2:[0-9]+]], $[[T1]], 64 - ; 64R6: sll $[[T3:[0-9]+]], $[[T2]], 0 - ; 64R6: seleqz $[[T4:[0-9]+]], $[[T0]], $[[T3]] - ; 64R6: dsra $[[T5:[0-9]+]], $4, 63 - ; 64R6: selnez $[[T6:[0-9]+]], $[[T5]], $[[T3]] - ; 64R6: or $2, $[[T6]], $[[T4]] - ; 64R6: dsrlv $[[T7:[0-9]+]], $5, $7 - ; 64R6: dsll $[[T8:[0-9]+]], $4, 1 - ; 64R6: not $[[T9:[0-9]+]], $[[T1]] - ; 64R6: dsllv $[[T10:[0-9]+]], $[[T8]], $[[T9]] - ; 64R6: or $[[T11:[0-9]+]], $[[T10]], $[[T7]] - ; 64R6: seleqz $[[T12:[0-9]+]], $[[T11]], $[[T3]] - ; 64R6: selnez $[[T13:[0-9]+]], $[[T0]], $[[T3]] - ; 64R6: jr $ra - ; 64R6: or $3, $[[T13]], $[[T12]] + ; GP32: lw $25, %call16(__ashrti3)($gp) + + ; M3: sll $[[T0:[0-9]+]], $7, 0 + ; M3: dsrav $[[T1:[0-9]+]], $4, $7 + ; M3: andi $[[T2:[0-9]+]], 
$[[T0]], 64 + ; M3: bnez $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]] + ; M3: move $3, $[[T1]] + ; M3: dsrlv $[[T4:[0-9]+]], $5, $7 + ; M3: dsll $[[T5:[0-9]+]], $4, 1 + ; M3: not $[[T6:[0-9]+]], $[[T0]] + ; M3: dsllv $[[T7:[0-9]+]], $[[T5]], $[[T6]] + ; M3: or $3, $[[T7]], $[[T4]] + ; M3: $[[BB0]]: + ; M3: beqz $[[T3]], $[[BB1:BB[0-9_]+]] + ; M3: nop + ; M3: dsra $2, $4, 63 + ; M3: $[[BB1]]: + ; M3: jr $ra + ; M3: nop + + ; GP64-NOT-R6: dsrlv $[[T0:[0-9]+]], $5, $7 + ; GP64-NOT-R6: dsll $[[T1:[0-9]+]], $4, 1 + ; GP64-NOT-R6: sll $[[T2:[0-9]+]], $7, 0 + ; GP64-NOT-R6: not $[[T3:[0-9]+]], $[[T2]] + ; GP64-NOT-R6: dsllv $[[T4:[0-9]+]], $[[T1]], $[[T3]] + ; GP64-NOT-R6: or $3, $[[T4]], $[[T0]] + ; GP64-NOT-R6: dsrav $[[R0:[0-9]+]], $4, $7 + ; GP64-NOT-R6: andi $[[T5:[0-9]+]], $[[T2]], 64 + ; GP64-NOT-R6: movn $3, $[[R0]], $[[T5]] + ; GP64-NOT-R6: dsra $[[T6:[0-9]+]], $4, 63 + ; GP64-NOT-R6: jr $ra + ; GP64-NOT-R6: movn $2, $[[T6]], $[[T5]] + + ; M64R6: dsrav $[[T0:[0-9]+]], $4, $7 + ; M64R6: sll $[[T1:[0-9]+]], $7, 0 + ; M64R6: andi $[[T2:[0-9]+]], $[[T1]], 64 + ; M64R6: sll $[[T3:[0-9]+]], $[[T2]], 0 + ; M64R6: seleqz $[[T4:[0-9]+]], $[[T0]], $[[T3]] + ; M64R6: dsra $[[T5:[0-9]+]], $4, 63 + ; M64R6: selnez $[[T6:[0-9]+]], $[[T5]], $[[T3]] + ; M64R6: or $2, $[[T6]], $[[T4]] + ; M64R6: dsrlv $[[T7:[0-9]+]], $5, $7 + ; M64R6: dsll $[[T8:[0-9]+]], $4, 1 + ; M64R6: not $[[T9:[0-9]+]], $[[T1]] + ; M64R6: dsllv $[[T10:[0-9]+]], $[[T8]], $[[T9]] + ; M64R6: or $[[T11:[0-9]+]], $[[T10]], $[[T7]] + ; M64R6: seleqz $[[T12:[0-9]+]], $[[T11]], $[[T3]] + ; M64R6: selnez $[[T13:[0-9]+]], $[[T0]], $[[T3]] + ; M64R6: jr $ra + ; M64R6: or $3, $[[T13]], $[[T12]] %r = ashr i128 %a, %b ret i128 %r Index: test/CodeGen/Mips/llvm-ir/lshr.ll =================================================================== --- test/CodeGen/Mips/llvm-ir/lshr.ll +++ test/CodeGen/Mips/llvm-ir/lshr.ll @@ -42,7 +42,7 @@ entry: ; ALL-LABEL: lshr_i1: - ; ALL: move $2, $4 + ; ALL: move $2, $4 %r = lshr i1 %a, %b ret i1 %r @@ -52,8
+52,8 @@ entry: ; ALL-LABEL: lshr_i8: - ; ALL: srlv $[[T0:[0-9]+]], $4, $5 - ; ALL: andi $2, $[[T0]], 255 + ; ALL: srlv $[[T0:[0-9]+]], $4, $5 + ; ALL: andi $2, $[[T0]], 255 %r = lshr i8 %a, %b ret i8 %r @@ -63,8 +63,8 @@ entry: ; ALL-LABEL: lshr_i16: - ; ALL: srlv $[[T0:[0-9]+]], $4, $5 - ; ALL: andi $2, $[[T0]], 65535 + ; ALL: srlv $[[T0:[0-9]+]], $4, $5 + ; ALL: andi $2, $[[T0]], 65535 %r = lshr i16 %a, %b ret i16 %r @@ -74,7 +74,10 @@ entry: ; ALL-LABEL: lshr_i32: - ; ALL: srlv $2, $4, $5 + ; GP32: srlv $2, $4, $5 + + ; GP64: srlv $[[T0:[0-9]+]], $4, $5 + ; GP64: sll $2, $[[T0]], 0 %r = lshr i32 %a, %b ret i32 %r @@ -125,7 +128,7 @@ ; 32R6: jr $ra ; 32R6: seleqz $2, $[[T7]], $[[T5]] - ; GP64: dsrlv $2, $4, $5 + ; GP64: dsrlv $2, $4, $5 %r = lshr i64 %a, %b ret i64 %r @@ -135,7 +138,7 @@ entry: ; ALL-LABEL: lshr_i128: - ; GP32: lw $25, %call16(__lshrti3)($gp) + ; GP32: lw $25, %call16(__lshrti3)($gp) ; M3: sll $[[T0:[0-9]+]], $7, 0 ; M3: dsrlv $[[T1:[0-9]+]], $4, $7 Index: test/CodeGen/Mips/llvm-ir/mul.ll =================================================================== --- test/CodeGen/Mips/llvm-ir/mul.ll +++ test/CodeGen/Mips/llvm-ir/mul.ll @@ -10,6 +10,7 @@ ; RUN: -check-prefix=32R1-R5 -check-prefix=32R2-R5 -check-prefix=GP32 ; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s -check-prefix=ALL \ ; RUN: -check-prefix=32R6 -check-prefix=GP32 + ; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s -check-prefix=ALL \ ; RUN: -check-prefix=M4 -check-prefix=GP64-NOT-R6 ; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s -check-prefix=ALL \ @@ -42,16 +43,16 @@ ; M4: mult $4, $5 ; M4: mflo $[[T0:[0-9]+]] - ; M4: sll $[[T0]], $[[T0]], 31 - ; M4: sra $2, $[[T0]], 31 + ; M4: dsll $[[T0]], $[[T0]], 63 + ; M4: dsra $2, $[[T0]], 63 ; 64R1-R5: mul $[[T0:[0-9]+]], $4, $5 - ; 64R1-R5: sll $[[T0]], $[[T0]], 31 - ; 64R1-R5: sra $2, $[[T0]], 31 + ; 64R1-R5: dsll $[[T0]], $[[T0]], 63 + ; 64R1-R5: dsra $2, $[[T0]], 63 ; 64R6: mul $[[T0:[0-9]+]], $4, $5 - ; 64R6: 
sll $[[T0]], $[[T0]], 31 - ; 64R6: sra $2, $[[T0]], 31 + ; 64R6: dsll $[[T0]], $[[T0]], 63 + ; 64R6: dsra $2, $[[T0]], 63 %r = mul i1 %a, %b ret i1 %r @@ -78,12 +79,12 @@ ; M4: mult $4, $5 ; M4: mflo $[[T0:[0-9]+]] - ; M4: sll $[[T0]], $[[T0]], 24 - ; M4: sra $2, $[[T0]], 24 + ; M4: dsll $[[T0]], $[[T0]], 56 + ; M4: dsra $2, $[[T0]], 56 ; 64R1: mul $[[T0:[0-9]+]], $4, $5 - ; 64R1: sll $[[T0]], $[[T0]], 24 - ; 64R1: sra $2, $[[T0]], 24 + ; 64R1: dsll $[[T0]], $[[T0]], 56 + ; 64R1: dsra $2, $[[T0]], 56 ; 64R2: mul $[[T0:[0-9]+]], $4, $5 ; 64R2: seb $2, $[[T0]] @@ -115,12 +116,12 @@ ; M4: mult $4, $5 ; M4: mflo $[[T0:[0-9]+]] - ; M4: sll $[[T0]], $[[T0]], 16 - ; M4: sra $2, $[[T0]], 16 + ; M4: dsll $[[T0]], $[[T0]], 48 + ; M4: dsra $2, $[[T0]], 48 ; 64R1: mul $[[T0:[0-9]+]], $4, $5 - ; 64R1: sll $[[T0]], $[[T0]], 16 - ; 64R1: sra $2, $[[T0]], 16 + ; 64R1: dsll $[[T0]], $[[T0]], 48 + ; 64R1: dsra $2, $[[T0]], 48 ; 64R2: mul $[[T0:[0-9]+]], $4, $5 ; 64R2: seh $2, $[[T0]] @@ -141,8 +142,15 @@ ; 32R1-R5: mul $2, $4, $5 ; 32R6: mul $2, $4, $5 - ; 64R1-R5: mul $2, $4, $5 - ; 64R6: mul $2, $4, $5 + ; M4: mult $4, $5 + ; M4: mflo $[[T0:[0-9]+]] + ; M4: sll $2, $[[T0]], 0 + + ; 64R1-R5: mul $[[T0:[0-9]+]], $4, $5 + ; 64R1-R5: sll $2, $[[T0]], 0 + + ; 64R6: mul $[[T0:[0-9]+]], $4, $5 + ; 64R6: sll $2, $[[T0]], 0 %r = mul i32 %a, %b ret i32 %r } Index: test/CodeGen/Mips/llvm-ir/or.ll =================================================================== --- test/CodeGen/Mips/llvm-ir/or.ll +++ test/CodeGen/Mips/llvm-ir/or.ll @@ -59,11 +59,7 @@ entry: ; ALL-LABEL: or_i32: - ; GP32: or $2, $4, $5 - - ; GP64: or $[[T0:[0-9]+]], $4, $5 - ; FIXME: The sll instruction below is redundant. 
- ; GP64: sll $2, $[[T0]], 0 + ; ALL: or $2, $4, $5 %r = or i32 %a, %b ret i32 %r Index: test/CodeGen/Mips/llvm-ir/ret.ll =================================================================== --- test/CodeGen/Mips/llvm-ir/ret.ll +++ test/CodeGen/Mips/llvm-ir/ret.ll @@ -7,17 +7,40 @@ ; affects it and it's undesirable to repeat the non-pointer returns for each ; relocation model. -; RUN: llc -march=mips -mcpu=mips32 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=NO-MTHC1 -check-prefix=NOT-R6 -; RUN: llc -march=mips -mcpu=mips32r2 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=MTHC1 -check-prefix=NOT-R6 -; RUN: llc -march=mips -mcpu=mips32r3 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=MTHC1 -check-prefix=NOT-R6 -; RUN: llc -march=mips -mcpu=mips32r5 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=MTHC1 -check-prefix=NOT-R6 -; RUN: llc -march=mips -mcpu=mips32r6 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR32 -check-prefix=MTHC1 -check-prefix=R6 -; RUN: llc -march=mips64 -mcpu=mips4 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6 -; RUN: llc -march=mips64 -mcpu=mips64 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6 -; RUN: llc -march=mips64 -mcpu=mips64r2 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6 -; RUN: llc -march=mips64 -mcpu=mips64r3 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6 -; RUN: llc -march=mips64 -mcpu=mips64r5 -asm-show-inst < %s | FileCheck %s -check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=NOT-R6 -; RUN: llc -march=mips64 -mcpu=mips64r6 -asm-show-inst < %s | FileCheck %s 
-check-prefix=ALL -check-prefix=GPR64 -check-prefix=DMTC1 -check-prefix=R6 +; RUN: llc -march=mips -mcpu=mips32 -asm-show-inst < %s | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=GPR32 \ +; RUN: -check-prefix=NO-MTHC1 -check-prefix=NOT-R6 +; RUN: llc -march=mips -mcpu=mips32r2 -asm-show-inst < %s | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=GPR32 \ +; RUN: -check-prefix=MTHC1 -check-prefix=NOT-R6 +; RUN: llc -march=mips -mcpu=mips32r3 -asm-show-inst < %s | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=GPR32 \ +; RUN: -check-prefix=MTHC1 -check-prefix=NOT-R6 +; RUN: llc -march=mips -mcpu=mips32r5 -asm-show-inst < %s | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=GPR32 \ +; RUN: -check-prefix=MTHC1 -check-prefix=NOT-R6 +; RUN: llc -march=mips -mcpu=mips32r6 -asm-show-inst < %s | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=GPR32 \ +; RUN: -check-prefix=MTHC1 -check-prefix=R6 + +; RUN: llc -march=mips64 -mcpu=mips4 -asm-show-inst < %s | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=GPR64 \ +; RUN: -check-prefix=DMTC1 -check-prefix=NOT-R6 +; RUN: llc -march=mips64 -mcpu=mips64 -asm-show-inst < %s | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=GPR64 \ +; RUN: -check-prefix=DMTC1 -check-prefix=NOT-R6 +; RUN: llc -march=mips64 -mcpu=mips64r2 -asm-show-inst < %s | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=GPR64 \ +; RUN: -check-prefix=DMTC1 -check-prefix=NOT-R6 +; RUN: llc -march=mips64 -mcpu=mips64r3 -asm-show-inst < %s | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=GPR64 \ +; RUN: -check-prefix=DMTC1 -check-prefix=NOT-R6 +; RUN: llc -march=mips64 -mcpu=mips64r5 -asm-show-inst < %s | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=GPR64 \ +; RUN: -check-prefix=DMTC1 -check-prefix=NOT-R6 +; RUN: llc -march=mips64 -mcpu=mips64r6 -asm-show-inst < %s | \ +; RUN: FileCheck %s -check-prefix=ALL -check-prefix=GPR64 \ +; RUN: -check-prefix=DMTC1 -check-prefix=R6 define void 
@ret_void() { ; ALL-LABEL: ret_void: @@ -90,8 +113,12 @@ define i32 @ret_i32_65537() { ; ALL-LABEL: ret_i32_65537: -; ALL: lui $[[T0:[0-9]+]], 1 -; ALL-DAG: ori $2, $[[T0]], 1 + +; GPR32: lui $[[T0:[0-9]+]], 1 +; GPR32-DAG: ori $2, $[[T0]], 1 + +; GPR64: lui $[[T0:[0-9]+]], 1 +; GPR64-DAG: daddiu $2, $[[T0]], 1 ; NOT-R6-DAG: jr $ra # %2, i32 1 %4 = sext i8 %3 to i32 ; ALL-DAG: copy_s.b [[R3:\$[0-9]+]], [[R1]][1] - ; ALL-NOT: sll - ; ALL-NOT: sra + ; MIPS32-NOT: sll + ; MIPS32-NOT: sra ret i32 %4 } @@ -305,8 +305,8 @@ %3 = extractelement <8 x i16> %2, i32 1 %4 = sext i16 %3 to i32 ; ALL-DAG: copy_s.h [[R3:\$[0-9]+]], [[R1]][1] - ; ALL-NOT: sll - ; ALL-NOT: sra + ; MIPS32-NOT: sll + ; MIPS32-NOT: sra ret i32 %4 } @@ -356,8 +356,10 @@ %3 = extractelement <16 x i8> %2, i32 1 %4 = zext i8 %3 to i32 - ; ALL-DAG: copy_u.b [[R3:\$[0-9]+]], [[R1]][1] - ; ALL-NOT: andi + ; MIPS32-DAG: copy_u.b [[R3:\$[0-9]+]], [[R1]][1] + ; MIPS32-NOT: andi + + ; MIPS64: copy_s.b [[R3:\$[0-9]+]], [[R1]][1] ret i32 %4 } @@ -373,8 +375,10 @@ %3 = extractelement <8 x i16> %2, i32 1 %4 = zext i16 %3 to i32 - ; ALL-DAG: copy_u.h [[R3:\$[0-9]+]], [[R1]][1] - ; ALL-NOT: andi + ; MIPS32-DAG: copy_u.h [[R3:\$[0-9]+]], [[R1]][1] + ; MIPS32-NOT: andi + + ; MIPS64-DAG: copy_s.h [[R3:\$[0-9]+]], [[R1]][1] ret i32 %4 } @@ -544,7 +548,9 @@ %5 = zext i8 %4 to i32 ; ALL-DAG: splat.b $w[[R3:[0-9]+]], [[R1]]{{\[}}[[IDX]]] ; ALL-DAG: mfc1 [[R5:\$[0-9]+]], $f[[R3]] - ; ALL-DAG: srl [[R6:\$[0-9]+]], [[R5]], 24 + ; MIPS32-DAG: srl [[R6:\$[0-9]+]], [[R5]], 24 + ; MIPS64-DAG: sra [[R6:\$[0-9]+]], [[R5]], 24 + ; MIPS64-DAG: andi $2, [[R6]], 255 ret i32 %5 } @@ -571,7 +577,9 @@ %5 = zext i16 %4 to i32 ; ALL-DAG: splat.h $w[[R3:[0-9]+]], [[R1]]{{\[}}[[IDX]]] ; ALL-DAG: mfc1 [[R5:\$[0-9]+]], $f[[R3]] - ; ALL-DAG: srl [[R6:\$[0-9]+]], [[R5]], 16 + ; MIPS32-DAG: srl [[R6:\$[0-9]+]], [[R5]], 16 + ; MIPS64-DAG: sra [[R6:\$[0-9]+]], [[R5]], 16 + ; MIPS64-DAG: andi $2, [[R6]], 65535 ret i32 %5 } Index:
test/CodeGen/Mips/named-register-n32.ll =================================================================== --- test/CodeGen/Mips/named-register-n32.ll +++ test/CodeGen/Mips/named-register-n32.ll @@ -9,7 +9,7 @@ } ; CHECK-LABEL: get_gp: -; CHECK: sll $2, $gp, 0 +; CHECK: move $2, $gp declare i64 @llvm.read_register.i64(metadata)