Index: lib/Target/Mips/MipsRegisterInfo.h
===================================================================
--- lib/Target/Mips/MipsRegisterInfo.h
+++ lib/Target/Mips/MipsRegisterInfo.h
@@ -57,6 +57,8 @@
 
   BitVector getReservedRegs(const MachineFunction &MF) const override;
 
+  bool enableMultipleCopyHints() const override { return true; }
+
   bool requiresRegisterScavenging(const MachineFunction &MF) const override;
 
   bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override;
Index: test/CodeGen/Mips/Fast-ISel/sel1.ll
===================================================================
--- test/CodeGen/Mips/Fast-ISel/sel1.ll
+++ test/CodeGen/Mips/Fast-ISel/sel1.ll
@@ -84,11 +84,11 @@
 entry:
   ; CHECK-LABEL: sel_float2:
 
+  ; CHECK: mov.s $f0, $f14
   ; CHECK-DAG: xor $[[T0:[0-9]+]], $6, $zero
   ; CHECK: sltu $[[T1:[0-9]+]], $zero, $[[T0]]
   ; CHECK-NEXT: andi $[[T2:[0-9]+]], $[[T1]], 1
-  ; CHECK: movn.s $f14, $f12, $[[T2]]
-  ; CHECK: mov.s $f0, $f14
+  ; CHECK: movn.s $f0, $f12, $[[T2]]
   %cond = icmp ne i32 %j, 0
   %res = select i1 %cond, float %k, float %l
   ret float %res
@@ -114,12 +114,12 @@
 entry:
   ; CHECK-LABEL: sel_double2:
 
+  ; CHECK: mov.d $f0, $f14
   ; CHECK-DAG: lw $[[SEL:[0-9]+]], 16($sp)
   ; CHECK-DAG: xor $[[T0:[0-9]+]], $[[SEL]], $zero
   ; CHECK: sltu $[[T1:[0-9]+]], $zero, $[[T0]]
   ; CHECK-NEXT: andi $[[T2:[0-9]+]], $[[T1]], 1
-  ; CHECK: movn.d $f14, $f12, $[[T2]]
-  ; CHECK: mov.d $f0, $f14
+  ; CHECK: movn.d $f0, $f12, $[[T2]]
   %cond = icmp ne i32 %j, 0
   %res = select i1 %cond, double %k, double %l
   ret double %res
Index: test/CodeGen/Mips/analyzebranch.ll
===================================================================
--- test/CodeGen/Mips/analyzebranch.ll
+++ test/CodeGen/Mips/analyzebranch.ll
@@ -16,7 +16,7 @@
 ; 32-GPR: mtc1 $zero, $[[Z:f[0-9]]]
 ; 32-GPR: mthc1 $zero, $[[Z:f[0-9]]]
 ; 64-GPR: dmtc1 $zero, $[[Z:f[0-9]]]
-; GPR: cmp.lt.d $[[FGRCC:f[0-9]+]], $[[Z]], $f12
+; GPR: cmp.lt.d $[[FGRCC:f[0-9]+]], $[[Z]], $f0
 ; GPR: mfc1 $[[GPRCC:[0-9]+]], $[[FGRCC]]
 ; GPR-NOT: not $[[GPRCC]], $[[GPRCC]]
 ; GPR: bnezc $[[GPRCC]], {{\$|\.L}}BB
Index: test/CodeGen/Mips/indirect-jump-hazard/calls.ll
===================================================================
--- test/CodeGen/Mips/indirect-jump-hazard/calls.ll
+++ test/CodeGen/Mips/indirect-jump-hazard/calls.ll
@@ -30,8 +30,7 @@
 ; MIPS32R2: # %bb.0: # %entry
 ; MIPS32R2-NEXT: addiu $sp, $sp, -24
 ; MIPS32R2-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
-; MIPS32R2-NEXT: move $1, $4
-; MIPS32R2-NEXT: move $25, $1
+; MIPS32R2-NEXT: move $25, $4
 ; MIPS32R2-NEXT: jalr.hb $25
 ; MIPS32R2-NEXT: addiu $4, $zero, 13
 ; MIPS32R2-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
@@ -42,8 +41,7 @@
 ; MIPS32R6: # %bb.0: # %entry
 ; MIPS32R6-NEXT: addiu $sp, $sp, -24
 ; MIPS32R6-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
-; MIPS32R6-NEXT: move $1, $4
-; MIPS32R6-NEXT: move $25, $1
+; MIPS32R6-NEXT: move $25, $4
 ; MIPS32R6-NEXT: jalr.hb $25
 ; MIPS32R6-NEXT: addiu $4, $zero, 13
 ; MIPS32R6-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
@@ -54,8 +52,7 @@
 ; MIPS64R2: # %bb.0: # %entry
 ; MIPS64R2-NEXT: daddiu $sp, $sp, -16
 ; MIPS64R2-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
-; MIPS64R2-NEXT: move $1, $4
-; MIPS64R2-NEXT: move $25, $1
+; MIPS64R2-NEXT: move $25, $4
 ; MIPS64R2-NEXT: jalr.hb $25
 ; MIPS64R2-NEXT: daddiu $4, $zero, 13
 ; MIPS64R2-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
@@ -66,8 +63,7 @@
 ; MIPS64R6: # %bb.0: # %entry
 ; MIPS64R6-NEXT: daddiu $sp, $sp, -16
 ; MIPS64R6-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
-; MIPS64R6-NEXT: move $1, $4
-; MIPS64R6-NEXT: move $25, $1
+; MIPS64R6-NEXT: move $25, $4
 ; MIPS64R6-NEXT: jalr.hb $25
 ; MIPS64R6-NEXT: daddiu $4, $zero, 13
 ; MIPS64R6-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
@@ -78,8 +74,7 @@
 ; PIC-MIPS32R2: # %bb.0: # %entry
 ; PIC-MIPS32R2-NEXT: addiu $sp, $sp, -24
 ; PIC-MIPS32R2-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
-; PIC-MIPS32R2-NEXT: move $1, $4
-; PIC-MIPS32R2-NEXT: move $25, $1
+; PIC-MIPS32R2-NEXT: move $25, $4
 ; PIC-MIPS32R2-NEXT: jalr.hb $25
 ; PIC-MIPS32R2-NEXT: addiu $4, $zero, 13
 ; PIC-MIPS32R2-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
@@ -90,8 +85,7 @@
 ; PIC-MIPS32R6: # %bb.0: # %entry
 ; PIC-MIPS32R6-NEXT: addiu $sp, $sp, -24
 ; PIC-MIPS32R6-NEXT: sw $ra, 20($sp) # 4-byte Folded Spill
-; PIC-MIPS32R6-NEXT: move $1, $4
-; PIC-MIPS32R6-NEXT: move $25, $1
+; PIC-MIPS32R6-NEXT: move $25, $4
 ; PIC-MIPS32R6-NEXT: jalr.hb $25
 ; PIC-MIPS32R6-NEXT: addiu $4, $zero, 13
 ; PIC-MIPS32R6-NEXT: lw $ra, 20($sp) # 4-byte Folded Reload
@@ -102,8 +96,7 @@
 ; PIC-MIPS64R2: # %bb.0: # %entry
 ; PIC-MIPS64R2-NEXT: daddiu $sp, $sp, -16
 ; PIC-MIPS64R2-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
-; PIC-MIPS64R2-NEXT: move $1, $4
-; PIC-MIPS64R2-NEXT: move $25, $1
+; PIC-MIPS64R2-NEXT: move $25, $4
 ; PIC-MIPS64R2-NEXT: jalr.hb $25
 ; PIC-MIPS64R2-NEXT: daddiu $4, $zero, 13
 ; PIC-MIPS64R2-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
@@ -114,8 +107,7 @@
 ; PIC-MIPS64R6: # %bb.0: # %entry
 ; PIC-MIPS64R6-NEXT: daddiu $sp, $sp, -16
 ; PIC-MIPS64R6-NEXT: sd $ra, 8($sp) # 8-byte Folded Spill
-; PIC-MIPS64R6-NEXT: move $1, $4
-; PIC-MIPS64R6-NEXT: move $25, $1
+; PIC-MIPS64R6-NEXT: move $25, $4
 ; PIC-MIPS64R6-NEXT: jalr.hb $25
 ; PIC-MIPS64R6-NEXT: daddiu $4, $zero, 13
 ; PIC-MIPS64R6-NEXT: ld $ra, 8($sp) # 8-byte Folded Reload
@@ -129,57 +121,49 @@
 define i32 @fooTail(i32 (i32)* nocapture %f1) nounwind {
 ; MIPS32R2-LABEL: fooTail:
 ; MIPS32R2: # %bb.0: # %entry
-; MIPS32R2-NEXT: move $1, $4
-; MIPS32R2-NEXT: move $25, $1
+; MIPS32R2-NEXT: move $25, $4
 ; MIPS32R2-NEXT: jr.hb $25
 ; MIPS32R2-NEXT: addiu $4, $zero, 14
 ;
 ; MIPS32R6-LABEL: fooTail:
 ; MIPS32R6: # %bb.0: # %entry
-; MIPS32R6-NEXT: move $1, $4
-; MIPS32R6-NEXT: move $25, $1
+; MIPS32R6-NEXT: move $25, $4
 ; MIPS32R6-NEXT: jr.hb $25
 ; MIPS32R6-NEXT: addiu $4, $zero, 14
 ;
 ; MIPS64R2-LABEL: fooTail:
 ; MIPS64R2: # %bb.0: # %entry
-; MIPS64R2-NEXT: move $1, $4
-; MIPS64R2-NEXT: move $25, $1
+; MIPS64R2-NEXT: move $25, $4
 ; MIPS64R2-NEXT: jr.hb $25
 ; MIPS64R2-NEXT: daddiu $4, $zero, 14
 ;
 ; MIPS64R6-LABEL: fooTail:
 ; MIPS64R6: # %bb.0: # %entry
-; MIPS64R6-NEXT: move $1, $4
-; MIPS64R6-NEXT: move $25, $1
+; MIPS64R6-NEXT: move $25, $4
 ; MIPS64R6-NEXT: jr.hb $25
 ; MIPS64R6-NEXT: daddiu $4, $zero, 14
 ;
 ; PIC-MIPS32R2-LABEL: fooTail:
 ; PIC-MIPS32R2: # %bb.0: # %entry
-; PIC-MIPS32R2-NEXT: move $1, $4
-; PIC-MIPS32R2-NEXT: move $25, $1
+; PIC-MIPS32R2-NEXT: move $25, $4
 ; PIC-MIPS32R2-NEXT: jr.hb $25
 ; PIC-MIPS32R2-NEXT: addiu $4, $zero, 14
 ;
 ; PIC-MIPS32R6-LABEL: fooTail:
 ; PIC-MIPS32R6: # %bb.0: # %entry
-; PIC-MIPS32R6-NEXT: move $1, $4
-; PIC-MIPS32R6-NEXT: move $25, $1
+; PIC-MIPS32R6-NEXT: move $25, $4
 ; PIC-MIPS32R6-NEXT: jr.hb $25
 ; PIC-MIPS32R6-NEXT: addiu $4, $zero, 14
 ;
 ; PIC-MIPS64R2-LABEL: fooTail:
 ; PIC-MIPS64R2: # %bb.0: # %entry
-; PIC-MIPS64R2-NEXT: move $1, $4
-; PIC-MIPS64R2-NEXT: move $25, $1
+; PIC-MIPS64R2-NEXT: move $25, $4
 ; PIC-MIPS64R2-NEXT: jr.hb $25
 ; PIC-MIPS64R2-NEXT: daddiu $4, $zero, 14
 ;
 ; PIC-MIPS64R6-LABEL: fooTail:
 ; PIC-MIPS64R6: # %bb.0: # %entry
-; PIC-MIPS64R6-NEXT: move $1, $4
-; PIC-MIPS64R6-NEXT: move $25, $1
+; PIC-MIPS64R6-NEXT: move $25, $4
 ; PIC-MIPS64R6-NEXT: jr.hb $25
 ; PIC-MIPS64R6-NEXT: daddiu $4, $zero, 14
 entry:
Index: test/CodeGen/Mips/llvm-ir/select-dbl.ll
===================================================================
--- test/CodeGen/Mips/llvm-ir/select-dbl.ll
+++ test/CodeGen/Mips/llvm-ir/select-dbl.ll
@@ -59,15 +59,15 @@
   ; M3: andi $[[T0:[0-9]+]], $4, 1
   ; M3: bnez $[[T0]], [[BB0:.LBB[0-9_]+]]
-  ; M3: nop
-  ; M3: mov.d $f13, $f14
+  ; M3: mov.d $f0, $f13
+  ; M3: mov.d $f0, $f14
   ; M3: [[BB0]]:
   ; M3: jr $ra
-  ; M3: mov.d $f0, $f13
+  ; M3: nop
 
-  ; CMOV-64: andi $[[T0:[0-9]+]], $4, 1
-  ; CMOV-64: movn.d $f14, $f13, $[[T0]]
   ; CMOV-64: mov.d $f0, $f14
+  ; CMOV-64: andi $[[T0:[0-9]+]], $4, 1
+  ; CMOV-64: movn.d $f0, $f13, $[[T0]]
 
   ; SEL-64: mtc1 $4, $f0
   ; SEL-64: sel.d $f0, $f14, $f13
@@ -90,16 +90,16 @@
   ; M2: lw $[[T0:[0-9]+]], 16($sp)
   ; M2: andi $[[T1:[0-9]+]], $[[T0]], 1
   ; M2: bnez $[[T1]], $[[BB0:BB[0-9_]+]]
-  ; M2: nop
-  ; M2: mov.d $f12, $f14
+  ; M2: mov.d $f0, $f12
+  ; M2: mov.d $f0, $f14
   ; M2: $[[BB0]]:
   ; M2: jr $ra
-  ; M2: mov.d $f0, $f12
+  ; M2: nop
 
+  ; CMOV-32: mov.d $f0, $f14
   ; CMOV-32: lw $[[T0:[0-9]+]], 16($sp)
   ; CMOV-32: andi $[[T1:[0-9]+]], $[[T0]], 1
-  ; CMOV-32: movn.d $f14, $f12, $[[T1]]
-  ; CMOV-32: mov.d $f0, $f14
+  ; CMOV-32: movn.d $f0, $f12, $[[T1]]
 
   ; SEL-32: lw $[[T0:[0-9]+]], 16($sp)
   ; SEL-32: mtc1 $[[T0]], $f0
@@ -107,23 +107,23 @@
   ; M3: andi $[[T0:[0-9]+]], $6, 1
   ; M3: bnez $[[T0]], [[BB0:\.LBB[0-9_]+]]
-  ; M3: nop
-  ; M3: mov.d $f12, $f13
+  ; M3: mov.d $f0, $f12
+  ; M3: mov.d $f0, $f13
   ; M3: [[BB0]]:
   ; M3: jr $ra
-  ; M3: mov.d $f0, $f12
+  ; M3: nop
 
-  ; CMOV-64: andi $[[T0:[0-9]+]], $6, 1
-  ; CMOV-64: movn.d $f13, $f12, $[[T0]]
   ; CMOV-64: mov.d $f0, $f13
+  ; CMOV-64: andi $[[T0:[0-9]+]], $6, 1
+  ; CMOV-64: movn.d $f0, $f12, $[[T0]]
 
   ; SEL-64: mtc1 $6, $f0
   ; SEL-64: sel.d $f0, $f13, $f12
 
+  ; MM32R3: mov.d $f0, $f14
   ; MM32R3: lw $[[T0:[0-9]+]], 16($sp)
   ; MM32R3: andi16 $[[T1:[0-9]+]], $[[T0:[0-9]+]], 1
-  ; MM32R3: movn.d $f14, $f12, $[[T1]]
-  ; MM32R3: mov.d $f0, $f14
+  ; MM32R3: movn.d $f0, $f12, $[[T1]]
 
   %r = select i1 %s, double %x, double %y
   ret double %r
@@ -133,34 +133,35 @@
 entry:
   ; ALL-LABEL: tst_select_fcmp_olt_double:
 
-  ; M2: c.olt.d $f12, $f14
-  ; M3: c.olt.d $f12, $f13
+  ; M2-M3: mov.d $f0, $f12
+  ; M2: c.olt.d $f0, $f14
+  ; M3: c.olt.d $f0, $f13
   ; M2: bc1t [[BB0:\$BB[0-9_]+]]
   ; M3: bc1t [[BB0:\.LBB[0-9_]+]]
   ; M2-M3: nop
-  ; M2: mov.d $f12, $f14
-  ; M3: mov.d $f12, $f13
+  ; M2: mov.d $f0, $f14
+  ; M3: mov.d $f0, $f13
   ; M2-M3: [[BB0]]:
   ; M2-M3: jr $ra
-  ; M2-M3: mov.d $f0, $f12
+  ; M2-M3: nop
 
-  ; CMOV-32: c.olt.d $f12, $f14
-  ; CMOV-32: movt.d $f14, $f12, $fcc0
   ; CMOV-32: mov.d $f0, $f14
+  ; CMOV-32: c.olt.d $f12, $f0
+  ; CMOV-32: movt.d $f0, $f12, $fcc0
 
   ; SEL-32: cmp.lt.d $f0, $f12, $f14
   ; SEL-32: sel.d $f0, $f14, $f12
 
-  ; CMOV-64: c.olt.d $f12, $f13
-  ; CMOV-64: movt.d $f13, $f12, $fcc0
   ; CMOV-64: mov.d $f0, $f13
+  ; CMOV-64: c.olt.d $f12, $f0
+  ; CMOV-64: movt.d $f0, $f12, $fcc0
 
   ; SEL-64: cmp.lt.d $f0, $f12, $f13
   ; SEL-64: sel.d $f0, $f13, $f12
 
-  ; MM32R3: c.olt.d $f12, $f14
-  ; MM32R3: movt.d $f14, $f12, $fcc0
   ; MM32R3: mov.d $f0, $f14
+  ; MM32R3: c.olt.d $f12, $f0
+  ; MM32R3: movt.d $f0, $f12, $fcc0
 
   %s = fcmp olt double %x, %y
   %r = select i1 %s, double %x, double %y
@@ -171,34 +172,35 @@
 entry:
   ; ALL-LABEL: tst_select_fcmp_ole_double:
 
-  ; M2: c.ole.d $f12, $f14
-  ; M3: c.ole.d $f12, $f13
+  ; M2-M3: mov.d $f0, $f12
+  ; M2: c.ole.d $f0, $f14
+  ; M3: c.ole.d $f0, $f13
   ; M2: bc1t [[BB0:\$BB[0-9_]+]]
   ; M3: bc1t [[BB0:\.LBB[0-9_]+]]
   ; M2-M3: nop
-  ; M2: mov.d $f12, $f14
-  ; M3: mov.d $f12, $f13
+  ; M2: mov.d $f0, $f14
+  ; M3: mov.d $f0, $f13
   ; M2-M3: [[BB0]]:
   ; M2-M3: jr $ra
-  ; M2-M3: mov.d $f0, $f12
+  ; M2-M3: nop
 
-  ; CMOV-32: c.ole.d $f12, $f14
-  ; CMOV-32: movt.d $f14, $f12, $fcc0
   ; CMOV-32: mov.d $f0, $f14
+  ; CMOV-32: c.ole.d $f12, $f0
+  ; CMOV-32: movt.d $f0, $f12, $fcc0
 
   ; SEL-32: cmp.le.d $f0, $f12, $f14
   ; SEL-32: sel.d $f0, $f14, $f12
 
-  ; CMOV-64: c.ole.d $f12, $f13
-  ; CMOV-64: movt.d $f13, $f12, $fcc0
   ; CMOV-64: mov.d $f0, $f13
+  ; CMOV-64: c.ole.d $f12, $f0
+  ; CMOV-64: movt.d $f0, $f12, $fcc0
 
   ; SEL-64: cmp.le.d $f0, $f12, $f13
   ; SEL-64: sel.d $f0, $f13, $f12
 
-  ; MM32R3: c.ole.d $f12, $f14
-  ; MM32R3: movt.d $f14, $f12, $fcc0
   ; MM32R3: mov.d $f0, $f14
+  ; MM32R3: c.ole.d $f12, $f0
+  ; MM32R3: movt.d $f0, $f12, $fcc0
 
   %s = fcmp ole double %x, %y
   %r = select i1 %s, double %x, double %y
@@ -209,34 +211,35 @@
 entry:
   ; ALL-LABEL: tst_select_fcmp_ogt_double:
 
-  ; M2: c.ule.d $f12, $f14
-  ; M3: c.ule.d $f12, $f13
+  ; M2-M3: mov.d $f0, $f12
+  ; M2: c.ule.d $f0, $f14
+  ; M3: c.ule.d $f0, $f13
   ; M2: bc1f [[BB0:\$BB[0-9_]+]]
   ; M3: bc1f [[BB0:\.LBB[0-9_]+]]
   ; M2-M3: nop
-  ; M2: mov.d $f12, $f14
-  ; M3: mov.d $f12, $f13
+  ; M2: mov.d $f0, $f14
+  ; M3: mov.d $f0, $f13
   ; M2-M3: [[BB0]]:
   ; M2-M3: jr $ra
-  ; M2-M3: mov.d $f0, $f12
+  ; M2-M3: nop
 
-  ; CMOV-32: c.ule.d $f12, $f14
-  ; CMOV-32: movf.d $f14, $f12, $fcc0
   ; CMOV-32: mov.d $f0, $f14
+  ; CMOV-32: c.ule.d $f12, $f0
+  ; CMOV-32: movf.d $f0, $f12, $fcc0
 
   ; SEL-32: cmp.lt.d $f0, $f14, $f12
   ; SEL-32: sel.d $f0, $f14, $f12
 
-  ; CMOV-64: c.ule.d $f12, $f13
-  ; CMOV-64: movf.d $f13, $f12, $fcc0
   ; CMOV-64: mov.d $f0, $f13
+  ; CMOV-64: c.ule.d $f12, $f0
+  ; CMOV-64: movf.d $f0, $f12, $fcc0
 
   ; SEL-64: cmp.lt.d $f0, $f13, $f12
   ; SEL-64: sel.d $f0, $f13, $f12
 
-  ; MM32R3: c.ule.d $f12, $f14
-  ; MM32R3: movf.d $f14, $f12, $fcc0
   ; MM32R3: mov.d $f0, $f14
+  ; MM32R3: c.ule.d $f12, $f0
+  ; MM32R3: movf.d $f0, $f12, $fcc0
 
   %s = fcmp ogt double %x, %y
   %r = select i1 %s, double %x, double %y
@@ -247,34 +250,35 @@
 entry:
   ; ALL-LABEL: tst_select_fcmp_oge_double:
 
-  ; M2: c.ult.d $f12, $f14
-  ; M3: c.ult.d $f12, $f13
+  ; M2-M3: mov.d $f0, $f12
+  ; M2: c.ult.d $f0, $f14
+  ; M3: c.ult.d $f0, $f13
   ; M2: bc1f [[BB0:\$BB[0-9_]+]]
   ; M3: bc1f [[BB0:\.LBB[0-9_]+]]
   ; M2-M3: nop
-  ; M2: mov.d $f12, $f14
-  ; M3: mov.d $f12, $f13
+  ; M2: mov.d $f0, $f14
+  ; M3: mov.d $f0, $f13
   ; M2-M3: [[BB0]]:
   ; M2-M3: jr $ra
-  ; M2-M3: mov.d $f0, $f12
+  ; M2-M3: nop
 
-  ; CMOV-32: c.ult.d $f12, $f14
-  ; CMOV-32: movf.d $f14, $f12, $fcc0
   ; CMOV-32: mov.d $f0, $f14
+  ; CMOV-32: c.ult.d $f12, $f0
+  ; CMOV-32: movf.d $f0, $f12, $fcc0
 
   ; SEL-32: cmp.le.d $f0, $f14, $f12
   ; SEL-32: sel.d $f0, $f14, $f12
 
-  ; CMOV-64: c.ult.d $f12, $f13
-  ; CMOV-64: movf.d $f13, $f12, $fcc0
   ; CMOV-64: mov.d $f0, $f13
+  ; CMOV-64: c.ult.d $f12, $f0
+  ; CMOV-64: movf.d $f0, $f12, $fcc0
 
   ; SEL-64: cmp.le.d $f0, $f13, $f12
   ; SEL-64: sel.d $f0, $f13, $f12
 
-  ; MM32R3: c.ult.d $f12, $f14
-  ; MM32R3: movf.d $f14, $f12, $fcc0
   ; MM32R3: mov.d $f0, $f14
+  ; MM32R3: c.ult.d $f12, $f0
+  ; MM32R3: movf.d $f0, $f12, $fcc0
 
   %s = fcmp oge double %x, %y
   %r = select i1 %s, double %x, double %y
@@ -285,34 +289,35 @@
 entry:
   ; ALL-LABEL: tst_select_fcmp_oeq_double:
 
-  ; M2: c.eq.d $f12, $f14
-  ; M3: c.eq.d $f12, $f13
+  ; M2-M3: mov.d $f0, $f12
+  ; M2: c.eq.d $f0, $f14
+  ; M3: c.eq.d $f0, $f13
   ; M2: bc1t [[BB0:\$BB[0-9_]+]]
   ; M3: bc1t [[BB0:\.LBB[0-9_]+]]
   ; M2-M3: nop
-  ; M2: mov.d $f12, $f14
-  ; M3: mov.d $f12, $f13
+  ; M2: mov.d $f0, $f14
+  ; M3: mov.d $f0, $f13
   ; M2-M3: [[BB0]]:
   ; M2-M3: jr $ra
-  ; M2-M3: mov.d $f0, $f12
+  ; M2-M3: nop
 
-  ; CMOV-32: c.eq.d $f12, $f14
-  ; CMOV-32: movt.d $f14, $f12, $fcc0
   ; CMOV-32: mov.d $f0, $f14
+  ; CMOV-32: c.eq.d $f12, $f0
+  ; CMOV-32: movt.d $f0, $f12, $fcc0
 
   ; SEL-32: cmp.eq.d $f0, $f12, $f14
   ; SEL-32: sel.d $f0, $f14, $f12
 
-  ; CMOV-64: c.eq.d $f12, $f13
-  ; CMOV-64: movt.d $f13, $f12, $fcc0
   ; CMOV-64: mov.d $f0, $f13
+  ; CMOV-64: c.eq.d $f12, $f0
+  ; CMOV-64: movt.d $f0, $f12, $fcc0
 
   ; SEL-64: cmp.eq.d $f0, $f12, $f13
   ; SEL-64: sel.d $f0, $f13, $f12
 
-  ; MM32R3: c.eq.d $f12, $f14
-  ; MM32R3: movt.d $f14, $f12, $fcc0
   ; MM32R3: mov.d $f0, $f14
+  ; MM32R3: c.eq.d $f12, $f0
+  ; MM32R3: movt.d $f0, $f12, $fcc0
 
   %s = fcmp oeq double %x, %y
   %r = select i1 %s, double %x, double %y
@@ -323,20 +328,21 @@
 entry:
   ; ALL-LABEL: tst_select_fcmp_one_double:
 
-  ; M2: c.ueq.d $f12, $f14
-  ; M3: c.ueq.d $f12, $f13
+  ; M2-M3: mov.d $f0, $f12
+  ; M2: c.ueq.d $f0, $f14
+  ; M3: c.ueq.d $f0, $f13
   ; M2: bc1f [[BB0:\$BB[0-9_]+]]
   ; M3: bc1f [[BB0:\.LBB[0-9_]+]]
   ; M2-M3: nop
-  ; M2: mov.d $f12, $f14
-  ; M3: mov.d $f12, $f13
+  ; M2: mov.d $f0, $f14
+  ; M3: mov.d $f0, $f13
   ; M2-M3: [[BB0]]:
   ; M2-M3: jr $ra
-  ; M2-M3: mov.d $f0, $f12
+  ; M2-M3: nop
 
-  ; CMOV-32: c.ueq.d $f12, $f14
-  ; CMOV-32: movf.d $f14, $f12, $fcc0
   ; CMOV-32: mov.d $f0, $f14
+  ; CMOV-32: c.ueq.d $f12, $f0
+  ; CMOV-32: movf.d $f0, $f12, $fcc0
 
   ; SEL-32: cmp.ueq.d $f0, $f12, $f14
   ; SEL-32: mfc1 $[[T0:[0-9]+]], $f0
@@ -344,9 +350,9 @@
   ; SEL-32: mtc1 $[[T0:[0-9]+]], $f0
   ; SEL-32: sel.d $f0, $f14, $f12
 
-  ; CMOV-64: c.ueq.d $f12, $f13
-  ; CMOV-64: movf.d $f13, $f12, $fcc0
   ; CMOV-64: mov.d $f0, $f13
+  ; CMOV-64: c.ueq.d $f12, $f0
+  ; CMOV-64: movf.d $f0, $f12, $fcc0
 
   ; SEL-64: cmp.ueq.d $f0, $f12, $f13
   ; SEL-64: mfc1 $[[T0:[0-9]+]], $f0
@@ -354,9 +360,9 @@
   ; SEL-64: mtc1 $[[T0:[0-9]+]], $f0
   ; SEL-64: sel.d $f0, $f13, $f12
 
-  ; MM32R3: c.ueq.d $f12, $f14
-  ; MM32R3: movf.d $f14, $f12, $fcc0
   ; MM32R3: mov.d $f0, $f14
+  ; MM32R3: c.ueq.d $f12, $f0
+  ; MM32R3: movf.d $f0, $f12, $fcc0
 
   %s = fcmp one double %x, %y
   %r = select i1 %s, double %x, double %y
Index: test/CodeGen/Mips/llvm-ir/select-flt.ll
===================================================================
--- test/CodeGen/Mips/llvm-ir/select-flt.ll
+++ test/CodeGen/Mips/llvm-ir/select-flt.ll
@@ -35,15 +35,16 @@
   ; M2-M3: andi $[[T0:[0-9]+]], $4, 1
   ; M2: bnez $[[T0]], [[BB0:\$BB[0-9_]+]]
+  ; M2: nop
   ; M3: bnez $[[T0]], [[BB0:\.LBB[0-9_]+]]
-  ; M2-M3: nop
+  ; M3: mov.s $f0, $f13
+  ; M3: mov.s $f0, $f14
   ; M2: jr $ra
   ; M2: mtc1 $6, $f0
-  ; M3: mov.s $f13, $f14
   ; M2-M3: [[BB0]]:
   ; M2-M3: jr $ra
   ; M2: mtc1 $5, $f0
-  ; M3: mov.s $f0, $f13
+  ; M3: nop
 
   ; CMOV-32: mtc1 $6, $f0
   ; CMOV-32: andi $[[T0:[0-9]+]], $4, 1
@@ -55,9 +56,9 @@
   ; SEL-32: mtc1 $4, $f0
   ; SEL-32: sel.s $f0, $[[F1]], $[[F0]]
 
-  ; CMOV-64: andi $[[T0:[0-9]+]], $4, 1
-  ; CMOV-64: movn.s $f14, $f13, $[[T0]]
   ; CMOV-64: mov.s $f0, $f14
+  ; CMOV-64: andi $[[T0:[0-9]+]], $4, 1
+  ; CMOV-64: movn.s $f0, $f13, $[[T0]]
 
   ; SEL-64: mtc1 $4, $f0
   ; SEL-64: sel.s $f0, $f14, $f13
@@ -79,30 +80,30 @@
   ; M2-M3: andi $[[T0:[0-9]+]], $6, 1
   ; M2: bnez $[[T0]], [[BB0:\$BB[0-9_]+]]
   ; M3: bnez $[[T0]], [[BB0:\.LBB[0-9_]+]]
-  ; M2-M3: nop
-  ; M2: mov.s $f12, $f14
-  ; M3: mov.s $f12, $f13
+  ; M2-M3: mov.s $f0, $f12
+  ; M2: mov.s $f0, $f14
+  ; M3: mov.s $f0, $f13
   ; M2-M3: [[BB0]]:
   ; M2-M3: jr $ra
-  ; M2-M3: mov.s $f0, $f12
+  ; M2-M3: nop
 
-  ; CMOV-32: andi $[[T0:[0-9]+]], $6, 1
-  ; CMOV-32: movn.s $f14, $f12, $[[T0]]
   ; CMOV-32: mov.s $f0, $f14
+  ; CMOV-32: andi $[[T0:[0-9]+]], $6, 1
+  ; CMOV-32: movn.s $f0, $f12, $[[T0]]
 
   ; SEL-32: mtc1 $6, $f0
   ; SEL-32: sel.s $f0, $f14, $f12
 
-  ; CMOV-64: andi $[[T0:[0-9]+]], $6, 1
-  ; CMOV-64: movn.s $f13, $f12, $[[T0]]
   ; CMOV-64: mov.s $f0, $f13
+  ; CMOV-64: andi $[[T0:[0-9]+]], $6, 1
+  ; CMOV-64: movn.s $f0, $f12, $[[T0]]
 
   ; SEL-64: mtc1 $6, $f0
   ; SEL-64: sel.s $f0, $f13, $f12
 
+  ; MM32R3: mov.s $[[F0:f[0-9]+]], $f14
   ; MM32R3: andi16 $[[T0:[0-9]+]], $6, 1
-  ; MM32R3: movn.s $[[F0:f[0-9]+]], $f12, $[[T0]]
-  ; MM32R3: mov.s $f0, $[[F0]]
+  ; MM32R3: movn.s $[[F0]], $f12, $[[T0]]
 
   %r = select i1 %s, float %x, float %y
   ret float %r
@@ -112,34 +113,35 @@
 entry:
   ; ALL-LABEL: tst_select_fcmp_olt_float:
 
-  ; M2: c.olt.s $f12, $f14
-  ; M3: c.olt.s $f12, $f13
+  ; M2-M3: mov.s $f0, $f12
+  ; M2: c.olt.s $f0, $f14
+  ; M3: c.olt.s $f0, $f13
   ; M2: bc1t [[BB0:\$BB[0-9_]+]]
   ; M3: bc1t [[BB0:\.LBB[0-9_]+]]
   ; M2-M3: nop
-  ; M2: mov.s $f12, $f14
-  ; M3: mov.s $f12, $f13
+  ; M2: mov.s $f0, $f14
+  ; M3: mov.s $f0, $f13
   ; M2-M3: [[BB0]]:
   ; M2-M3: jr $ra
-  ; M2-M3: mov.s $f0, $f12
+  ; M2-M3: nop
 
-  ; CMOV-32: c.olt.s $f12, $f14
-  ; CMOV-32: movt.s $f14, $f12, $fcc0
   ; CMOV-32: mov.s $f0, $f14
+  ; CMOV-32: c.olt.s $f12, $f0
+  ; CMOV-32: movt.s $f0, $f12, $fcc0
 
   ; SEL-32: cmp.lt.s $f0, $f12, $f14
   ; SEL-32: sel.s $f0, $f14, $f12
 
-  ; CMOV-64: c.olt.s $f12, $f13
-  ; CMOV-64: movt.s $f13, $f12, $fcc0
   ; CMOV-64: mov.s $f0, $f13
+  ; CMOV-64: c.olt.s $f12, $f0
+  ; CMOV-64: movt.s $f0, $f12, $fcc0
 
   ; SEL-64: cmp.lt.s $f0, $f12, $f13
   ; SEL-64: sel.s $f0, $f13, $f12
 
-  ; MM32R3: c.olt.s $f12, $f14
-  ; MM32R3: movt.s $f14, $f12, $fcc0
   ; MM32R3: mov.s $f0, $f14
+  ; MM32R3: c.olt.s $f12, $f0
+  ; MM32R3: movt.s $f0, $f12, $fcc0
 
   %s = fcmp olt float %x, %y
   %r = select i1 %s, float %x, float %y
@@ -150,34 +152,35 @@
 entry:
   ; ALL-LABEL: tst_select_fcmp_ole_float:
 
-  ; M2: c.ole.s $f12, $f14
-  ; M3: c.ole.s $f12, $f13
+  ; M2-M3: mov.s $f0, $f12
+  ; M2: c.ole.s $f0, $f14
+  ; M3: c.ole.s $f0, $f13
   ; M2: bc1t [[BB0:\$BB[0-9_]+]]
   ; M3: bc1t [[BB0:\.LBB[0-9_]+]]
   ; M2-M3: nop
-  ; M2: mov.s $f12, $f14
-  ; M3: mov.s $f12, $f13
+  ; M2: mov.s $f0, $f14
+  ; M3: mov.s $f0, $f13
   ; M2-M3: [[BB0]]:
   ; M2-M3: jr $ra
-  ; M2-M3: mov.s $f0, $f12
+  ; M2-M3: nop
 
-  ; CMOV-32: c.ole.s $f12, $f14
-  ; CMOV-32: movt.s $f14, $f12, $fcc0
   ; CMOV-32: mov.s $f0, $f14
+  ; CMOV-32: c.ole.s $f12, $f0
+  ; CMOV-32: movt.s $f0, $f12, $fcc0
 
   ; SEL-32: cmp.le.s $f0, $f12, $f14
   ; SEL-32: sel.s $f0, $f14, $f12
 
-  ; CMOV-64: c.ole.s $f12, $f13
-  ; CMOV-64: movt.s $f13, $f12, $fcc0
   ; CMOV-64: mov.s $f0, $f13
+  ; CMOV-64: c.ole.s $f12, $f0
+  ; CMOV-64: movt.s $f0, $f12, $fcc0
 
   ; SEL-64: cmp.le.s $f0, $f12, $f13
   ; SEL-64: sel.s $f0, $f13, $f12
 
-  ; MM32R3: c.ole.s $f12, $f14
-  ; MM32R3: movt.s $f14, $f12, $fcc0
   ; MM32R3: mov.s $f0, $f14
+  ; MM32R3: c.ole.s $f12, $f0
+  ; MM32R3: movt.s $f0, $f12, $fcc0
 
   %s = fcmp ole float %x, %y
   %r = select i1 %s, float %x, float %y
@@ -188,34 +191,35 @@
 entry:
   ; ALL-LABEL: tst_select_fcmp_ogt_float:
 
-  ; M2: c.ule.s $f12, $f14
-  ; M3: c.ule.s $f12, $f13
+  ; M2-M3: mov.s $f0, $f12
+  ; M2: c.ule.s $f0, $f14
+  ; M3: c.ule.s $f0, $f13
   ; M2: bc1f [[BB0:\$BB[0-9_]+]]
   ; M3: bc1f [[BB0:\.LBB[0-9_]+]]
   ; M2-M3: nop
-  ; M2: mov.s $f12, $f14
-  ; M3: mov.s $f12, $f13
+  ; M2: mov.s $f0, $f14
+  ; M3: mov.s $f0, $f13
   ; M2-M3: [[BB0]]:
   ; M2-M3: jr $ra
-  ; M2-M3: mov.s $f0, $f12
+  ; M2-M3: nop
 
-  ; CMOV-32: c.ule.s $f12, $f14
-  ; CMOV-32: movf.s $f14, $f12, $fcc0
   ; CMOV-32: mov.s $f0, $f14
+  ; CMOV-32: c.ule.s $f12, $f0
+  ; CMOV-32: movf.s $f0, $f12, $fcc0
 
   ; SEL-32: cmp.lt.s $f0, $f14, $f12
   ; SEL-32: sel.s $f0, $f14, $f12
 
-  ; CMOV-64: c.ule.s $f12, $f13
-  ; CMOV-64: movf.s $f13, $f12, $fcc0
   ; CMOV-64: mov.s $f0, $f13
+  ; CMOV-64: c.ule.s $f12, $f0
+  ; CMOV-64: movf.s $f0, $f12, $fcc0
 
   ; SEL-64: cmp.lt.s $f0, $f13, $f12
   ; SEL-64: sel.s $f0, $f13, $f12
 
-  ; MM32R3: c.ule.s $f12, $f14
-  ; MM32R3: movf.s $f14, $f12, $fcc0
   ; MM32R3: mov.s $f0, $f14
+  ; MM32R3: c.ule.s $f12, $f0
+  ; MM32R3: movf.s $f0, $f12, $fcc0
 
   %s = fcmp ogt float %x, %y
   %r = select i1 %s, float %x, float %y
@@ -226,34 +230,35 @@
 entry:
   ; ALL-LABEL: tst_select_fcmp_oge_float:
 
-  ; M2: c.ult.s $f12, $f14
-  ; M3: c.ult.s $f12, $f13
+  ; M2-M3: mov.s $f0, $f12
+  ; M2: c.ult.s $f0, $f14
+  ; M3: c.ult.s $f0, $f13
   ; M2: bc1f [[BB0:\$BB[0-9_]+]]
   ; M3: bc1f [[BB0:\.LBB[0-9_]+]]
   ; M2-M3: nop
-  ; M2: mov.s $f12, $f14
-  ; M3: mov.s $f12, $f13
+  ; M2: mov.s $f0, $f14
+  ; M3: mov.s $f0, $f13
   ; M2-M3: [[BB0]]:
   ; M2-M3: jr $ra
-  ; M2-M3: mov.s $f0, $f12
+  ; M2-M3: nop
 
-  ; CMOV-32: c.ult.s $f12, $f14
-  ; CMOV-32: movf.s $f14, $f12, $fcc0
   ; CMOV-32: mov.s $f0, $f14
+  ; CMOV-32: c.ult.s $f12, $f0
+  ; CMOV-32: movf.s $f0, $f12, $fcc0
 
   ; SEL-32: cmp.le.s $f0, $f14, $f12
   ; SEL-32: sel.s $f0, $f14, $f12
 
-  ; CMOV-64: c.ult.s $f12, $f13
-  ; CMOV-64: movf.s $f13, $f12, $fcc0
   ; CMOV-64: mov.s $f0, $f13
+  ; CMOV-64: c.ult.s $f12, $f0
+  ; CMOV-64: movf.s $f0, $f12, $fcc0
 
   ; SEL-64: cmp.le.s $f0, $f13, $f12
   ; SEL-64: sel.s $f0, $f13, $f12
 
-  ; MM32R3: c.ult.s $f12, $f14
-  ; MM32R3: movf.s $f14, $f12, $fcc0
   ; MM32R3: mov.s $f0, $f14
+  ; MM32R3: c.ult.s $f12, $f0
+  ; MM32R3: movf.s $f0, $f12, $fcc0
 
   %s = fcmp oge float %x, %y
   %r = select i1 %s, float %x, float %y
@@ -264,34 +269,35 @@
 entry:
   ; ALL-LABEL: tst_select_fcmp_oeq_float:
 
-  ; M2: c.eq.s $f12, $f14
-  ; M3: c.eq.s $f12, $f13
+  ; M2-M3: mov.s $f0, $f12
+  ; M2: c.eq.s $f0, $f14
+  ; M3: c.eq.s $f0, $f13
   ; M2: bc1t [[BB0:\$BB[0-9_]+]]
   ; M3: bc1t [[BB0:\.LBB[0-9_]+]]
   ; M2-M3: nop
-  ; M2: mov.s $f12, $f14
-  ; M3: mov.s $f12, $f13
+  ; M2: mov.s $f0, $f14
+  ; M3: mov.s $f0, $f13
   ; M2-M3: [[BB0]]:
   ; M2-M3: jr $ra
-  ; M2-M3: mov.s $f0, $f12
+  ; M2-M3: nop
 
-  ; CMOV-32: c.eq.s $f12, $f14
-  ; CMOV-32: movt.s $f14, $f12, $fcc0
   ; CMOV-32: mov.s $f0, $f14
+  ; CMOV-32: c.eq.s $f12, $f0
+  ; CMOV-32: movt.s $f0, $f12, $fcc0
 
   ; SEL-32: cmp.eq.s $f0, $f12, $f14
   ; SEL-32: sel.s $f0, $f14, $f12
 
-  ; CMOV-64: c.eq.s $f12, $f13
-  ; CMOV-64: movt.s $f13, $f12, $fcc0
   ; CMOV-64: mov.s $f0, $f13
+  ; CMOV-64: c.eq.s $f12, $f0
+  ; CMOV-64: movt.s $f0, $f12, $fcc0
 
   ; SEL-64: cmp.eq.s $f0, $f12, $f13
   ; SEL-64: sel.s $f0, $f13, $f12
 
-  ; MM32R3: c.eq.s $f12, $f14
-  ; MM32R3: movt.s $f14, $f12, $fcc0
   ; MM32R3: mov.s $f0, $f14
+  ; MM32R3: c.eq.s $f12, $f0
+  ; MM32R3: movt.s $f0, $f12, $fcc0
 
   %s = fcmp oeq float %x, %y
   %r = select i1 %s, float %x, float %y
@@ -302,20 +308,21 @@
 entry:
   ; ALL-LABEL: tst_select_fcmp_one_float:
 
-  ; M2: c.ueq.s $f12, $f14
-  ; M3: c.ueq.s $f12, $f13
+  ; M2-M3: mov.s $f0, $f12
+  ; M2: c.ueq.s $f0, $f14
+  ; M3: c.ueq.s $f0, $f13
   ; M2: bc1f [[BB0:\$BB[0-9_]+]]
   ; M3: bc1f [[BB0:\.LBB[0-9_]+]]
   ; M2-M3: nop
-  ; M2: mov.s $f12, $f14
-  ; M3: mov.s $f12, $f13
+  ; M2: mov.s $f0, $f14
+  ; M3: mov.s $f0, $f13
   ; M2-M3: [[BB0]]:
   ; M2-M3: jr $ra
-  ; M2-M3: mov.s $f0, $f12
+  ; M2-M3: nop
 
-  ; CMOV-32: c.ueq.s $f12, $f14
-  ; CMOV-32: movf.s $f14, $f12, $fcc0
   ; CMOV-32: mov.s $f0, $f14
+  ; CMOV-32: c.ueq.s $f12, $f0
+  ; CMOV-32: movf.s $f0, $f12, $fcc0
 
   ; SEL-32: cmp.ueq.s $f0, $f12, $f14
   ; SEL-32: mfc1 $[[T0:[0-9]+]], $f0
@@ -323,9 +330,9 @@
   ; SEL-32: mtc1 $[[T0:[0-9]+]], $f0
   ; SEL-32: sel.s $f0, $f14, $f12
 
-  ; CMOV-64: c.ueq.s $f12, $f13
-  ; CMOV-64: movf.s $f13, $f12, $fcc0
   ; CMOV-64: mov.s $f0, $f13
+  ; CMOV-64: c.ueq.s $f12, $f0
+  ; CMOV-64: movf.s $f0, $f12, $fcc0
 
   ; SEL-64: cmp.ueq.s $f0, $f12, $f13
   ; SEL-64: mfc1 $[[T0:[0-9]+]], $f0
@@ -333,9 +340,9 @@
   ; SEL-64: mtc1 $[[T0:[0-9]+]], $f0
   ; SEL-64: sel.s $f0, $f13, $f12
 
-  ; MM32R3: c.ueq.s $f12, $f14
-  ; MM32R3: movf.s $f14, $f12, $fcc0
   ; MM32R3: mov.s $f0, $f14
+  ; MM32R3: c.ueq.s $f12, $f0
+  ; MM32R3: movf.s $f0, $f12, $fcc0
 
   %s = fcmp one float %x, %y
   %r = select i1 %s, float %x, float %y
Index: test/CodeGen/Mips/o32_cc_byval.ll
===================================================================
--- test/CodeGen/Mips/o32_cc_byval.ll
+++ test/CodeGen/Mips/o32_cc_byval.ll
@@ -97,14 +97,14 @@
 define void @f4(float %f, %struct.S3* nocapture byval %s3, %struct.S1* nocapture byval %s1) nounwind {
 entry:
 ; CHECK: addiu $sp, $sp, -48
-; CHECK-DAG: sw $7, 60($sp)
+; CHECK: move $4, $7
+; CHECK-DAG: sw $4, 60($sp)
 ; CHECK-DAG: sw $6, 56($sp)
 ; CHECK-DAG: sw $5, 52($sp)
 ; CHECK-DAG: lw $[[R1:[0-9]+]], 80($sp)
 ; CHECK-DAG: lb $[[R0:[0-9]+]], 52($sp)
 ; CHECK-DAG: sw $[[R0]], 32($sp)
 ; CHECK-DAG: sw $[[R1]], 24($sp)
-; CHECK: move $4, $7
 
   %i = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 2
   %tmp = load i32, i32* %i, align 4
Index: test/CodeGen/Mips/select.ll
===================================================================
--- test/CodeGen/Mips/select.ll
+++ test/CodeGen/Mips/select.ll
@@ -147,11 +147,11 @@
 ; 32R6: mtc1 $[[T0]], $[[CC:f0]]
 ; 32R6: sel.s $[[CC]], $[[F1]], $[[F0]]
 
-; 64: movn.s $f14, $f13, $4
 ; 64: mov.s $f0, $f14
+; 64: movn.s $f0, $f13, $4
 
-; 64R2: movn.s $f14, $f13, $4
 ; 64R2: mov.s $f0, $f14
+; 64R2: movn.s $f0, $f13, $4
 
 ; 64R6: sltu $[[T0:[0-9]+]], $zero, $4
 ; 64R6: mtc1 $[[T0]], $[[CC:f0]]
@@ -183,11 +183,11 @@
 ; 32R6-DAG: ldc1 $[[F1:f[0-9]+]], 16($sp)
 ; 32R6: sel.d $[[CC]], $[[F1]], $[[F0]]
 
-; 64: movn.d $f14, $f13, $4
 ; 64: mov.d $f0, $f14
+; 64: movn.d $f0, $f13, $4
 
-; 64R2: movn.d $f14, $f13, $4
 ; 64R2: mov.d $f0, $f14
+; 64R2: movn.d $f0, $f13, $4
 
 ; 64R6-DAG: sltu $[[T0:[0-9]+]], $zero, $4
 ; 64R6-DAG: mtc1 $[[T0]], $[[CC:f0]]
@@ -202,30 +202,30 @@
 entry:
 ; ALL-LABEL: f32_fcmp_oeq_f32_val:
 
+; 32: mov.s $f0, $f14
 ; 32-DAG: mtc1 $6, $[[F2:f[0-9]+]]
 ; 32-DAG: mtc1 $7, $[[F3:f[0-9]+]]
 ; 32: c.eq.s $[[F2]], $[[F3]]
-; 32: movt.s $f14, $f12, $fcc0
-; 32: mov.s $f0, $f14
+; 32: movt.s $f0, $f12, $fcc0
 
+; 32R2: mov.s $f0, $f14
 ; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
 ; 32R2-DAG: mtc1 $7, $[[F3:f[0-9]+]]
 ; 32R2: c.eq.s $[[F2]], $[[F3]]
-; 32R2: movt.s $f14, $f12, $fcc0
-; 32R2: mov.s $f0, $f14
+; 32R2: movt.s $f0, $f12, $fcc0
 
 ; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
 ; 32R6-DAG: mtc1 $7, $[[F3:f[0-9]+]]
 ; 32R6: cmp.eq.s $[[CC:f0]], $[[F2]], $[[F3]]
 ; 32R6: sel.s $[[CC]], $f14, $f12
 
-; 64: c.eq.s $f14, $f15
-; 64: movt.s $f13, $f12, $fcc0
 ; 64: mov.s $f0, $f13
+; 64: c.eq.s $f14, $f15
+; 64: movt.s $f0, $f12, $fcc0
 
-; 64R2: c.eq.s $f14, $f15
-; 64R2: movt.s $f13, $f12, $fcc0
 ; 64R2: mov.s $f0, $f13
+; 64R2: c.eq.s $f14, $f15
+; 64R2: movt.s $f0, $f12, $fcc0
 
 ; 64R6: cmp.eq.s $[[CC:f0]], $f14, $f15
 ; 64R6: sel.s $[[CC]], $f13, $f12
@@ -239,30 +239,30 @@
 entry:
 ; ALL-LABEL: f32_fcmp_olt_f32_val:
 
+; 32: mov.s $f0, $f14
 ; 32-DAG: mtc1 $6, $[[F2:f[0-9]+]]
 ; 32-DAG: mtc1 $7, $[[F3:f[0-9]+]]
 ; 32: c.olt.s $[[F2]], $[[F3]]
-; 32: movt.s $f14, $f12, $fcc0
-; 32: mov.s $f0, $f14
+; 32: movt.s $f0, $f12, $fcc0
 
+; 32R2: mov.s $f0, $f14
 ; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
 ; 32R2-DAG: mtc1 $7, $[[F3:f[0-9]+]]
 ; 32R2: c.olt.s $[[F2]], $[[F3]]
-; 32R2: movt.s $f14, $f12, $fcc0
-; 32R2: mov.s $f0, $f14
+; 32R2: movt.s $f0, $f12, $fcc0
 
 ; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
 ; 32R6-DAG: mtc1 $7, $[[F3:f[0-9]+]]
 ; 32R6: cmp.lt.s $[[CC:f0]], $[[F2]], $[[F3]]
 ; 32R6: sel.s $[[CC]], $f14, $f12
 
-; 64: c.olt.s $f14, $f15
-; 64: movt.s $f13, $f12, $fcc0
 ; 64: mov.s $f0, $f13
+; 64: c.olt.s $f14, $f15
+; 64: movt.s $f0, $f12, $fcc0
 
-; 64R2: c.olt.s $f14, $f15
-; 64R2: movt.s $f13, $f12, $fcc0
 ; 64R2: mov.s $f0, $f13
+; 64R2: c.olt.s $f14, $f15
+; 64R2: movt.s $f0, $f12, $fcc0
 
 ; 64R6: cmp.lt.s $[[CC:f0]], $f14, $f15
 ; 64R6: sel.s $[[CC]], $f13, $f12
@@ -276,30 +276,30 @@
 entry:
 ; ALL-LABEL: f32_fcmp_ogt_f32_val:
 
+; 32: mov.s $f0, $f14
 ; 32-DAG: mtc1 $6, $[[F2:f[0-9]+]]
 ; 32-DAG: mtc1 $7, $[[F3:f[0-9]+]]
 ; 32: c.ule.s $[[F2]], $[[F3]]
-; 32: movf.s $f14, $f12, $fcc0
-; 32: mov.s $f0, $f14
+; 32: movf.s $f0, $f12, $fcc0
 
+; 32R2: mov.s $f0, $f14
 ; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
 ; 32R2-DAG: mtc1 $7, $[[F3:f[0-9]+]]
 ; 32R2: c.ule.s $[[F2]], $[[F3]]
-; 32R2: movf.s $f14, $f12, $fcc0
-; 32R2: mov.s $f0, $f14
+; 32R2: movf.s $f0, $f12, $fcc0
 
 ; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
 ; 32R6-DAG: mtc1 $7, $[[F3:f[0-9]+]]
 ; 32R6: cmp.lt.s $[[CC:f0]], $[[F3]], $[[F2]]
 ; 32R6: sel.s $[[CC]], $f14, $f12
 
-; 64: c.ule.s $f14, $f15
-; 64: movf.s $f13, $f12, $fcc0
 ; 64: mov.s $f0, $f13
+; 64: c.ule.s $f14, $f15
+; 64: movf.s $f0, $f12, $fcc0
 
-; 64R2: c.ule.s $f14, $f15
-; 64R2: movf.s $f13, $f12, $fcc0
 ; 64R2: mov.s $f0, $f13
+; 64R2: c.ule.s $f14, $f15
+; 64R2: movf.s $f0, $f12, $fcc0
 
 ; 64R6: cmp.lt.s $[[CC:f0]], $f15, $f14
 ; 64R6: sel.s $[[CC]], $f13, $f12
@@ -313,30 +313,30 @@
 entry:
 ; ALL-LABEL: f32_fcmp_ogt_f64_val:
 
+; 32: mov.d $f0, $f14
 ; 32-DAG: lwc1 $[[F2:f[0-9]+]], 16($sp)
 ; 32-DAG: lwc1 $[[F3:f[0-9]+]], 20($sp)
 ; 32: c.ule.s $[[F2]], $[[F3]]
-; 32: movf.d $f14, $f12, $fcc0
-; 32: mov.d $f0, $f14
+; 32: movf.d $f0, $f12, $fcc0
 
+; 32R2: mov.d $f0, $f14
 ; 32R2-DAG: lwc1 $[[F2:f[0-9]+]], 16($sp)
 ; 32R2-DAG: lwc1 $[[F3:f[0-9]+]], 20($sp)
 ; 32R2: c.ule.s $[[F2]], $[[F3]]
-; 32R2: movf.d $f14, $f12, $fcc0
-; 32R2: mov.d $f0, $f14
+; 32R2: movf.d $f0, $f12, $fcc0
 
 ; 32R6-DAG: lwc1 $[[F2:f[0-9]+]], 16($sp)
 ; 32R6-DAG: lwc1 $[[F3:f[0-9]+]], 20($sp)
 ; 32R6: cmp.lt.s $[[CC:f0]], $[[F3]], $[[F2]]
 ; 32R6: sel.d $[[CC]], $f14, $f12
 
-; 64: c.ule.s $f14, $f15
-; 64: movf.d $f13, $f12, $fcc0
 ; 64: mov.d $f0, $f13
+; 64: c.ule.s $f14, $f15
+; 64: movf.d $f0, $f12, $fcc0
 
-; 64R2: c.ule.s $f14, $f15
-; 64R2: movf.d $f13, $f12, $fcc0
 ; 64R2: mov.d $f0, $f13
+; 64R2: c.ule.s $f14, $f15
+; 64R2: movf.d $f0, $f12, $fcc0
 
 ; 64R6: cmp.lt.s $[[CC:f0]], $f15, $f14
 ; 64R6: sel.d $[[CC]], $f13, $f12
@@ -350,30 +350,30 @@
 entry:
 ; ALL-LABEL: f64_fcmp_oeq_f64_val:
 
+; 32: mov.d $f0, $f14
 ; 32-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
 ; 32-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
 ; 32: c.eq.d $[[F2]], $[[F3]]
-; 32: movt.d $f14, $f12, $fcc0
-; 32: mov.d $f0, $f14
+; 32: movt.d $f0, $f12, $fcc0
 
+; 32R2: mov.d $f0, $f14
 ; 32R2-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
 ; 32R2-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
 ; 32R2: c.eq.d $[[F2]], $[[F3]]
-; 32R2: movt.d $f14, $f12, $fcc0
-; 32R2: mov.d $f0, $f14
+; 32R2: movt.d $f0, $f12, $fcc0
 
 ; 32R6-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
 ; 32R6-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
 ; 32R6: cmp.eq.d $[[CC:f0]], $[[F2]], $[[F3]]
 ; 32R6: sel.d $[[CC]], $f14, $f12
 
-; 64: c.eq.d $f14, $f15
-; 64: movt.d $f13, $f12, $fcc0
 ; 64: mov.d $f0, $f13
+; 64: c.eq.d $f14, $f15
+; 64: movt.d $f0, $f12, $fcc0
 
-; 64R2: c.eq.d $f14, $f15
-; 64R2: movt.d $f13, $f12, $fcc0
 ; 64R2: mov.d $f0, $f13
+; 64R2: c.eq.d $f14, $f15
+; 64R2: movt.d $f0, $f12, $fcc0
 
 ; 64R6: cmp.eq.d $[[CC:f0]], $f14, $f15
 ; 64R6: sel.d $[[CC]], $f13, $f12
@@ -387,30 +387,30 @@
 entry:
 ; ALL-LABEL: f64_fcmp_olt_f64_val:
 
+; 32: mov.d $f0, $f14
 ; 32-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
 ; 32-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
 ; 32: c.olt.d $[[F2]], $[[F3]]
-; 32: movt.d $f14, $f12, $fcc0
-; 32: mov.d $f0, $f14
+; 32: movt.d $f0, $f12, $fcc0
 
+; 32R2: mov.d $f0, $f14
 ; 32R2-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
 ; 32R2-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
 ; 32R2: c.olt.d $[[F2]], $[[F3]]
-; 32R2: movt.d $f14, $f12, $fcc0
-; 32R2: mov.d $f0, $f14
+; 32R2: movt.d $f0, $f12, $fcc0
 
 ; 32R6-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
 ; 32R6-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
 ; 32R6: cmp.lt.d $[[CC:f0]], $[[F2]], $[[F3]]
 ; 32R6: sel.d $[[CC]], $f14, $f12
 
-; 64: c.olt.d $f14, $f15
-; 64: movt.d $f13, $f12, $fcc0
 ; 64: mov.d $f0, $f13
+; 64: c.olt.d $f14, $f15
+; 64: movt.d $f0, $f12, $fcc0
 
-; 64R2: c.olt.d $f14, $f15
-; 64R2: movt.d $f13, $f12, $fcc0
 ; 64R2: mov.d $f0, $f13
+; 64R2: c.olt.d $f14, $f15
+; 64R2: movt.d $f0, $f12, $fcc0
 
 ; 64R6: cmp.lt.d $[[CC:f0]], $f14, $f15
 ; 64R6: sel.d $[[CC]], $f13, $f12
@@ -424,30 +424,30 @@
 entry:
 ; ALL-LABEL: f64_fcmp_ogt_f64_val:
 
+; 32: mov.d $f0, $f14
 ; 32-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
 ; 32-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
 ; 32: c.ule.d $[[F2]], $[[F3]]
-; 32: movf.d $f14, $f12, $fcc0
-; 32: mov.d $f0, $f14
+; 32: movf.d $f0, $f12, $fcc0
 
+; 32R2: mov.d $f0, $f14
 ; 32R2-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
 ; 32R2-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
 ; 32R2: c.ule.d $[[F2]], $[[F3]]
-; 32R2: movf.d $f14, $f12, $fcc0
-; 32R2: mov.d $f0, $f14
+; 32R2: movf.d $f0, $f12, $fcc0
 
 ; 32R6-DAG: ldc1 $[[F2:f[0-9]+]], 16($sp)
 ; 32R6-DAG: ldc1 $[[F3:f[0-9]+]], 24($sp)
 ; 32R6: cmp.lt.d $[[CC:f0]], $[[F3]], $[[F2]]
 ; 32R6: sel.d $[[CC]], $f14, $f12
 
-; 64: c.ule.d $f14, $f15
-; 64: movf.d $f13, $f12, $fcc0
 ; 64: mov.d $f0, $f13
+; 64: c.ule.d $f14, $f15
+; 64: movf.d $f0, $f12, $fcc0
 
-; 64R2: c.ule.d $f14, $f15
-; 64R2: movf.d $f13, $f12, $fcc0
 ; 64R2: mov.d $f0, $f13
+; 64R2: c.ule.d $f14, $f15
+; 64R2: movf.d $f0, $f12, $fcc0
 
 ; 64R6: cmp.lt.d $[[CC:f0]], $f15, $f14
 ; 64R6: sel.d $[[CC]], $f13, $f12
@@ -461,19 +461,19 @@
 entry:
 ; ALL-LABEL: f64_fcmp_ogt_f32_val:
 
+; 32: mov.s $f0, $f14
 ; 32-DAG: mtc1 $6, $[[F2:f[1-3]*[02468]+]]
 ; 32-DAG: mtc1 $7, $[[F2H:f[1-3]*[13579]+]]
 ; 32-DAG: ldc1 $[[F3:f[0-9]+]], 16($sp)
 ; 32: c.ule.d $[[F2]], $[[F3]]
-; 32: movf.s $f14, $f12, $fcc0
-; 32: mov.s $f0, $f14
+; 32: movf.s $f0, $f12, $fcc0
 
+; 32R2: mov.s $f0, $f14
 ; 32R2-DAG: mtc1 $6, $[[F2:f[0-9]+]]
 ; 32R2-DAG: mthc1 $7, $[[F2]]
 ; 32R2-DAG: ldc1 $[[F3:f[0-9]+]], 16($sp)
 ; 32R2: c.ule.d $[[F2]], $[[F3]]
-; 32R2: movf.s $f14, $f12, $fcc0
-; 32R2: mov.s $f0, $f14
+; 32R2: movf.s $f0, $f12, $fcc0
 
 ; 32R6-DAG: mtc1 $6, $[[F2:f[0-9]+]]
 ; 32R6-DAG: mthc1 $7, $[[F2]]
@@ -481,13 +481,13 @@
 ; 32R6: cmp.lt.d $[[CC:f0]], $[[F3]], $[[F2]]
 ; 32R6: sel.s $[[CC]], $f14, $f12
 
-; 64: c.ule.d $f14, $f15
-; 64: movf.s $f13, $f12, $fcc0
 ; 64: mov.s $f0, $f13
+; 64: c.ule.d $f14, $f15
+; 64: movf.s $f0, $f12, $fcc0
 
-; 64R2: c.ule.d $f14, $f15
-; 64R2: movf.s $f13, $f12, $fcc0
 ; 64R2: mov.s $f0, $f13
+; 64R2: c.ule.d $f14, $f15
+; 64R2: movf.s $f0, $f12, $fcc0
 
 ; 64R6: cmp.lt.d $[[CC:f0]], $f15, $f14
 ; 64R6: sel.s $[[CC]], $f13, $f12
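
The one-line override in MipsRegisterInfo.h is the entire functional change; all of the test churn above is the register allocator acting on the extra copy hints. For reference, a minimal sketch of the opt-in is below, mirroring the header hunk. The surrounding class members are elided, and the comment describes the hook's effect as it stood when targets were migrated one by one; this is an illustration, not part of the patch.

// Sketch of the opt-in, mirrored from the MipsRegisterInfo.h hunk above.
// At the time of this change, TargetRegisterInfo declared roughly
//   virtual bool enableMultipleCopyHints() const { return false; }
// Returning true lets spill-weight calculation record a hint for every
// COPY touching a virtual register rather than a single hint, so the
// allocator can, e.g., assign $25 directly to the incoming $4 argument
// and fold "move $1, $4; move $25, $1" into one "move $25, $4".
class MipsRegisterInfo : public MipsGenRegisterInfo {
public:
  bool enableMultipleCopyHints() const override { return true; }
  // ...remaining overrides as in the header above...
};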