Index: lib/Target/Mips/MipsCallLowering.h
===================================================================
--- lib/Target/Mips/MipsCallLowering.h
+++ lib/Target/Mips/MipsCallLowering.h
@@ -38,7 +38,7 @@
     bool assignVRegs(ArrayRef<unsigned> VRegs, ArrayRef<CCValAssign> ArgLocs,
                      unsigned Index);
 
-    void setMostSignificantFirst(SmallVectorImpl<unsigned> &VRegs);
+    void setLeastSignificantFirst(SmallVectorImpl<unsigned> &VRegs);
 
     MachineIRBuilder &MIRBuilder;
     MachineRegisterInfo &MRI;
Index: lib/Target/Mips/MipsCallLowering.cpp
===================================================================
--- lib/Target/Mips/MipsCallLowering.cpp
+++ lib/Target/Mips/MipsCallLowering.cpp
@@ -45,9 +45,9 @@
   return true;
 }
 
-void MipsCallLowering::MipsHandler::setMostSignificantFirst(
+void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
     SmallVectorImpl<unsigned> &VRegs) {
-  if (MIRBuilder.getMF().getDataLayout().isLittleEndian())
+  if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
     std::reverse(VRegs.begin(), VRegs.end());
 }
 
@@ -181,7 +181,7 @@
                                        unsigned ArgsReg) {
   if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex))
     return false;
-  setMostSignificantFirst(VRegs);
+  setLeastSignificantFirst(VRegs);
   MIRBuilder.buildMerge(ArgsReg, VRegs);
   return true;
 }
@@ -283,7 +283,7 @@
                                        unsigned ArgLocsStartIndex,
                                        unsigned ArgsReg) {
   MIRBuilder.buildUnmerge(VRegs, ArgsReg);
-  setMostSignificantFirst(VRegs);
+  setLeastSignificantFirst(VRegs);
   if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex))
     return false;
Index: lib/Target/Mips/MipsLegalizerInfo.cpp
===================================================================
--- lib/Target/Mips/MipsLegalizerInfo.cpp
+++ lib/Target/Mips/MipsLegalizerInfo.cpp
@@ -80,15 +80,15 @@
     unsigned Carry = MRI.createGenericVirtualRegister(sHalf);
     unsigned TmpResHigh = MRI.createGenericVirtualRegister(sHalf);
 
-    MIRBuilder.buildUnmerge({RHSHigh, RHSLow}, MI.getOperand(2).getReg());
-    MIRBuilder.buildUnmerge({LHSHigh, LHSLow}, MI.getOperand(1).getReg());
+    MIRBuilder.buildUnmerge({RHSLow, RHSHigh}, MI.getOperand(2).getReg());
+    MIRBuilder.buildUnmerge({LHSLow, LHSHigh}, MI.getOperand(1).getReg());
 
     MIRBuilder.buildAdd(TmpResHigh, LHSHigh, RHSHigh);
     MIRBuilder.buildAdd(ResLow, LHSLow, RHSLow);
     MIRBuilder.buildICmp(CmpInst::ICMP_ULT, Carry, ResLow, LHSLow);
     MIRBuilder.buildAdd(ResHigh, TmpResHigh, Carry);
 
-    MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {ResHigh, ResLow});
+    MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {ResLow, ResHigh});
 
     MI.eraseFromParent();
     break;
@@ -109,7 +109,7 @@
         ResHigh, *ConstantInt::get(MI.getMF()->getFunction().getContext(),
                                    CImmValue.lshr(Size / 2).trunc(Size / 2)));
 
-    MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {ResHigh, ResLow});
+    MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {ResLow, ResHigh});
 
     MI.eraseFromParent();
     break;
Index: test/CodeGen/Mips/GlobalISel/irtranslator/split_args.ll
===================================================================
--- test/CodeGen/Mips/GlobalISel/irtranslator/split_args.ll
+++ test/CodeGen/Mips/GlobalISel/irtranslator/split_args.ll
@@ -6,10 +6,10 @@
   ; MIPS32: liveins: $a0, $a1
   ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
   ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
-  ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY]](s32)
+  ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
-  ; MIPS32: $v0 = COPY [[UV1]](s32)
-  ; MIPS32: $v1 = COPY [[UV]](s32)
+  ; MIPS32: $v0 = COPY [[UV]](s32)
+  ; MIPS32: $v1 = COPY [[UV1]](s32)
   ; MIPS32: RetRA implicit $v0, implicit $v1
 entry:
   ret i64 %a
@@ -30,10 +30,10 @@
   ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.[[STACK1]], align 0)
   ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
   ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.[[STACK0]], align 0)
-  ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD1]](s32), [[LOAD]](s32)
+  ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
   ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
-  ; MIPS32: $v0 = COPY [[UV1]](s32)
-  ; MIPS32: $v1 = COPY [[UV]](s32)
+  ; MIPS32: $v0 = COPY [[UV]](s32)
+  ; MIPS32: $v1 = COPY [[UV1]](s32)
   ; MIPS32: RetRA implicit $v0, implicit $v1
 entry:
   ret i64 %a
@@ -46,10 +46,10 @@
   ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
   ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a2
   ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a3
-  ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY1]](s32)
+  ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
   ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
-  ; MIPS32: $v0 = COPY [[UV1]](s32)
-  ; MIPS32: $v1 = COPY [[UV]](s32)
+  ; MIPS32: $v0 = COPY [[UV]](s32)
+  ; MIPS32: $v1 = COPY [[UV1]](s32)
   ; MIPS32: RetRA implicit $v0, implicit $v1
 entry:
   ret i64 %a
@@ -73,10 +73,10 @@
   ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.[[STACK1]], align 0)
   ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
   ; MIPS32: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.[[STACK0]], align 0)
-  ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD1]](s32)
+  ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD1]](s32), [[LOAD2]](s32)
   ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
-  ; MIPS32: $v0 = COPY [[UV1]](s32)
-  ; MIPS32: $v1 = COPY [[UV]](s32)
+  ; MIPS32: $v0 = COPY [[UV]](s32)
+  ; MIPS32: $v1 = COPY [[UV1]](s32)
   ; MIPS32: RetRA implicit $v0, implicit $v1
 entry:
   ret i64 %a
@@ -96,10 +96,10 @@
   ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.[[STACK1]], align 0)
   ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
   ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.[[STACK0]], align 0)
-  ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD1]](s32), [[LOAD]](s32)
+  ; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
   ; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
-  ; MIPS32: $v0 = COPY [[UV1]](s32)
-  ; MIPS32: $v1 = COPY [[UV]](s32)
+  ; MIPS32: $v0 = COPY [[UV]](s32)
+  ; MIPS32: $v1 = COPY [[UV1]](s32)
   ; MIPS32: RetRA implicit $v0, implicit $v1
 entry:
   ret i64 %a
Index: test/CodeGen/Mips/GlobalISel/legalizer/add.mir
===================================================================
--- test/CodeGen/Mips/GlobalISel/legalizer/add.mir
+++ test/CodeGen/Mips/GlobalISel/legalizer/add.mir
@@ -226,12 +226,12 @@
     ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
     ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
     ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
-    ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY3]], [[COPY1]]
-    ; MIPS32: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY]]
-    ; MIPS32: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[COPY2]]
+    ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY]]
+    ; MIPS32: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY3]], [[COPY1]]
+    ; MIPS32: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD1]](s32), [[COPY3]]
     ; MIPS32: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[ICMP]]
-    ; MIPS32: $v0 = COPY [[ADD1]](s32)
-    ; MIPS32: $v1 = COPY [[ADD2]](s32)
+    ; MIPS32: $v0 = COPY [[ADD2]](s32)
+    ; MIPS32: $v1 = COPY [[ADD1]](s32)
     ; MIPS32: RetRA implicit $v0, implicit $v1
     %2:_(s32) = COPY $a0
     %3:_(s32) = COPY $a1
Index: test/CodeGen/Mips/GlobalISel/legalizer/constants.mir
===================================================================
--- test/CodeGen/Mips/GlobalISel/legalizer/constants.mir
+++ test/CodeGen/Mips/GlobalISel/legalizer/constants.mir
@@ -21,8 +21,8 @@
     ; MIPS32-LABEL: name: any_i64
    ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
    ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
-    ; MIPS32: $v0 = COPY [[C]](s32)
-    ; MIPS32: $v1 = COPY [[C1]](s32)
+    ; MIPS32: $v0 = COPY [[C1]](s32)
+    ; MIPS32: $v1 = COPY [[C]](s32)
     ; MIPS32: RetRA implicit $v0, implicit $v1
     %0:_(s64) = G_CONSTANT i64 -9223372036854775808
     %1:_(s32), %2:_(s32) = G_UNMERGE_VALUES %0(s64)