Index: include/llvm/CodeGen/GlobalISel/Legalizer.h
===================================================================
--- include/llvm/CodeGen/GlobalISel/Legalizer.h
+++ include/llvm/CodeGen/GlobalISel/Legalizer.h
@@ -58,9 +58,6 @@
   bool combineExtracts(MachineInstr &MI, MachineRegisterInfo &MRI,
                        const TargetInstrInfo &TII);
 
-  bool combineMerges(MachineInstr &MI, MachineRegisterInfo &MRI,
-                     const TargetInstrInfo &TII, MachineIRBuilder &MIRBuilder);
-
   bool runOnMachineFunction(MachineFunction &MF) override;
 };
 } // End namespace llvm.
Index: include/llvm/CodeGen/GlobalISel/LegalizerCombiner.h
===================================================================
--- /dev/null
+++ include/llvm/CodeGen/GlobalISel/LegalizerCombiner.h
@@ -0,0 +1,206 @@
+//===-- llvm/CodeGen/GlobalISel/LegalizerCombiner.h -----------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file contains helper functions that try to clean up artifacts such as
+// the G_TRUNC and G_[ZSA]EXT instructions that were created during
+// legalization to make the types match. It also contains the combines of
+// merge-like instructions that happen at the end of legalization.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/GlobalISel/Legalizer.h"
+#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "legalizer"
+
+namespace llvm {
+class LegalizerCombiner {
+  MachineIRBuilder &Builder;
+  MachineRegisterInfo &MRI;
+
+public:
+  LegalizerCombiner(MachineIRBuilder &B, MachineRegisterInfo &MRI)
+      : Builder(B), MRI(MRI) {}
+
+  bool tryCombineAnyExt(MachineInstr &MI,
+                        SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    if (MI.getOpcode() != TargetOpcode::G_ANYEXT)
+      return false;
+    MachineInstr *DefMI = MRI.getVRegDef(MI.getOperand(1).getReg());
+    if (DefMI->getOpcode() == TargetOpcode::G_TRUNC) {
+      DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      unsigned DstReg = MI.getOperand(0).getReg();
+      unsigned SrcReg = DefMI->getOperand(1).getReg();
+      Builder.setInstr(MI);
+      // We get a copy/trunc/extend depending on the sizes.
+      Builder.buildAnyExtOrTrunc(DstReg, SrcReg);
+      MI.eraseFromParent();
+      if (MRI.use_empty(DefMI->getOperand(0).getReg()))
+        DeadInsts.push_back(DefMI);
+      return true;
+    }
+    return false;
+  }
+
+  bool tryCombineZExt(MachineInstr &MI,
+                      SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    if (MI.getOpcode() != TargetOpcode::G_ZEXT)
+      return false;
+    MachineInstr *DefMI = MRI.getVRegDef(MI.getOperand(1).getReg());
+    if (DefMI->getOpcode() == TargetOpcode::G_TRUNC) {
+      DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      Builder.setInstr(MI);
+      unsigned DstReg = MI.getOperand(0).getReg();
+      unsigned ZExtSrc = MI.getOperand(1).getReg();
+      LLT ZExtSrcTy = MRI.getType(ZExtSrc);
+      LLT DstTy = MRI.getType(DstReg);
+      APInt Mask = APInt::getAllOnesValue(ZExtSrcTy.getSizeInBits());
+      auto MaskCstMIB = Builder.buildConstant(DstTy, Mask.getZExtValue());
+      unsigned TruncSrc = DefMI->getOperand(1).getReg();
+      // We get a copy/trunc/extend depending on the sizes.
+      auto SrcCopyOrTrunc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrc);
+      Builder.buildAnd(DstReg, SrcCopyOrTrunc, MaskCstMIB);
+      MI.eraseFromParent();
+      if (MRI.use_empty(DefMI->getOperand(0).getReg()))
+        DeadInsts.push_back(DefMI);
+      return true;
+    }
+    return false;
+  }
+
+  bool tryCombineSExt(MachineInstr &MI,
+                      SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    if (MI.getOpcode() != TargetOpcode::G_SEXT)
+      return false;
+    MachineInstr *DefMI = MRI.getVRegDef(MI.getOperand(1).getReg());
+    if (DefMI->getOpcode() == TargetOpcode::G_TRUNC) {
+      DEBUG(dbgs() << ".. Combine MI: " << MI;);
+      Builder.setInstr(MI);
+      unsigned DstReg = MI.getOperand(0).getReg();
+      LLT DstTy = MRI.getType(DstReg);
+      unsigned SExtSrc = MI.getOperand(1).getReg();
+      LLT SExtSrcTy = MRI.getType(SExtSrc);
+      unsigned SizeDiff = DstTy.getSizeInBits() - SExtSrcTy.getSizeInBits();
+      auto SizeDiffMIB = Builder.buildConstant(DstTy, SizeDiff);
+      unsigned TruncSrcReg = DefMI->getOperand(1).getReg();
+      // We get a copy/trunc/extend depending on the sizes.
+      auto SrcCopyExtOrTrunc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrcReg);
+      auto ShlMIB = Builder.buildInstr(TargetOpcode::G_SHL, DstTy,
+                                       SrcCopyExtOrTrunc, SizeDiffMIB);
+      Builder.buildInstr(TargetOpcode::G_ASHR, DstReg, ShlMIB, SizeDiffMIB);
+      MI.eraseFromParent();
+      if (MRI.use_empty(DefMI->getOperand(0).getReg()))
+        DeadInsts.push_back(DefMI);
+      return true;
+    }
+    return false;
+  }
+
+  bool tryCombineMerges(MachineInstr &MI,
+                        SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    if (MI.getOpcode() != TargetOpcode::G_UNMERGE_VALUES)
+      return false;
+
+    unsigned NumDefs = MI.getNumOperands() - 1;
+    unsigned SrcReg = MI.getOperand(NumDefs).getReg();
+    MachineInstr *MergeI = MRI.getVRegDef(SrcReg);
+    if (!MergeI || (MergeI->getOpcode() != TargetOpcode::G_MERGE_VALUES))
+      return false;
+
+    const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;
+
+    if (NumMergeRegs < NumDefs) {
+      if (NumDefs % NumMergeRegs != 0)
+        return false;
+
+      Builder.setInstr(MI);
+      // Transform to UNMERGEs, for example
+      //   %1 = G_MERGE_VALUES %4, %5
+      //   %9, %10, %11, %12 = G_UNMERGE_VALUES %1
+      // to
+      //   %9, %10 = G_UNMERGE_VALUES %4
+      //   %11, %12 = G_UNMERGE_VALUES %5
+      const unsigned NewNumDefs = NumDefs / NumMergeRegs;
+      for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
+        SmallVector<unsigned, 2> DstRegs;
+        for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
+             ++j, ++DefIdx)
+          DstRegs.push_back(MI.getOperand(DefIdx).getReg());
+
+        Builder.buildUnmerge(DstRegs, MergeI->getOperand(Idx + 1).getReg());
+      }
+
+    } else if (NumMergeRegs > NumDefs) {
+      if (NumMergeRegs % NumDefs != 0)
+        return false;
+
+      Builder.setInstr(MI);
+      // Transform to MERGEs
+      //   %6 = G_MERGE_VALUES %17, %18, %19, %20
+      //   %7, %8 = G_UNMERGE_VALUES %6
+      // to
+      //   %7 = G_MERGE_VALUES %17, %18
+      //   %8 = G_MERGE_VALUES %19, %20
+      const unsigned NumRegs = NumMergeRegs / NumDefs;
+      for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
+        SmallVector<unsigned, 2> Regs;
+        for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
+             ++j, ++Idx)
+          Regs.push_back(MergeI->getOperand(Idx).getReg());
+
+        Builder.buildMerge(MI.getOperand(DefIdx).getReg(), Regs);
+      }
+
+    } else {
+      // FIXME: is a COPY appropriate if the types mismatch? We know both
+      // registers are allocatable by now.
+      if (MRI.getType(MI.getOperand(0).getReg()) !=
+          MRI.getType(MergeI->getOperand(1).getReg()))
+        return false;
+
+      for (unsigned Idx = 0; Idx < NumDefs; ++Idx)
+        MRI.replaceRegWith(MI.getOperand(Idx).getReg(),
+                           MergeI->getOperand(Idx + 1).getReg());
+    }
+
+    MI.eraseFromParent();
+    if (MRI.use_empty(MergeI->getOperand(0).getReg()))
+      DeadInsts.push_back(MergeI);
+    return true;
+  }
+
+  /// Try to combine away MI.
+  /// Returns true if it combined away the MI.
+  /// The caller should not rely on MI still existing afterwards, as it may
+  /// have been deleted. Instructions that become dead as a result of the
+  /// combine are appended to DeadInsts.
+  bool tryCombineInstruction(MachineInstr &MI,
+                             SmallVectorImpl<MachineInstr *> &DeadInsts) {
+    switch (MI.getOpcode()) {
+    default:
+      return false;
+    case TargetOpcode::G_ANYEXT:
+      return tryCombineAnyExt(MI, DeadInsts);
+    case TargetOpcode::G_ZEXT:
+      return tryCombineZExt(MI, DeadInsts);
+    case TargetOpcode::G_SEXT:
+      return tryCombineSExt(MI, DeadInsts);
+    case TargetOpcode::G_UNMERGE_VALUES:
+      return tryCombineMerges(MI, DeadInsts);
+    }
+  }
+};
+
+} // namespace llvm
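The three ext combines above rest on simple bit-level identities: anyext(trunc x) only has to preserve the low bits, zext(trunc x) is an AND with an all-ones mask of the source width, and sext(trunc x) is a left shift followed by an arithmetic right shift by the width difference. A minimal standalone C++ sketch of the same identities on plain integers (the 8-to-32-bit widths and function names are illustrative, not MachineIRBuilder API):

#include <cassert>
#include <cstdint>

// zext(trunc x) from 8 to 32 bits == x & 0xff, the mask tryCombineZExt builds.
static uint32_t zextOfTrunc8(uint32_t X) { return X & 0xffu; }

// sext(trunc x) from 8 to 32 bits == (x << 24) >> 24 with an arithmetic
// right shift, matching the G_SHL/G_ASHR pair tryCombineSExt emits.
static int32_t sextOfTrunc8(uint32_t X) {
  unsigned SizeDiff = 32 - 8; // the G_CONSTANT the combiner materializes
  return (int32_t)(X << SizeDiff) >> SizeDiff;
}

int main() {
  assert(zextOfTrunc8(0x12345680u) == 0x80u);
  assert(sextOfTrunc8(0x12345680u) == -128); // low byte 0x80 sign-extends
  assert(sextOfTrunc8(0x1234567Fu) == 127);
  return 0;
}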
Index: include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
===================================================================
--- include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -349,6 +349,10 @@
   /// with the same (scalar or vector) type).
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
+  template <typename DstTy, typename... UseArgsTy>
+  MachineInstrBuilder buildAnd(DstTy &&Dst, UseArgsTy &&... UseArgs) {
+    return buildAnd(getDestFromArg(Dst), getRegFromArg(UseArgs)...);
+  }
   MachineInstrBuilder buildAnd(unsigned Res, unsigned Op0, unsigned Op1);
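tryCombineMerges above splits or regroups registers purely by index arithmetic: when the G_UNMERGE_VALUES has more definitions than the feeding G_MERGE_VALUES has sources, merge source Idx covers unmerge defs Idx*NewNumDefs through (Idx+1)*NewNumDefs-1; in the opposite case each unmerge def is rebuilt from NumRegs consecutive merge operands. A small self-contained sketch of that mapping (plain unsigned values standing in for virtual registers; the helper name is illustrative):

#include <cassert>

// For the "transform to UNMERGEs" case (NumDefs > NumMergeRegs), return the
// 0-based merge source that the combiner reads a given unmerge def from.
static unsigned mergeSourceForDef(unsigned DefIdx, unsigned NumDefs,
                                  unsigned NumMergeRegs) {
  assert(NumDefs % NumMergeRegs == 0 && "the combiner bails out otherwise");
  unsigned NewNumDefs = NumDefs / NumMergeRegs;
  return DefIdx / NewNumDefs;
}

int main() {
  // %1 = G_MERGE_VALUES %4, %5
  // %9, %10, %11, %12 = G_UNMERGE_VALUES %1
  // => %9, %10 come from %4 and %11, %12 come from %5.
  assert(mergeSourceForDef(0, 4, 2) == 0);
  assert(mergeSourceForDef(1, 4, 2) == 0);
  assert(mergeSourceForDef(2, 4, 2) == 1);
  assert(mergeSourceForDef(3, 4, 2) == 1);
  return 0;
}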
Index: lib/CodeGen/GlobalISel/Legalizer.cpp
===================================================================
--- lib/CodeGen/GlobalISel/Legalizer.cpp
+++ lib/CodeGen/GlobalISel/Legalizer.cpp
@@ -14,6 +14,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/CodeGen/GlobalISel/Legalizer.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/CodeGen/GlobalISel/LegalizerCombiner.h"
 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
 #include "llvm/CodeGen/GlobalISel/Utils.h"
 #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
@@ -50,81 +52,6 @@
 void Legalizer::init(MachineFunction &MF) {
 }
 
-bool Legalizer::combineMerges(MachineInstr &MI, MachineRegisterInfo &MRI,
-                              const TargetInstrInfo &TII,
-                              MachineIRBuilder &MIRBuilder) {
-  if (MI.getOpcode() != TargetOpcode::G_UNMERGE_VALUES)
-    return false;
-
-  unsigned NumDefs = MI.getNumOperands() - 1;
-  unsigned SrcReg = MI.getOperand(NumDefs).getReg();
-  MachineInstr &MergeI = *MRI.def_instr_begin(SrcReg);
-  if (MergeI.getOpcode() != TargetOpcode::G_MERGE_VALUES)
-    return false;
-
-  const unsigned NumMergeRegs = MergeI.getNumOperands() - 1;
-
-  if (NumMergeRegs < NumDefs) {
-    if (NumDefs % NumMergeRegs != 0)
-      return false;
-
-    MIRBuilder.setInstr(MI);
-    // Transform to UNMERGEs, for example
-    //   %1 = G_MERGE_VALUES %4, %5
-    //   %9, %10, %11, %12 = G_UNMERGE_VALUES %1
-    // to
-    //   %9, %10 = G_UNMERGE_VALUES %4
-    //   %11, %12 = G_UNMERGE_VALUES %5
-
-    const unsigned NewNumDefs = NumDefs / NumMergeRegs;
-    for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
-      SmallVector<unsigned, 2> DstRegs;
-      for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
-           ++j, ++DefIdx)
-        DstRegs.push_back(MI.getOperand(DefIdx).getReg());
-
-      MIRBuilder.buildUnmerge(DstRegs, MergeI.getOperand(Idx + 1).getReg());
-    }
-
-  } else if (NumMergeRegs > NumDefs) {
-    if (NumMergeRegs % NumDefs != 0)
-      return false;
-
-    MIRBuilder.setInstr(MI);
-    // Transform to MERGEs
-    //   %6 = G_MERGE_VALUES %17, %18, %19, %20
-    //   %7, %8 = G_UNMERGE_VALUES %6
-    // to
-    //   %7 = G_MERGE_VALUES %17, %18
-    //   %8 = G_MERGE_VALUES %19, %20
-
-    const unsigned NumRegs = NumMergeRegs / NumDefs;
-    for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
-      SmallVector<unsigned, 2> Regs;
-      for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs; ++j, ++Idx)
-        Regs.push_back(MergeI.getOperand(Idx).getReg());
-
-      MIRBuilder.buildMerge(MI.getOperand(DefIdx).getReg(), Regs);
-    }
-
-  } else {
-    // FIXME: is a COPY appropriate if the types mismatch? We know both
-    // registers are allocatable by now.
-    if (MRI.getType(MI.getOperand(0).getReg()) !=
-        MRI.getType(MergeI.getOperand(1).getReg()))
-      return false;
-
-    for (unsigned Idx = 0; Idx < NumDefs; ++Idx)
-      MRI.replaceRegWith(MI.getOperand(Idx).getReg(),
-                         MergeI.getOperand(Idx + 1).getReg());
-  }
-
-  MI.eraseFromParent();
-  if (MRI.use_empty(MergeI.getOperand(0).getReg()))
-    MergeI.eraseFromParent();
-  return true;
-}
-
 bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
   // If the ISel pipeline failed, do not bother running that pass.
   if (MF.getProperties().hasProperty(
@@ -154,60 +81,91 @@
       if (!isPreISelGenericOpcode(MI->getOpcode()))
         continue;
       unsigned NumNewInsns = 0;
-      SmallVector<MachineInstr *, 4> WorkList;
+      using VecType = SetVector<MachineInstr *, SmallVector<MachineInstr *, 8>>;
+      VecType WorkList;
+      VecType CombineList;
       Helper.MIRBuilder.recordInsertions([&](MachineInstr *MI) {
         // Only legalize pre-isel generic instructions.
        // Legalization process could generate Target specific pseudo
        // instructions with generic types. Don't record them
         if (isPreISelGenericOpcode(MI->getOpcode())) {
           ++NumNewInsns;
-          WorkList.push_back(MI);
+          WorkList.insert(MI);
+          CombineList.insert(MI);
         }
       });
-      WorkList.push_back(&*MI);
-
+      WorkList.insert(&*MI);
+      LegalizerCombiner C(Helper.MIRBuilder, MF.getRegInfo());
       bool Changed = false;
       LegalizerHelper::LegalizeResult Res;
-      unsigned Idx = 0;
       do {
-        Res = Helper.legalizeInstrStep(*WorkList[Idx]);
-        // Error out if we couldn't legalize this instruction. We may want to
-        // fall back to DAG ISel instead in the future.
-        if (Res == LegalizerHelper::UnableToLegalize) {
-          Helper.MIRBuilder.stopRecordingInsertions();
+        assert(!WorkList.empty() && "Expecting illegal ops");
+        while (!WorkList.empty()) {
+          NumNewInsns = 0;
+          MachineInstr *CurrInst = WorkList.pop_back_val();
+          Res = Helper.legalizeInstrStep(*CurrInst);
+          // Error out if we couldn't legalize this instruction. We may want to
+          // fall back to DAG ISel instead in the future.
           if (Res == LegalizerHelper::UnableToLegalize) {
-            reportGISelFailure(MF, TPC, MORE, "gisel-legalize",
-                               "unable to legalize instruction",
-                               *WorkList[Idx]);
-            return false;
+            Helper.MIRBuilder.stopRecordingInsertions();
+            if (Res == LegalizerHelper::UnableToLegalize) {
+              reportGISelFailure(MF, TPC, MORE, "gisel-legalize",
+                                 "unable to legalize instruction", *CurrInst);
+              return false;
+            }
           }
-        }
-        Changed |= Res == LegalizerHelper::Legalized;
-        ++Idx;
+          Changed |= Res == LegalizerHelper::Legalized;
+          // If CurrInst was legalized, there's a good chance that it might
+          // have been erased. So remove it from the Combine List.
+          if (Res == LegalizerHelper::Legalized)
+            CombineList.remove(CurrInst);
 #ifndef NDEBUG
-        if (NumNewInsns) {
-          DEBUG(dbgs() << ".. .. Emitted " << NumNewInsns << " insns\n");
-          for (auto I = WorkList.end() - NumNewInsns, E = WorkList.end();
-               I != E; ++I)
-            DEBUG(dbgs() << ".. .. New MI: "; (*I)->print(dbgs()));
-          NumNewInsns = 0;
+          if (NumNewInsns)
+            for (unsigned I = WorkList.size() - NumNewInsns,
+                          E = WorkList.size();
+                 I != E; ++I)
+              DEBUG(dbgs() << ".. .. New MI: " << *WorkList[I];);
+#endif
         }
+        // Do the combines.
+        while (!CombineList.empty()) {
+          NumNewInsns = 0;
+          MachineInstr *CurrInst = CombineList.pop_back_val();
+          SmallVector<MachineInstr *, 4> DeadInstructions;
+          Changed |= C.tryCombineInstruction(*CurrInst, DeadInstructions);
+          for (auto *DeadMI : DeadInstructions) {
+            DEBUG(dbgs() << ".. Erasing Dead Instruction " << *DeadMI);
+            CombineList.remove(DeadMI);
+            WorkList.remove(DeadMI);
+            DeadMI->eraseFromParent();
+          }
+#ifndef NDEBUG
+          if (NumNewInsns)
+            for (unsigned I = CombineList.size() - NumNewInsns,
+                          E = CombineList.size();
+                 I != E; ++I)
+              DEBUG(dbgs() << ".. .. Combine New MI: " << *CombineList[I];);
 #endif
-      } while (Idx < WorkList.size());
+        }
+      } while (!WorkList.empty());
 
       Helper.MIRBuilder.stopRecordingInsertions();
     }
   }
 
   MachineRegisterInfo &MRI = MF.getRegInfo();
-  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+  MachineIRBuilder MIRBuilder(MF);
+  LegalizerCombiner C(MIRBuilder, MRI);
   for (auto &MBB : MF) {
     for (auto MI = MBB.begin(); MI != MBB.end(); MI = NextMI) {
       // Get the next Instruction before we try to legalize, because there's a
       // good chance MI will be deleted.
       NextMI = std::next(MI);
-      Changed |= combineMerges(*MI, MRI, TII, Helper.MIRBuilder);
+      SmallVector<MachineInstr *, 4> DeadInsts;
+      Changed |= C.tryCombineMerges(*MI, DeadInsts);
+      for (auto *DeadMI : DeadInsts)
+        DeadMI->eraseFromParent();
     }
   }
Index: test/CodeGen/AArch64/GlobalISel/legalize-add.mir
===================================================================
--- test/CodeGen/AArch64/GlobalISel/legalize-add.mir
+++ test/CodeGen/AArch64/GlobalISel/legalize-add.mir
@@ -69,8 +69,10 @@
   bb.0.entry:
     liveins: %x0, %x1, %x2, %x3
     ; CHECK-LABEL: name: test_scalar_add_small
-    ; CHECK: [[OP0:%.*]](s32) = G_ANYEXT %2(s8)
-    ; CHECK: [[OP1:%.*]](s32) = G_ANYEXT %3(s8)
+    ; CHECK: [[A:%.*]](s64) = COPY %x0
+    ; CHECK: [[B:%.*]](s64) = COPY %x1
+    ; CHECK: [[OP0:%.*]](s32) = G_TRUNC [[A]]
+    ; CHECK: [[OP1:%.*]](s32) = G_TRUNC [[B]]
     ; CHECK: [[RES32:%.*]](s32) = G_ADD [[OP0]], [[OP1]]
     ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[RES32]](s32)
Index: test/CodeGen/AArch64/GlobalISel/legalize-and.mir
===================================================================
--- test/CodeGen/AArch64/GlobalISel/legalize-and.mir
+++ test/CodeGen/AArch64/GlobalISel/legalize-and.mir
@@ -22,8 +22,10 @@
   bb.0.entry:
     liveins: %x0, %x1, %x2, %x3
    ; CHECK-LABEL: name: test_scalar_and_small
-    ; CHECK: [[OP0:%.*]](s32) = G_ANYEXT %2(s8)
-    ; CHECK: [[OP1:%.*]](s32) = G_ANYEXT %3(s8)
+    ; CHECK: [[A:%.*]](s64) = COPY %x0
+    ; CHECK: [[B:%.*]](s64) = COPY %x1
+    ; CHECK: [[OP0:%.*]](s32) = G_TRUNC [[A]]
+    ; CHECK: [[OP1:%.*]](s32) = G_TRUNC [[B]]
     ; CHECK: [[RES32:%.*]](s32) = G_AND [[OP0]], [[OP1]]
     ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[RES32]](s32)
Index: test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
===================================================================
--- test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
+++ test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
@@ -36,9 +36,13 @@
     ; CHECK: [[CMP_T1:%[0-9]+]](s1) = G_TRUNC [[CMP1]]
     %4(s1) = G_ICMP intpred(sge), %0, %1
 
-    ; CHECK: [[LHS32:%[0-9]+]](s32) = G_ZEXT %2
-    ; CHECK: [[RHS32:%[0-9]+]](s32) = G_ZEXT %3
-    ; CHECK: 
[[CMP2:%[0-9]+]](s32) = G_ICMP intpred(ult), [[LHS32]](s32), [[RHS32]] + ; CHECK: [[CSTMASK1:%[0-9]+]](s32) = G_CONSTANT i32 255 + ; CHECK: [[T1:%[0-9]+]](s32) = G_TRUNC %0(s64) + ; CHECK: [[AND1:%[0-9]+]](s32) = G_AND [[T1]], [[CSTMASK1]] + ; CHECK: [[CSTMASK2:%[0-9]+]](s32) = G_CONSTANT i32 255 + ; CHECK: [[T2:%[0-9]+]](s32) = G_TRUNC %1(s64) + ; CHECK: [[AND2:%[0-9]+]](s32) = G_AND [[T2]], [[CSTMASK2]] + ; CHECK: [[CMP2:%[0-9]+]](s32) = G_ICMP intpred(ult), [[AND1]](s32), [[AND2]] ; CHECK: [[CMP_T2:%[0-9]+]](s1) = G_TRUNC [[CMP2]] %8(s1) = G_ICMP intpred(ult), %2, %3 Index: test/CodeGen/AArch64/GlobalISel/legalize-div.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-div.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-div.mir @@ -26,15 +26,28 @@ %2(s8) = G_TRUNC %0 %3(s8) = G_TRUNC %1 + ; CHECK: [[A:%.*]](s64) = COPY %x0 + ; CHECK: [[B:%.*]](s64) = COPY %x1 + ; CHECK: [[C1:%.*]](s32) = G_CONSTANT i32 24 + ; CHECK: [[S1:%.*]](s32) = G_TRUNC [[A]] + ; CHECK: [[SHL1:%.*]](s32) = G_SHL [[S1]], [[C1]] + ; CHECK: [[SEXT1:%.*]](s32) = G_ASHR [[SHL1]], [[C1]] + ; CHECK: [[C2:%.*]](s32) = G_CONSTANT i32 24 + ; CHECK: [[S2:%.*]](s32) = G_TRUNC [[B]] + ; CHECK: [[SHL2:%.*]](s32) = G_SHL [[S2]], [[C2]] + ; CHECK: [[SEXT2:%.*]](s32) = G_ASHR [[SHL2]], [[C2]] + ; CHECK: [[DIV:%.*]](s32) = G_SDIV [[SEXT1]], [[SEXT2]] + ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[DIV]] - ; CHECK: [[LHS32:%[0-9]+]](s32) = G_SEXT %2 - ; CHECK: [[RHS32:%[0-9]+]](s32) = G_SEXT %3 - ; CHECK: [[QUOT32:%[0-9]+]](s32) = G_SDIV [[LHS32]], [[RHS32]] - ; CHECK: [[RES:%[0-9]+]](s8) = G_TRUNC [[QUOT32]] %4(s8) = G_SDIV %2, %3 - ; CHECK: [[LHS32:%[0-9]+]](s32) = G_ZEXT %2 - ; CHECK: [[RHS32:%[0-9]+]](s32) = G_ZEXT %3 + + ; CHECK: [[CMASK1:%.*]](s32) = G_CONSTANT i32 255 + ; CHECK: [[T1:%.*]](s32) = G_TRUNC [[A]] + ; CHECK: [[LHS32:%.*]](s32) = G_AND [[T1]], [[CMASK1]] + ; CHECK: [[CMASK2:%.*]](s32) = G_CONSTANT i32 255 + ; CHECK: [[T2:%.*]](s32) = G_TRUNC [[B]] + ; CHECK: [[RHS32:%.*]](s32) = G_AND [[T2]], [[CMASK2]] ; CHECK: [[QUOT32:%[0-9]+]](s32) = G_UDIV [[LHS32]], [[RHS32]] ; CHECK: [[RES:%[0-9]+]](s8) = G_TRUNC [[QUOT32]] %5(s8) = G_UDIV %2, %3 Index: test/CodeGen/AArch64/GlobalISel/legalize-gep.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-gep.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-gep.mir @@ -20,8 +20,14 @@ bb.0.entry: liveins: %x0, %x1, %x2, %x3 ; CHECK-LABEL: name: test_gep_small - ; CHECK: [[OFFSET_EXT:%[0-9]+]](s64) = G_SEXT %2(s8) - ; CHECK: %3(p0) = G_GEP %0, [[OFFSET_EXT]](s64) + ; CHECK: [[A:%.*]](p0) = COPY %x0 + ; CHECK: [[B:%.*]](s64) = COPY %x1 + ; CHECK: [[C:%.*]](s64) = G_CONSTANT i64 56 + ; CHECK: [[SRC:%.*]](s64) = COPY [[B]](s64) + ; CHECK: [[SHL:%.*]](s64) = G_SHL [[SRC]], [[C]] + ; CHECK: [[SEXT:%.*]](s64) = G_ASHR [[SHL]], [[C]] + ; CHECK: G_GEP [[A]], [[SEXT]] + %0(p0) = COPY %x0 %1(s64) = COPY %x1 Index: test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir @@ -130,8 +130,11 @@ %1:_(s1) = G_TRUNC %0 ; CHECK-LABEL: name: test_sitofp_s32_s1 - ; CHECK: %3(s32) = G_SEXT %1 - ; CHECK: %2(s32) = G_SITOFP %3 + ; CHECK: [[C1:%.*]](s32) = G_CONSTANT i32 31 + ; CHECK: [[SRC:%.*]](s32) = COPY %0(s32) + ; CHECK: [[SHL1:%.*]](s32) = G_SHL [[SRC]], [[C1]] + ; CHECK: [[SEXT:%.*]](s32) = G_ASHR [[SHL1]], 
[[C1]] + ; CHECK: %2(s32) = G_SITOFP [[SEXT]] %2:_(s32) = G_SITOFP %1 ... @@ -144,8 +147,10 @@ %1:_(s1) = G_TRUNC %0 ; CHECK-LABEL: name: test_uitofp_s32_s1 - ; CHECK: %3(s32) = G_ZEXT %1 - ; CHECK: %2(s32) = G_UITOFP %3 + ; CHECK: [[C:%.*]](s32) = G_CONSTANT i32 1 + ; CHECK: [[SRC:%.*]](s32) = COPY %0(s32) + ; CHECK: [[ZEXT:%.*]](s32) = G_AND [[SRC]], [[C]] + ; CHECK: [[RES:%.*]](s32) = G_UITOFP [[ZEXT]] %2:_(s32) = G_UITOFP %1 ... @@ -158,8 +163,11 @@ %1:_(s8) = G_TRUNC %0 ; CHECK-LABEL: name: test_sitofp_s64_s8 - ; CHECK: %3(s32) = G_SEXT %1 - ; CHECK: %2(s64) = G_SITOFP %3 + ; CHECK: [[C1:%.*]](s32) = G_CONSTANT i32 24 + ; CHECK: [[SRC:%.*]](s32) = COPY %0(s32) + ; CHECK: [[SHL1:%.*]](s32) = G_SHL [[SRC]], [[C1]] + ; CHECK: [[SEXT:%.*]](s32) = G_ASHR [[SHL1]], [[C1]] + ; CHECK: %2(s64) = G_SITOFP [[SEXT]] %2:_(s64) = G_SITOFP %1 ... @@ -172,8 +180,10 @@ %1:_(s8) = G_TRUNC %0 ; CHECK-LABEL: name: test_uitofp_s64_s8 - ; CHECK: %3(s32) = G_ZEXT %1 - ; CHECK: %2(s64) = G_UITOFP %3 + ; CHECK: [[C:%.*]](s32) = G_CONSTANT i32 255 + ; CHECK: [[SRC:%.*]](s32) = COPY %0(s32) + ; CHECK: [[ZEXT:%.*]](s32) = G_AND [[SRC]], [[C]] + ; CHECK: %2(s64) = G_UITOFP [[ZEXT]] %2:_(s64) = G_UITOFP %1 ... @@ -186,8 +196,11 @@ %1:_(s16) = G_TRUNC %0 ; CHECK-LABEL: name: test_sitofp_s32_s16 - ; CHECK: %3(s32) = G_SEXT %1 - ; CHECK: %2(s32) = G_SITOFP %3 + ; CHECK: [[C1:%.*]](s32) = G_CONSTANT i32 16 + ; CHECK: [[SRC:%.*]](s32) = COPY %0(s32) + ; CHECK: [[SHL1:%.*]](s32) = G_SHL [[SRC]], [[C1]] + ; CHECK: [[SEXT:%.*]](s32) = G_ASHR [[SHL1]], [[C1]] + ; CHECK: %2(s32) = G_SITOFP [[SEXT]] %2:_(s32) = G_SITOFP %1 ... @@ -200,7 +213,9 @@ %1:_(s16) = G_TRUNC %0 ; CHECK-LABEL: name: test_uitofp_s32_s16 - ; CHECK: %3(s32) = G_ZEXT %1 - ; CHECK: %2(s32) = G_UITOFP %3 + ; CHECK: [[C:%.*]](s32) = G_CONSTANT i32 65535 + ; CHECK: [[SRC:%.*]](s32) = COPY %0(s32) + ; CHECK: [[ZEXT:%.*]](s32) = G_AND [[SRC]], [[C]] + ; CHECK: [[RES:%.*]](s32) = G_UITOFP [[ZEXT]] %2:_(s32) = G_UITOFP %1 ... 
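The G_CONSTANT values the updated CHECK lines expect in the cmp, div, shift, and itofp tests (i32 1, 255, and 65535) are simply the all-ones masks for the s1, s8, and s16 source widths that tryCombineZExt computes via APInt::getAllOnesValue. A standalone sketch of the same mask computation in plain C++ (the helper name and 64-bit width cap are illustrative, not the APInt API):

#include <cassert>
#include <cstdint>

// All-ones mask for an N-bit source type, N < 64, as the combiner
// materializes it into the wider destination type.
static uint64_t allOnesMask(unsigned NumBits) {
  return (uint64_t(1) << NumBits) - 1;
}

int main() {
  assert(allOnesMask(1) == 1);      // s1  -> G_CONSTANT i32 1
  assert(allOnesMask(8) == 255);    // s8  -> G_CONSTANT i32 255
  assert(allOnesMask(16) == 65535); // s16 -> G_CONSTANT i32 65535
  return 0;
}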
Index: test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir @@ -80,7 +80,13 @@ %0(p0) = COPY %x0 %1(s32) = COPY %w1 - ; CHECK: [[BIT8:%[0-9]+]](s8) = G_ZEXT %2(s1) + ; CHECK: [[C1:%.*]](s32) = G_CONSTANT i32 1 + ; CHECK: [[B:%.*]](s32) = COPY %1(s32) + ; CHECK: [[COPY_C1:%.*]](s32) = COPY [[C1]] + ; CHECK: [[AND:%.*]](s32) = G_AND [[B]], [[COPY_C1]] + ; CHECK: [[BIT8:%.*]](s8) = G_TRUNC [[AND]] + + ; CHECK: G_STORE [[BIT8]](s8), %0(p0) :: (store 1 into %ir.addr) %2(s1) = G_TRUNC %1 G_STORE %2, %0 :: (store 1 into %ir.addr) Index: test/CodeGen/AArch64/GlobalISel/legalize-mul.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-mul.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-mul.mir @@ -23,8 +23,8 @@ bb.0.entry: liveins: %x0, %x1, %x2, %x3 ; CHECK-LABEL: name: test_scalar_mul_small - ; CHECK: [[OP0:%.*]](s32) = G_ANYEXT %2(s8) - ; CHECK: [[OP1:%.*]](s32) = G_ANYEXT %3(s8) + ; CHECK: [[OP0:%.*]](s32) = G_TRUNC %0 + ; CHECK: [[OP1:%.*]](s32) = G_TRUNC %1 ; CHECK: [[RES32:%.*]](s32) = G_MUL [[OP0]], [[OP1]] ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[RES32]](s32) Index: test/CodeGen/AArch64/GlobalISel/legalize-or.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-or.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-or.mir @@ -22,8 +22,8 @@ bb.0.entry: liveins: %x0, %x1, %x2, %x3 ; CHECK-LABEL: name: test_scalar_or_small - ; CHECK: [[OP0:%.*]](s32) = G_ANYEXT %2(s8) - ; CHECK: [[OP1:%.*]](s32) = G_ANYEXT %3(s8) + ; CHECK: [[OP0:%.*]](s32) = G_TRUNC %0 + ; CHECK: [[OP1:%.*]](s32) = G_TRUNC %1 ; CHECK: [[RES32:%.*]](s32) = G_OR [[OP0]], [[OP1]] ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[RES32]](s32) Index: test/CodeGen/AArch64/GlobalISel/legalize-phi.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-phi.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-phi.mir @@ -201,7 +201,7 @@ ; CHECK-LABEL: bb.1: ; CHECK: [[RES_PHI:%.*]](s16) = G_PHI [[RES_BB1]](s16), %bb.0, [[RES_BB2:%.*]](s16), %bb.1 - ; CHECK-NEXT: G_TRUNC + ; CHECK-NEXT: G_ANYEXT [[RES_PHI]] ; CHECK: [[RES_BB2]](s16) = G_ANYEXT %0(s32) = COPY %w0 %2(s8) = G_CONSTANT i8 1 @@ -250,7 +250,7 @@ ; CHECK-LABEL: bb.1: ; CHECK: [[RES_PHI:%.*]](s16) = G_PHI [[RES_BB1]](s16), %bb.0, [[RES_BB2:%.*]](s16), %bb.1 ; CHECK-NEXT: G_TRUNC - ; CHECK: [[RES_BB2]](s16) = G_ANYEXT + ; CHECK: [[RES_BB2]](s16) = COPY %0(s32) = COPY %w0 %4(s8) = G_CONSTANT i8 0 Index: test/CodeGen/AArch64/GlobalISel/legalize-rem.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-rem.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-rem.mir @@ -3,30 +3,35 @@ --- | target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" target triple = "aarch64--" - define void @test_rem() { + define void @test_urem_64() { + entry: + ret void + } + define void @test_srem_32() { + entry: + ret void + } + define void @test_srem_8() { + entry: + ret void + } + define void @test_frem() { entry: ret void } ... 
--- -name: test_rem +name: test_urem_64 registers: - { id: 0, class: _ } - { id: 1, class: _ } - { id: 2, class: _ } - - { id: 3, class: _ } - - { id: 4, class: _ } - - { id: 5, class: _ } - - { id: 6, class: _ } - - { id: 7, class: _ } - - { id: 8, class: _ } - - { id: 9, class: _ } - - { id: 10, class: _ } body: | bb.0.entry: liveins: %x0, %x1, %x2, %x3 + ; CHECK-LABEL: name: test_urem_64 ; CHECK: [[QUOT:%[0-9]+]](s64) = G_UDIV %0, %1 ; CHECK: [[PROD:%[0-9]+]](s64) = G_MUL [[QUOT]], %1 ; CHECK: [[RES:%[0-9]+]](s64) = G_SUB %0, [[PROD]] @@ -34,40 +39,95 @@ %1(s64) = COPY %x1 %2(s64) = G_UREM %0, %1 - ; CHECK: [[QUOT:%[0-9]+]](s32) = G_SDIV %3, %4 - ; CHECK: [[PROD:%[0-9]+]](s32) = G_MUL [[QUOT]], %4 - ; CHECK: [[RES:%[0-9]+]](s32) = G_SUB %3, [[PROD]] + +... +--- +name: test_srem_32 +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 3, class: _ } + - { id: 4, class: _ } + - { id: 5, class: _ } +body: | + bb.0.entry: + liveins: %x0, %x1, %x2, %x3 + ; CHECK-LABEL: name: test_srem_32 + ; CHECK: [[T1:%.*]](s32) = G_TRUNC %0(s64) + ; CHECK: [[T2:%.*]](s32) = G_TRUNC %1(s64) + ; CHECK: [[DIV:%.*]](s32) = G_SDIV [[T1]], [[T2]] + ; CHECK: [[MUL:%.*]](s32) = G_MUL [[DIV]], [[T2]] + ; CHECK: [[RES:%.*]](s32) = G_SUB [[T1]], [[MUL]] + + %0(s64) = COPY %x0 + %1(s64) = COPY %x1 %3(s32) = G_TRUNC %0 %4(s32) = G_TRUNC %1 %5(s32) = G_SREM %3, %4 - ; CHECK: [[LHS32:%[0-9]+]](s32) = G_SEXT %6 - ; CHECK: [[RHS32:%[0-9]+]](s32) = G_SEXT %7 - ; CHECK: [[QUOT32:%[0-9]+]](s32) = G_SDIV [[LHS32]], [[RHS32]] - ; CHECK: [[QUOT:%[0-9]+]](s8) = G_TRUNC [[QUOT32]] +... +--- +name: test_srem_8 +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 6, class: _ } + - { id: 7, class: _ } + - { id: 8, class: _ } +body: | + bb.0.entry: + liveins: %x0, %x1, %x2, %x3 + ; CHECK-LABEL: name: test_srem_8 - ; CHECK: [[QUOT32_2:%.*]](s32) = G_ANYEXT [[QUOT]](s8) - ; CHECK: [[RHS32_2:%.*]](s32) = G_ANYEXT %7(s8) - ; CHECK: [[PROD32:%.*]](s32) = G_MUL [[QUOT32_2]], [[RHS32_2]] - ; CHECK: [[PROD:%.*]](s8) = G_TRUNC [[PROD32]](s32) + ; CHECK: [[C1:%.*]](s32) = G_CONSTANT i32 24 + ; CHECK: [[SRC1:%.*]](s32) = G_TRUNC %0(s64) + ; CHECK: [[SHL1:%.*]](s32) = G_SHL [[SRC1]], [[C1]] + ; CHECK: [[LHS_SEXT:%.*]](s32) = G_ASHR [[SHL1]], [[C1]] + ; CHECK: [[C2:%.*]](s32) = G_CONSTANT i32 24 + ; CHECK: [[SRC2:%.*]](s32) = G_TRUNC %1(s64) + ; CHECK: [[SHL2:%.*]](s32) = G_SHL [[SRC2]], [[C2]] + ; CHECK: [[RHS_SEXT:%.*]](s32) = G_ASHR [[SHL2]], [[C2]] + ; CHECK: [[SDIV:%.*]](s32) = G_SDIV [[LHS_SEXT]], [[RHS_SEXT]] + ; CHECK: [[A:%.*]](s32) = COPY [[SDIV]] + ; CHECK: [[SRC3:%.*]](s32) = G_TRUNC %1(s64) + ; CHECK: [[MUL:%.*]](s32) = G_MUL [[A]], [[SRC3]] + ; CHECK: [[SRC4:%.*]](s32) = G_TRUNC %0(s64) + ; CHECK: [[SRC5:%.*]](s32) = COPY [[MUL]] + ; CHECK: [[SUB:%.*]](s32) = G_SUB [[SRC4]], [[SRC5]] + ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[SUB]] - ; CHECK: [[LHS32_2:%.*]](s32) = G_ANYEXT %6(s8) - ; CHECK: [[PROD32_2:%.*]](s32) = G_ANYEXT [[PROD]](s8) - ; CHECK: [[RES:%[0-9]+]](s32) = G_SUB [[LHS32_2]], [[PROD32_2]] + %0(s64) = COPY %x0 + %1(s64) = COPY %x1 %6(s8) = G_TRUNC %0 %7(s8) = G_TRUNC %1 %8(s8) = G_SREM %6, %7 +... 
+--- +name: test_frem +registers: + - { id: 0, class: _ } + - { id: 1, class: _ } + - { id: 2, class: _ } + - { id: 3, class: _ } + - { id: 4, class: _ } + - { id: 5, class: _ } +body: | + bb.0.entry: + liveins: %x0, %x1, %x2, %x3 + ; CHECK-LABEL: name: test_frem ; CHECK: %d0 = COPY %0 ; CHECK: %d1 = COPY %1 ; CHECK: BL $fmod, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %d0, implicit %d1, implicit-def %d0 - ; CHECK: %9(s64) = COPY %d0 - %9(s64) = G_FREM %0, %1 + ; CHECK: [[RES:%.*]](s64) = COPY %d0 + %0(s64) = COPY %x0 + %1(s64) = COPY %x1 + %2(s64) = G_FREM %0, %1 ; CHECK: %s0 = COPY %3 ; CHECK: %s1 = COPY %4 ; CHECK: BL $fmodf, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %s0, implicit %s1, implicit-def %s0 - ; CHECK: %10(s32) = COPY %s0 - %10(s32) = G_FREM %3, %4 - -... + ; CHECK: [[RES:%.*]](s32) = COPY %s0 + %3(s32) = G_TRUNC %0 + %4(s32) = G_TRUNC %1 + %5(s32) = G_FREM %3, %4 Index: test/CodeGen/AArch64/GlobalISel/legalize-shift.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-shift.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-shift.mir @@ -27,20 +27,31 @@ %2(s8) = G_TRUNC %0 %3(s8) = G_TRUNC %1 - ; CHECK: [[LHS32:%[0-9]+]](s32) = G_SEXT %2 - ; CHECK: [[RHS32:%[0-9]+]](s32) = G_SEXT %3 - ; CHECK: [[RES32:%[0-9]+]](s32) = G_ASHR [[LHS32]], [[RHS32]] + ; CHECK: [[C1:%.*]](s32) = G_CONSTANT i32 24 + ; CHECK: [[SRC:%.*]](s32) = G_TRUNC %0(s64) + ; CHECK: [[SHL1:%.*]](s32) = G_SHL [[SRC]], [[C1]] + ; CHECK: [[SEXT1:%.*]](s32) = G_ASHR [[SHL1]], [[C1]] + ; CHECK: [[C2:%.*]](s32) = G_CONSTANT i32 24 + ; CHECK: [[SRC2:%.*]](s32) = G_TRUNC %1(s64) + ; CHECK: [[SHL2:%.*]](s32) = G_SHL [[SRC2]], [[C2]] + ; CHECK: [[SEXT2:%.*]](s32) = G_ASHR [[SHL2]], [[C2]] + ; CHECK: [[RES32:%[0-9]+]](s32) = G_ASHR [[SEXT1]], [[SEXT2]] ; CHECK: %4(s8) = G_TRUNC [[RES32]] %4(s8) = G_ASHR %2, %3 - ; CHECK: [[LHS32:%[0-9]+]](s32) = G_ZEXT %2 - ; CHECK: [[RHS32:%[0-9]+]](s32) = G_ZEXT %3 - ; CHECK: [[RES32:%[0-9]+]](s32) = G_LSHR [[LHS32]], [[RHS32]] + + ; CHECK: [[C1:%.*]](s32) = G_CONSTANT i32 255 + ; CHECK: [[SRC:%.*]](s32) = G_TRUNC %0(s64) + ; CHECK: [[ZEXT:%.*]](s32) = G_AND [[SRC]], [[C1]] + ; CHECK: [[C2:%.*]](s32) = G_CONSTANT i32 255 + ; CHECK: [[SRC2:%.*]](s32) = G_TRUNC %1(s64) + ; CHECK: [[ZEXT2:%.*]](s32) = G_AND [[SRC2]], [[C2]] + ; CHECK: [[RES32:%[0-9]+]](s32) = G_LSHR [[ZEXT]], [[ZEXT2]] ; CHECK: %5(s8) = G_TRUNC [[RES32]] %5(s8) = G_LSHR %2, %3 - ; CHECK: [[OP0:%.*]](s32) = G_ANYEXT %2(s8) - ; CHECK: [[OP1:%.*]](s32) = G_ANYEXT %3(s8) + ; CHECK: [[OP0:%.*]](s32) = G_TRUNC %0 + ; CHECK: [[OP1:%.*]](s32) = G_TRUNC %1 ; CHECK: [[RES32:%.*]](s32) = G_SHL [[OP0]], [[OP1]] ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[RES32]](s32) %6(s8) = G_SHL %2, %3 Index: test/CodeGen/AArch64/GlobalISel/legalize-simple.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-simple.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-simple.mir @@ -52,20 +52,20 @@ bb.1.next: - ; CHECK: [[LHS:%[0-9]+]](s32) = G_ANYEXT %1(s1) - ; CHECK: [[RHS:%[0-9]+]](s32) = G_ANYEXT %1(s1) + ; CHECK: [[LHS:%[0-9]+]](s32) = G_TRUNC %0 + ; CHECK: [[RHS:%[0-9]+]](s32) = G_TRUNC %0 ; CHECK: [[RES:%[0-9]+]](s32) = G_SELECT %1(s1), [[LHS]], [[RHS]] ; CHECK: %7(s1) = G_TRUNC [[RES]](s32) %7(s1) = G_SELECT %1, %1, %1 - ; CHECK: [[LHS:%[0-9]+]](s32) = G_ANYEXT %2(s8) - ; CHECK: [[RHS:%[0-9]+]](s32) = G_ANYEXT %2(s8) + ; CHECK: [[LHS:%[0-9]+]](s32) = G_TRUNC %0 + ; CHECK: [[RHS:%[0-9]+]](s32) 
= G_TRUNC %0 ; CHECK: [[RES:%[0-9]+]](s32) = G_SELECT %1(s1), [[LHS]], [[RHS]] ; CHECK: %8(s8) = G_TRUNC [[RES]](s32) %8(s8) = G_SELECT %1, %2, %2 - ; CHECK: [[LHS:%[0-9]+]](s32) = G_ANYEXT %3(s16) - ; CHECK: [[RHS:%[0-9]+]](s32) = G_ANYEXT %3(s16) + ; CHECK: [[LHS:%[0-9]+]](s32) = G_TRUNC %0 + ; CHECK: [[RHS:%[0-9]+]](s32) = G_TRUNC %0 ; CHECK: [[RES:%[0-9]+]](s32) = G_SELECT %1(s1), [[LHS]], [[RHS]] ; CHECK: %9(s16) = G_TRUNC [[RES]](s32) %9(s16) = G_SELECT %1, %3, %3 Index: test/CodeGen/AArch64/GlobalISel/legalize-sub.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-sub.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-sub.mir @@ -22,8 +22,8 @@ bb.0.entry: liveins: %x0, %x1, %x2, %x3 ; CHECK-LABEL: name: test_scalar_sub_small - ; CHECK: [[OP0:%.*]](s32) = G_ANYEXT %2(s8) - ; CHECK: [[OP1:%.*]](s32) = G_ANYEXT %3(s8) + ; CHECK: [[OP0:%.*]](s32) = G_TRUNC %0 + ; CHECK: [[OP1:%.*]](s32) = G_TRUNC %1 ; CHECK: [[RES32:%.*]](s32) = G_SUB [[OP0]], [[OP1]] ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[RES32]](s32) Index: test/CodeGen/AArch64/GlobalISel/legalize-xor.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-xor.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-xor.mir @@ -22,8 +22,8 @@ bb.0.entry: liveins: %x0, %x1, %x2, %x3 ; CHECK-LABEL: name: test_scalar_xor_small - ; CHECK: [[OP0:%.*]](s32) = G_ANYEXT %2(s8) - ; CHECK: [[OP1:%.*]](s32) = G_ANYEXT %3(s8) + ; CHECK: [[OP0:%.*]](s32) = G_TRUNC %0 + ; CHECK: [[OP1:%.*]](s32) = G_TRUNC %1 ; CHECK: [[RES32:%.*]](s32) = G_XOR [[OP0]], [[OP1]] ; CHECK: [[RES:%.*]](s8) = G_TRUNC [[RES32]](s32) Index: test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir =================================================================== --- test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir +++ test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir @@ -914,7 +914,6 @@ ; SOFT-DEFAULT: BLX $__gtsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 ; SOFT: [[RET1:%[0-9]+]](s32) = COPY %r0 ; SOFT: ADJCALLSTACKUP - ; SOFT-AEABI: [[R1:%[0-9]+]](s1) = G_TRUNC [[RET1]] ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0 ; SOFT-DEFAULT: [[R1:%[0-9]+]](s1) = G_ICMP intpred(sgt), [[RET1]](s32), [[ZERO]] ; SOFT-NOT: G_FCMP @@ -925,11 +924,12 @@ ; SOFT-DEFAULT: BLX $__ltsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 ; SOFT: [[RET2:%[0-9]+]](s32) = COPY %r0 ; SOFT: ADJCALLSTACKUP - ; SOFT-AEABI: [[R2:%[0-9]+]](s1) = G_TRUNC [[RET2]](s32) ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0 ; SOFT-DEFAULT: [[R2:%[0-9]+]](s1) = G_ICMP intpred(slt), [[RET2]](s32), [[ZERO]] - ; SOFT-DAG: [[R1EXT:%[0-9]+]](s32) = G_ANYEXT [[R1]] - ; SOFT-DAG: [[R2EXT:%[0-9]+]](s32) = G_ANYEXT [[R2]] + ; SOFT-AEABI: [[R1EXT:%[0-9]+]](s32) = COPY [[RET1]] + ; SOFT-AEABI: [[R2EXT:%[0-9]+]](s32) = COPY [[RET2]] + ; SOFT-DEFAULT: [[R1EXT:%[0-9]+]](s32) = G_ANYEXT [[R1]] + ; SOFT-DEFAULT: [[R2EXT:%[0-9]+]](s32) = G_ANYEXT [[R2]] ; SOFT: [[REXT:%[0-9]+]](s32) = G_OR [[R1EXT]], [[R2EXT]] ; SOFT: [[R:%[0-9]+]](s1) = G_TRUNC [[REXT]] ; SOFT-NOT: G_FCMP @@ -970,7 +970,6 @@ ; SOFT-DEFAULT: BLX $__eqsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 ; SOFT: [[RET1:%[0-9]+]](s32) = COPY %r0 ; SOFT: ADJCALLSTACKUP - ; SOFT-AEABI: [[R1:%[0-9]+]](s1) = G_TRUNC [[RET1]] ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0 ; SOFT-DEFAULT: [[R1:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET1]](s32), [[ZERO]] ; SOFT-NOT: G_FCMP @@ -981,11 +980,12 @@ ; SOFT-DEFAULT: BLX 
$__unordsf2, {{.*}}, implicit %r0, implicit %r1, implicit-def %r0 ; SOFT: [[RET2:%[0-9]+]](s32) = COPY %r0 ; SOFT: ADJCALLSTACKUP - ; SOFT-AEABI: [[R2:%[0-9]+]](s1) = G_TRUNC [[RET2]](s32) ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0 ; SOFT-DEFAULT: [[R2:%[0-9]+]](s1) = G_ICMP intpred(ne), [[RET2]](s32), [[ZERO]] - ; SOFT-DAG: [[R1EXT:%[0-9]+]](s32) = G_ANYEXT [[R1]] - ; SOFT-DAG: [[R2EXT:%[0-9]+]](s32) = G_ANYEXT [[R2]] + ; SOFT-AEABI: [[R1EXT:%[0-9]+]](s32) = COPY [[RET1]] + ; SOFT-AEABI: [[R2EXT:%[0-9]+]](s32) = COPY [[RET2]] + ; SOFT-DEFAULT: [[R1EXT:%[0-9]+]](s32) = G_ANYEXT [[R1]] + ; SOFT-DEFAULT: [[R2EXT:%[0-9]+]](s32) = G_ANYEXT [[R2]] ; SOFT: [[REXT:%[0-9]+]](s32) = G_OR [[R1EXT]], [[R2EXT]] ; SOFT: [[R:%[0-9]+]](s1) = G_TRUNC [[REXT]] ; SOFT-NOT: G_FCMP @@ -1789,7 +1789,6 @@ ; SOFT-DEFAULT: BLX $__gtdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0 ; SOFT: [[RET1:%[0-9]+]](s32) = COPY %r0 ; SOFT: ADJCALLSTACKUP - ; SOFT-AEABI: [[R1:%[0-9]+]](s1) = G_TRUNC [[RET1]](s32) ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0 ; SOFT-DEFAULT: [[R1:%[0-9]+]](s1) = G_ICMP intpred(sgt), [[RET1]](s32), [[ZERO]] ; SOFT-NOT: G_FCMP @@ -1802,11 +1801,12 @@ ; SOFT-DEFAULT: BLX $__ltdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0 ; SOFT: [[RET2:%[0-9]+]](s32) = COPY %r0 ; SOFT: ADJCALLSTACKUP - ; SOFT-AEABI: [[R2:%[0-9]+]](s1) = G_TRUNC [[RET2]](s32) ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0 ; SOFT-DEFAULT: [[R2:%[0-9]+]](s1) = G_ICMP intpred(slt), [[RET2]](s32), [[ZERO]] - ; SOFT-DAG: [[R1EXT:%[0-9]+]](s32) = G_ANYEXT [[R1]] - ; SOFT-DAG: [[R2EXT:%[0-9]+]](s32) = G_ANYEXT [[R2]] + ; SOFT-AEABI: [[R1EXT:%[0-9]+]](s32) = COPY [[RET1]] + ; SOFT-AEABI: [[R2EXT:%[0-9]+]](s32) = COPY [[RET2]] + ; SOFT-DEFAULT: [[R1EXT:%[0-9]+]](s32) = G_ANYEXT [[R1]] + ; SOFT-DEFAULT: [[R2EXT:%[0-9]+]](s32) = G_ANYEXT [[R2]] ; SOFT: [[REXT:%[0-9]+]](s32) = G_OR [[R1EXT]], [[R2EXT]] ; SOFT: [[R:%[0-9]+]](s1) = G_TRUNC [[REXT]] ; SOFT-NOT: G_FCMP @@ -1861,7 +1861,6 @@ ; SOFT-DEFAULT: BLX $__eqdf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0 ; SOFT: [[RET1:%[0-9]+]](s32) = COPY %r0 ; SOFT: ADJCALLSTACKUP - ; SOFT-AEABI: [[R1:%[0-9]+]](s1) = G_TRUNC [[RET1]](s32) ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0 ; SOFT-DEFAULT: [[R1:%[0-9]+]](s1) = G_ICMP intpred(eq), [[RET1]](s32), [[ZERO]] ; SOFT-NOT: G_FCMP @@ -1874,11 +1873,12 @@ ; SOFT-DEFAULT: BLX $__unorddf2, {{.*}}, implicit %r0, implicit %r1, implicit %r2, implicit %r3, implicit-def %r0 ; SOFT: [[RET2:%[0-9]+]](s32) = COPY %r0 ; SOFT: ADJCALLSTACKUP - ; SOFT-AEABI: [[R2:%[0-9]+]](s1) = G_TRUNC [[RET2]](s32) ; SOFT-DEFAULT: [[ZERO:%[0-9]+]](s32) = G_CONSTANT i32 0 ; SOFT-DEFAULT: [[R2:%[0-9]+]](s1) = G_ICMP intpred(ne), [[RET2]](s32), [[ZERO]] - ; SOFT-DAG: [[R1EXT:%[0-9]+]](s32) = G_ANYEXT [[R1]] - ; SOFT-DAG: [[R2EXT:%[0-9]+]](s32) = G_ANYEXT [[R2]] + ; SOFT-AEABI: [[R1EXT:%[0-9]+]](s32) = COPY [[RET1]] + ; SOFT-AEABI: [[R2EXT:%[0-9]+]](s32) = COPY [[RET2]] + ; SOFT-DEFAULT: [[R1EXT:%[0-9]+]](s32) = G_ANYEXT [[R1]] + ; SOFT-DEFAULT: [[R2EXT:%[0-9]+]](s32) = G_ANYEXT [[R2]] ; SOFT: [[REXT:%[0-9]+]](s32) = G_OR [[R1EXT]], [[R2EXT]] ; SOFT: [[R:%[0-9]+]](s1) = G_TRUNC [[REXT]] ; SOFT-NOT: G_FCMP
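In the arm-legalize-fp.mir changes above, the AEABI comparison libcalls already leave a 0-or-1 result in a full 32-bit register, so after the combine the old G_TRUNC/G_ANYEXT round trip collapses to a COPY and the two raw results are OR'd directly. A small sketch, assuming the libcall results really are 0 or 1 as the SOFT-AEABI checks rely on, showing why OR-ing the full values matches OR-ing the truncated low bits:

#include <cassert>
#include <cstdint>
#include <initializer_list>

// With 0/1 inputs, OR of the full registers agrees with OR of the single
// low bits that the removed TRUNC/ANYEXT sequence would have extracted.
static uint32_t orOfResults(uint32_t Ret1, uint32_t Ret2) {
  return Ret1 | Ret2;
}

int main() {
  for (uint32_t A : {0u, 1u})
    for (uint32_t B : {0u, 1u})
      assert((orOfResults(A, B) & 1u) == ((A & 1u) | (B & 1u)));
  return 0;
}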