Index: llvm/lib/Target/SystemZ/CMakeLists.txt
===================================================================
--- llvm/lib/Target/SystemZ/CMakeLists.txt
+++ llvm/lib/Target/SystemZ/CMakeLists.txt
@@ -17,6 +17,7 @@
   SystemZCallingConv.cpp
   SystemZConstantPoolValue.cpp
   SystemZCopyPhysRegs.cpp
+  SystemZDomainReassignment.cpp
   SystemZElimCompare.cpp
   SystemZFrameLowering.cpp
   SystemZHazardRecognizer.cpp
Index: llvm/lib/Target/SystemZ/SystemZ.h
===================================================================
--- llvm/lib/Target/SystemZ/SystemZ.h
+++ llvm/lib/Target/SystemZ/SystemZ.h
@@ -83,6 +83,10 @@
 const unsigned CCMASK_TM_MSB_1 = CCMASK_2 | CCMASK_3;
 const unsigned CCMASK_TM = CCMASK_ANY;
 
+const unsigned CCMASK_VTM_SOME_0 = CCMASK_TM_SOME_0 ^ CCMASK_2;
+const unsigned CCMASK_VTM_SOME_1 = CCMASK_TM_SOME_1 ^ CCMASK_2;
+const unsigned CCMASK_VTM = CCMASK_TM ^ CCMASK_2;
+
 // Condition-code mask assignments for TRANSACTION_BEGIN.
 const unsigned CCMASK_TBEGIN_STARTED = CCMASK_0;
 const unsigned CCMASK_TBEGIN_INDETERMINATE = CCMASK_1;
@@ -189,6 +193,7 @@
 
 FunctionPass *createSystemZISelDag(SystemZTargetMachine &TM,
                                    CodeGenOpt::Level OptLevel);
+FunctionPass *createSystemZDomainReassignmentPass(SystemZTargetMachine &TM);
 FunctionPass *createSystemZElimComparePass(SystemZTargetMachine &TM);
 FunctionPass *createSystemZShortenInstPass(SystemZTargetMachine &TM);
 FunctionPass *createSystemZLongBranchPass(SystemZTargetMachine &TM);
Index: llvm/lib/Target/SystemZ/SystemZDomainReassignment.cpp
===================================================================
--- /dev/null
+++ llvm/lib/Target/SystemZ/SystemZDomainReassignment.cpp
@@ -0,0 +1,2441 @@
+//==- SystemZDomainReassignment.cpp - Selectively switch register classes --==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass attempts to find instruction chains (closures) in one domain,
+// and convert them to equivalent instructions in a different domain,
+// if profitable.
+//
+// On SystemZ this means converting GPR closures to the vector domain. The
+// aim can be to reduce register pressure or to avoid vector element
+// extractions.
+//===----------------------------------------------------------------------===//
+
+#include "SystemZ.h"
+#include "SystemZInstrInfo.h"
+#include "SystemZSubtarget.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Printable.h"
+#include <list>
+#include <map>
+#include <set>
+using namespace llvm;
+
+#define SYSTEMZ_DOMAINREASSIGN_NAME "SystemZ Domain Reassignment Pass"
+#define DEBUG_TYPE "systemz-domain-reassignment"
+
+static cl::opt<bool>
+DisableDomReass("disable-domreass", cl::init(false), cl::Hidden);
+
+// Option to enable insertion of non-convertible values into GPRs with VLVG.
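+// When enabled, a value computed in the GPR domain that has no converter
+// can still feed a closure: ScalarInserter (below) keeps the scalar
+// definition and inserts its result into the assigned vector lane.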
+static cl::opt<bool>
+EnableGPRInsertion("domreass-inserts", cl::init(false), cl::Hidden);
+
+static cl::opt<bool>
+DumpVRLiveness("dump-vregliveness", cl::init(false), cl::Hidden);
+
+namespace llvm {
+  void initializeSystemZDomainReassignmentPass(PassRegistry&);
+}
+
+namespace {
+
+///// An MRI-based class to track liveness of virtual registers in SSA form.
+// Is this available elsewhere in the tree?
+class VirtRegLiveness {
+public:
+  VirtRegLiveness() {}
+
+  typedef std::map<MachineBasicBlock *, std::set<Register>> MBB2RegsMap;
+  MBB2RegsMap VRegLiveIns;
+  MBB2RegsMap VRegLiveOuts;
+
+  void compute_and_setkills(const MachineRegisterInfo *MRI,
+                            const MachineDominatorTree *MDT,
+                            MachineFunction *MF);
+
+  void dumpMBB(MachineBasicBlock *MBB);
+  void dumpMF(MachineFunction *MF);
+};
+
+void VirtRegLiveness::compute_and_setkills(const MachineRegisterInfo *MRI,
+                                           const MachineDominatorTree *MDT,
+                                           MachineFunction *MF) {
+  assert(MRI->isSSA() && "Expected MIR to be in SSA form");
+  VRegLiveIns.clear();
+  VRegLiveOuts.clear();
+
+  typedef std::map<MachineBasicBlock *, MachineInstr *> LastUseMap;
+  std::map<Register, LastUseMap> Reg2LastUses;
+
+  // Find the last user of every register in every MBB.
+  for (unsigned Idx = 0; Idx < MRI->getNumVirtRegs(); ++Idx) {
+    Register Reg = Register::index2VirtReg(Idx);
+    if (MRI->getVRegDef(Reg) == nullptr)
+      continue;
+    LastUseMap &LastUses = Reg2LastUses[Reg];
+    for (auto &RegMI : MRI->reg_nodbg_instructions(Reg)) {
+      MachineBasicBlock *UseMBB = RegMI.getParent();
+      if (RegMI.readsRegister(Reg) &&
+          (LastUses.find(UseMBB) == LastUses.end() ||
+           MDT->dominates(LastUses[UseMBB], &RegMI)))
+        LastUses[UseMBB] = &RegMI;
+    }
+  }
+
+  // Find live-ins locally for non-PHI uses.
+  for (auto &II : Reg2LastUses) {
+    Register Reg = II.first;
+    LastUseMap &LastUses = II.second;
+    MachineInstr *DefMI = MRI->getVRegDef(Reg);
+    for (auto &LU : LastUses) {
+      MachineBasicBlock *UseMBB = LU.first;
+      MachineInstr *UseMI = LU.second;
+      if (!UseMI->isPHI() && UseMBB != DefMI->getParent())
+        // A normal use is live-in if not defined in same block.
+        VRegLiveIns[UseMBB].insert(Reg);
+    }
+  }
+
+  // Handle PHI uses.
+  for (auto &MBB : *MF)
+    for (const MachineInstr &MI : MBB.phis())
+      for (unsigned MOIdx = 1; MOIdx + 1 < MI.getNumOperands(); MOIdx += 2) {
+        Register Reg = MI.getOperand(MOIdx).getReg();
+        MachineBasicBlock *P = MI.getOperand(MOIdx + 1).getMBB();
+        MachineInstr *DefMI = MRI->getVRegDef(Reg);
+        // A PHI use means Reg is live out of and possibly live into P,
+        // however not generally live into MBB.
+        VRegLiveOuts[P].insert(Reg);
+        if (DefMI->getParent() != P)
+          VRegLiveIns[P].insert(Reg);
+      }
+
+  // Propagate VRegLiveIns up the CFG.
+  bool Change = true;
+  while (Change) {
+    Change = false;
+    for (auto &MBB : *MF)
+      for (auto S : MBB.successors())
+        for (Register Reg : VRegLiveIns[S]) {
+          MachineInstr *DefMI = MRI->getVRegDef(Reg);
+          if (DefMI->getParent() != &MBB && VRegLiveIns[&MBB].insert(Reg).second)
+            Change = true;
+        }
+  }
+
+  // Compute VRegLiveOuts for each MBB.
+  for (auto &MBB : *MF)
+    for (auto S : MBB.successors())
+      for (Register Reg : VRegLiveIns[S])
+        VRegLiveOuts[&MBB].insert(Reg);
+
+  // Set kill-flags, except on PHI instructions, which are not supposed to
+  // have them (see the comment for VarInfo in LiveVariables.h).
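+  // (A PHI reads its operands at the end of the corresponding predecessor
+  // block, so a kill flag on the PHI operand itself would mark the kill at
+  // the wrong point.)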
+  for (auto &II : Reg2LastUses) {
+    Register Reg = II.first;
+    LastUseMap &LastUses = II.second;
+    for (auto &LU : LastUses) {
+      MachineBasicBlock *UseMBB = LU.first;
+      MachineInstr *UseMI = LU.second;
+      if (!UseMI->isPHI() && !VRegLiveOuts[UseMBB].count(Reg))
+        UseMI->addRegisterKilled(Reg, MRI->getTargetRegisterInfo());
+    }
+  }
+}
+
+static void dumpRegSet(std::string Msg, std::set<Register> &Regs) {
+  dbgs() << Msg << ": ";
+  for (auto Reg : Regs)
+    dbgs() << "%" << Register::virtReg2Index(Reg) << ", ";
+  dbgs() << "\n";
+}
+
+void VirtRegLiveness::dumpMBB(MachineBasicBlock *MBB) {
+  dumpRegSet("Live IN vregs", VRegLiveIns[MBB]);
+  MBB->dump();
+  dumpRegSet("Live OUT vregs", VRegLiveOuts[MBB]);
+  dbgs() << "\n";
+}
+
+void VirtRegLiveness::dumpMF(MachineFunction *MF) {
+  dbgs() << "# Machine code for function " << MF->getName() << "\n\n";
+  for (auto &MBB : *MF)
+    dumpMBB(&MBB);
+}
+
+///// End VirtRegLiveness
+
+#ifndef NDEBUG
+// Debug output
+static MachineBasicBlock::iterator getPrevOrEnd(MachineInstr *MI) {
+  return (MI == MI->getParent()->begin()) ? MI->getParent()->end() :
+    std::prev(MachineBasicBlock::iterator(MI));
+}
+
+static void dumpConversion(MachineInstr *MI, MachineBasicBlock::iterator Start) {
+  MachineBasicBlock *MBB = MI->getParent();
+  Start = (Start == MBB->end() ? MBB->begin() : std::next(Start));
+  dbgs() << "From : "; MI->dump();
+  dbgs() << "  To  "; Start->dump();
+  while (++Start != MI) {
+    dbgs() << "      "; Start->dump();
+  }
+}
+
+void dumpEnclosureMsg(std::string Msg, const MachineInstr *MI) {
+  dbgs() << "--- " << Msg << ": ";
+  MI->dump();
+}
+#else
+  // A dummy function definition for a non-debug build, to avoid cluttering
+  // code around users.
+  static MachineBasicBlock::iterator getPrevOrEnd(MachineInstr *MI) {
+    return nullptr;
+  }
+#endif
+
+// Returns true if Reg belongs to the GR64BitRegClass.
+static bool is64BitReg(Register Reg,
+                       const MachineRegisterInfo *MRI) {
+  return MRI->getRegClass(Reg) == &SystemZ::GR64BitRegClass;
+}
+
+static bool is32BitReg(Register Reg,
+                       const MachineRegisterInfo *MRI) {
+  const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+  return (RC == &SystemZ::GRX32BitRegClass || RC == &SystemZ::GR32BitRegClass ||
+          RC == &SystemZ::GRH32BitRegClass);
+}
+
+static bool isGPRDomainReg(Register Reg, const MachineRegisterInfo *MRI) {
+  if (!Register::isVirtualRegister(Reg))
+    return false;
+  return is64BitReg(Reg, MRI) || is32BitReg(Reg, MRI);
+}
+
+// Return the defined virtual GPR, but only if MI defines just that one
+// register, in operand 0, to avoid odd cases.
+static Register
+getDefedGPRReg(const MachineInstr *MI, const MachineRegisterInfo *MRI) {
+  Register DefReg = 0;
+  for (unsigned OpIdx = 0; OpIdx < MI->getNumExplicitOperands(); ++OpIdx) {
+    auto &Op = MI->getOperand(OpIdx);
+    if (Op.isReg() && Op.isDef()) {
+      if (OpIdx != 0 || DefReg)
+        return SystemZ::NoRegister;
+      DefReg = Op.getReg();
+    }
+  }
+  return (DefReg && isGPRDomainReg(DefReg, MRI)) ? DefReg : SystemZ::NoRegister;
+}
+
+/////// Vector lanes handling:
+
+// Each reassigned virtual register will have a vector lane assigned to
+// it. This facilitates things like extensions / truncations where the result
+// will end up in a specific lane. For example, truncating a 64 bit value in
+// element:64 0 will give the low 32 bits in element:32 1. Before deciding
+// that a closure can be reassigned, it is iterated over to find a possible
+// assignment of vector lanes, as illustrated below.
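+//
+// As a small illustration (hypothetical registers): for
+//   %g64 = ...                        ; assigned doubleword lane G0
+//   %f32 = COPY %g64.subreg_l32
+// the truncation result %f32 must live in fullword lane F1 (the low half of
+// G0); had %g64 been assigned G1, %f32 would be forced into F3.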
+
+// Vector lane identifiers:
+// [ G0| G1]
+// [ F0| F1| F2| F3]
+// [ H H H H H H H H]
+// [BBBBBBBBBBBBBBBB]
+static unsigned G0 = 1 << 0;
+static unsigned G1 = 1 << 1;
+static unsigned F0 = 1 << 2;
+static unsigned F1 = 1 << 3;
+static unsigned F2 = 1 << 4;
+static unsigned F3 = 1 << 5;
+static unsigned H0 = 1 << 6;
+// H1 = 1 << 7;
+// H2 = 1 << 8;
+// H3 = 1 << 9;
+// H4 = 1 << 10;
+// H5 = 1 << 11;
+// H6 = 1 << 12;
+// H7 = 1 << 13;
+static unsigned B0 = 1 << 14;
+// B1 = 1 << 15;
+// B2 = 1 << 16;
+// B3 = 1 << 17;
+// B4 = 1 << 18;
+// B5 = 1 << 19;
+// B6 = 1 << 20;
+// B7 = 1 << 21;
+// B8 = 1 << 22;
+// B9 = 1 << 23;
+// B10 = 1 << 24;
+// B11 = 1 << 25;
+// B12 = 1 << 26;
+// B13 = 1 << 27;
+// B14 = 1 << 28;
+// B15 = 1 << 29;
+
+// Returns the vector lane corresponding to the extracted element of MI.
+static unsigned VLGVElt2Lane(const MachineInstr *MI) {
+  unsigned FirstLane = 0;
+  switch (MI->getOpcode()) {
+  case SystemZ::VLGVG:
+    FirstLane = countTrailingZeros(G0);
+    break;
+  case SystemZ::VLGVF:
+    FirstLane = countTrailingZeros(F0);
+    break;
+  case SystemZ::VLGVH:
+    FirstLane = countTrailingZeros(H0);
+    break;
+  case SystemZ::VLGVB:
+    FirstLane = countTrailingZeros(B0);
+    break;
+  default:
+    llvm_unreachable("Expected a VLGV opcode");
+    break;
+  }
+  unsigned ExtractIdx = MI->getOperand(3).getImm();
+  return 1 << (FirstLane + ExtractIdx);
+}
+
+// These functions return true if the single lane in Lanes is of a particular
+// size.
+static bool isDoubleWordLane(unsigned Lanes) {
+  assert(countPopulation(Lanes) == 1 && "Lane not selected?");
+  return Lanes < F0;
+}
+static bool isFullWordLane(unsigned Lanes) {
+  assert(countPopulation(Lanes) == 1 && "Lane not selected?");
+  return Lanes >= F0 && Lanes < H0;
+}
+static bool isHalfWordLane(unsigned Lanes) {
+  assert(countPopulation(Lanes) == 1 && "Lane not selected?");
+  return Lanes >= H0 && Lanes < B0;
+}
+static bool isByteLane(unsigned Lanes) {
+  assert(countPopulation(Lanes) == 1 && "Lane not selected?");
+  return Lanes >= B0;
+}
+
+// This function takes a set SrcLanes and a set DstLanes representing the
+// possible lanes of two registers. The two masks show which lanes depend on
+// each other: If there are no lanes of SrcMask available in SrcLanes, then
+// all DstMask lanes in DstLanes are unavailable, and vice versa. For
+// example: truncating G0 or G1 can only give the result in F1 or F3, but if
+// G1 is not available, then F3 cannot be used either.
+static void applyLaneDeps(unsigned &SrcLanes, unsigned SrcMask,
+                          unsigned &DstLanes, unsigned DstMask) {
+  if (!(SrcLanes & SrcMask))
+    DstLanes &= ~DstMask;
+  else if (!(DstLanes & DstMask))
+    SrcLanes &= ~SrcMask;
+}
+
+// Returns the element index corresponding to a vector lane.
+static unsigned lane2EltIdx(unsigned Lanes) {
+  assert(countPopulation(Lanes) == 1 && "Lane not selected?");
+  unsigned LaneIdx = countTrailingZeros(Lanes);
+  if (isDoubleWordLane(Lanes))
+    return LaneIdx - countTrailingZeros(G0);
+  if (isFullWordLane(Lanes))
+    return LaneIdx - countTrailingZeros(F0);
+  if (isHalfWordLane(Lanes))
+    return LaneIdx - countTrailingZeros(H0);
+  return LaneIdx - countTrailingZeros(B0);
+}
+
+// Checks if the lanes have changed for Reg. If so, Lanes for Reg is updated,
+// related instructions are pushed onto the worklist, and true is
+// returned. Otherwise returns false.
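+// For example, if Reg's lane set was {F1, F3} and NewLanes narrows it to
+// {F1}, every other instruction reading or defining Reg is revisited so that
+// the narrowed constraint can propagate through the closure.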
+static bool updateLanes(Register Reg,
+                        unsigned NewLanes,
+                        DenseMap<Register, unsigned> &Lanes,
+                        const MachineRegisterInfo *MRI,
+                        std::list<MachineInstr *> &Worklist,
+                        const MachineInstr *MI) {
+  if (NewLanes != Lanes[Reg]) {
+    LLVM_DEBUG(dbgs() << "Visiting ";
+               MI->dump(););
+    LLVM_DEBUG(dbgs() << "Lanes %"
+                      << Register::virtReg2Index(Reg) << ": ";
+               if (is64BitReg(Reg, MRI))
+                 dbgs() << ((NewLanes & G0) ? " G0" : " --")
+                        << ((NewLanes & G1) ? " G1" : " --");
+               else
+                 dbgs() << ((NewLanes & F0) ? "F0 " : "-- ")
+                        << ((NewLanes & F1) ? "F1 " : "-- ")
+                        << ((NewLanes & F2) ? "F2 " : "-- ")
+                        << ((NewLanes & F3) ? "F3 " : "-- ");
+               if (NewLanes >= H0) {
+                 if (countPopulation(NewLanes) == 1) {
+                   if (isHalfWordLane(NewLanes))
+                     dbgs() << " H" << lane2EltIdx(NewLanes);
+                   else
+                     dbgs() << " B" << lane2EltIdx(NewLanes);
+                 }
+                 else
+                   dbgs() << " H/B lanes set";
+               }
+               dbgs() << "\n";);
+    Lanes[Reg] = NewLanes;
+    for (auto &RegMI : MRI->reg_nodbg_instructions(Reg))
+      if (&RegMI != MI)
+        Worklist.push_back(&RegMI);
+    return true;
+  }
+  return false;
+}
+
+// Update lanes for two registers.
+static bool updateLanes(Register Reg0,
+                        unsigned NewLanes0,
+                        Register Reg1,
+                        unsigned NewLanes1,
+                        DenseMap<Register, unsigned> &Lanes,
+                        const MachineRegisterInfo *MRI,
+                        std::list<MachineInstr *> &Worklist,
+                        const MachineInstr *MI) {
+  bool Change = updateLanes(Reg0, NewLanes0, Lanes, MRI, Worklist, MI);
+  Change |= updateLanes(Reg1, NewLanes1, Lanes, MRI, Worklist, MI);
+  return Change;
+}
+
+// Called when any of the lanes in Lanes is possible to use, in which case
+// the first one is taken.
+static void selectLane(unsigned &Lanes) {
+  assert(Lanes && "Cannot select a lane.");
+  unsigned FirstEltIdx = countTrailingZeros(Lanes);
+  Lanes = 1 << FirstEltIdx;
+}
+
+// This function selects the first lane available for all explicit operands.
+static bool selectLanesGeneric(const MachineInstr *MI,
+                               DenseMap<Register, unsigned> &Lanes,
+                               std::list<MachineInstr *> &Worklist,
+                               const MachineRegisterInfo *MRI) {
+  SmallVector<const MachineOperand *> MOs;
+  for (auto &Op : MI->explicit_operands())
+    if (Op.isReg() && Lanes.find(Op.getReg()) != Lanes.end())
+      MOs.push_back(&Op);
+
+  bool Change = false;
+  // Pick the first available lane.
+  for (unsigned I = 0; I < MOs.size(); I++) {
+    unsigned RegLanes = Lanes[MOs[I]->getReg()];
+    selectLane(RegLanes);
+    Change |=
+      updateLanes(MOs[I]->getReg(), RegLanes, Lanes, MRI, Worklist, MI);
+  }
+
+  return Change;
+}
+
+// A generic implementation of findLanes() that finds the intersection of
+// possible lanes for all operands. For example, a VAG would require all
+// operands to use the same lane, so if one of the sources had to be in lane
+// G0, the other operands would as well.
+static bool findLanesGeneric(const MachineInstr *MI,
+                             DenseMap<Register, unsigned> &Lanes,
+                             std::list<MachineInstr *> &Worklist,
+                             bool ToFinal,
+                             const MachineRegisterInfo *MRI) {
+  SmallVector<const MachineOperand *> MOs;
+  for (auto &Op : MI->explicit_operands())
+    if (Op.isReg() && Lanes.find(Op.getReg()) != Lanes.end())
+      MOs.push_back(&Op);
+
+  if (ToFinal) {
+    bool Change = selectLanesGeneric(MI, Lanes, Worklist, MRI);
+    for (unsigned I = 0; I < MOs.size(); I++)
+      assert(Lanes[MOs[I]->getReg()] == Lanes[MOs[0]->getReg()] &&
+             "All operands should use the same lane.");
+    return Change;
+  }
+
+  bool Change = false;
+  if (MOs.size() > 1) {
+    // Find the intersection of lanes.
+    unsigned RegLanes = ~0U;
+    for (unsigned I = 0; I < MOs.size(); I++)
+      RegLanes &= Lanes[MOs[I]->getReg()];
+    // Update the Lanes entry for each operand.
+    for (unsigned I = 0; I < MOs.size(); I++)
+      Change |=
+        updateLanes(MOs[I]->getReg(), RegLanes, Lanes, MRI, Worklist, MI);
+  }
+
+  return Change;
+}
+
+// MI is a scalar instruction which loads from memory. Load the value into
+// the element EltIdx of DstReg, or a new virtual register if not
+// provided. Returns DstReg.
+static Register loadMemIntoVecElt(MachineInstr *MI, const SystemZInstrInfo *TII,
+                                  MachineRegisterInfo *MRI, unsigned EltIdx,
+                                  Register DstReg = SystemZ::NoRegister) {
+  unsigned FirstMemOp = 0;
+  while (MI->getDesc().OpInfo[FirstMemOp].OperandType != MCOI::OPERAND_MEMORY)
+    FirstMemOp++;
+  assert(MI->getDesc().OpInfo[FirstMemOp].OperandType == MCOI::OPERAND_MEMORY &&
+         (MI->getDesc().OpInfo[FirstMemOp + 1].OperandType ==
+          MCOI::OPERAND_MEMORY) &&
+         (MI->getDesc().OpInfo[FirstMemOp + 2].OperandType ==
+          MCOI::OPERAND_MEMORY) &&
+         "Expected MI to have three memory operands.");
+  MachineBasicBlock *MBB = MI->getParent();
+  DebugLoc DL = MI->getDebugLoc();
+  Register VTmp0 = MRI->createVirtualRegister(&SystemZ::VR128BitRegClass);
+  if (DstReg == SystemZ::NoRegister)
+    DstReg = MRI->createVirtualRegister(&SystemZ::VR128BitRegClass);
+  unsigned VLEOpc = 0;
+  MachineMemOperand *MMO = *MI->memoperands_begin();
+  switch (MMO->getSize()) {
+  case 8: VLEOpc = SystemZ::VLEG; break;
+  case 4: VLEOpc = SystemZ::VLEF; break;
+  case 2: VLEOpc = SystemZ::VLEH; break;
+  default: break;
+  }
+  assert(VLEOpc && "Unexpected number of loaded bytes.");
+  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), VTmp0);
+  BuildMI(*MBB, MI, DL, TII->get(VLEOpc), DstReg)
+    .addReg(VTmp0)
+    .add(MI->getOperand(FirstMemOp))
+    .add(MI->getOperand(FirstMemOp + 1))
+    .add(MI->getOperand(FirstMemOp + 2))
+    .addImm(EltIdx)
+    .setMemRefs(MI->memoperands());
+  return DstReg;
+}
+
+// Types of immediates that are treated differently.
+enum ImmediateType { SE16, SE32, SInt32, ZE16, ZE32, ZELH16, UInt32, ANDLow16, NoImmTy };
+
+// Load Imm into the element EltIdx of DstReg, or a new virtual register if
+// not provided. Returns DstReg.
+// Note: 64 bit immediates with LLIHF + OILF64 not (yet?) supported.
+static Register loadImmIntoVecElt(MachineInstr *MI, const SystemZInstrInfo *TII,
+                                  MachineRegisterInfo *MRI, unsigned EltIdx,
+                                  int64_t Imm, ImmediateType ImmType,
+                                  Register DstReg = SystemZ::NoRegister) {
+  assert((isUInt<32>(Imm) || isInt<32>(Imm)) && "Unexpected huge immediate.");
+  MachineBasicBlock *MBB = MI->getParent();
+  DebugLoc DL = MI->getDebugLoc();
+  bool ResultIs64Bit = is64BitReg(MI->getOperand(0).getReg(), MRI);
+  bool DoSE16 = false;
+  switch (ImmType) {
+  case ImmediateType::SE16:
+    // The significant bits of Imm may or may not have been sign extended.
+    assert((isUInt<16>(Imm) || isInt<16>(Imm)) && "Unexpected bits.");
+    Imm = int16_t(Imm);
+    DoSE16 = true;
+    break;
+  case ImmediateType::SE32:
+    // The significant bits of Imm may or may not have been sign extended.
+    assert((isUInt<32>(Imm) || isInt<32>(Imm)) && "Unexpected bits.");
+    Imm = int32_t(Imm);
+    if (isInt<16>(Imm))
+      DoSE16 = true;
+    break;
+  case ImmediateType::SInt32:
+    // A 32 bit signed integer.
+    assert(isInt<32>(Imm) && "Unexpected bits.");
+    assert(!ResultIs64Bit && "A 64 bit element needs extension of immediate.");
+    if (isInt<16>(Imm))
+      DoSE16 = true;
+    break;
+  case ImmediateType::ZE16:
+    // 16 bits that should be zero extended.
+    assert(isUInt<16>(Imm) && "Unexpected bits.");
+    LLVM_FALLTHROUGH;
+  case ImmediateType::ZE32:
+    // 32 bits that should be zero extended.
+ assert(isUInt<32>(Imm) && "Unexpected bits."); + if (isUInt<15>(Imm)) + DoSE16 = true; + break; + case ImmediateType::ZELH16: + // 16 high bits of low 32 that should be zero extended to 64 bits. + assert(isUInt<16>(Imm) && "Unexpected bits."); + Imm <<= 16; + break; + case ImmediateType::UInt32: + // A 32 bit unsigned integer. + assert(isUInt<32>(Imm) && "Unexpected bits."); + assert(!ResultIs64Bit && "A 64 bit element needs extension of immediate."); + if ((Imm >> 16 == 0xffff) && (Imm & (1 << 15))) { + Imm = int16_t(Imm & 0xffff); + DoSE16 = true; + } + else if (isUInt<15>(Imm)) + DoSE16 = true; + break; + case ImmediateType::ANDLow16: + // The AND-mask for the 16 low bits which needs all high bits to be set. + assert(isUInt<16>(Imm) && "Unexpected bits."); + Imm = (int64_t(-1) ^ 0xffff) | Imm; + if (isInt<16>(Imm)) + DoSE16 = true; + break; + case NoImmTy: + llvm_unreachable("Can't load an unspecified immediate."); + break; + } + + if (DstReg == SystemZ::NoRegister) + DstReg = MRI->createVirtualRegister(&SystemZ::VR128BitRegClass); + Register VTmp0 = MRI->createVirtualRegister(&SystemZ::VR128BitRegClass); + + if (DoSE16) { + // Load Imm into the element with a VLEI, which will sign extend it. + unsigned VLEIOpc = ResultIs64Bit ? SystemZ::VLEIG : SystemZ::VLEIF; + BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), VTmp0); + BuildMI(*MBB, MI, DL, TII->get(VLEIOpc), DstReg) + .addReg(VTmp0) + .addImm(Imm) + .addImm(EltIdx); + return DstReg; + } + + int32_t High32 = ((Imm >> 32) & UINT32_MAX); + int16_t Lo16(Imm & UINT16_MAX); + int16_t Hi16((Imm >> 16) & UINT16_MAX); + + bool DoLo16VLEIG = ResultIs64Bit && ((Lo16 & (1 << 15)) == (High32 & 1)); + bool DoHi16VLEIG = (ResultIs64Bit && (Hi16 == High32)); + bool DoHigh32 = ResultIs64Bit && !DoLo16VLEIG && !DoHi16VLEIG; + bool DoLow32VLEIF = DoHigh32 && Hi16 == -1 && Lo16 < 0; + bool DoLo16 = !DoLow32VLEIF && !DoLo16VLEIG; + bool DoHi16 = !DoLow32VLEIF && !DoHi16VLEIG; + + if (DoLo16VLEIG || DoHi16VLEIG) { + // High32 matches the sign bit of Lo16 or the value of Hi16. + Register VTmp1 = MRI->createVirtualRegister(&SystemZ::VR128BitRegClass); + BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), VTmp1); + BuildMI(*MBB, MI, DL, TII->get(SystemZ::VLEIG), VTmp0) + .addReg(VTmp1) + .addImm(DoLo16VLEIG ? Lo16 : Hi16) + .addImm(EltIdx); + } + else if (DoHigh32) { + // Fill the high 32 bits with ones or zeroes for a 64 bit value. + Register VTmp1 = MRI->createVirtualRegister(&SystemZ::VR128BitRegClass); + BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), VTmp1); + BuildMI(*MBB, MI, DL, TII->get(SystemZ::VLEIF), VTmp0) + .addReg(VTmp1) + .addImm(High32) + .addImm(EltIdx * 2); + } + else + BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), VTmp0); + + if (DoLow32VLEIF) { + BuildMI(*MBB, MI, DL, TII->get(SystemZ::VLEIF), DstReg) + .addReg(VTmp0) + .addImm(Lo16) + .addImm(EltIdx * 2 + 1); + } else { + unsigned VLEIH_HighIdx = ResultIs64Bit ? EltIdx * 4 + 2 : EltIdx * 2; + Register VTmp2; + if (DoLo16) { + VTmp2 = + DoHi16 ? 
MRI->createVirtualRegister(&SystemZ::VR128BitRegClass) : DstReg;
+      BuildMI(*MBB, MI, DL, TII->get(SystemZ::VLEIH), VTmp2)
+        .addReg(VTmp0)
+        .addImm(Lo16)
+        .addImm(VLEIH_HighIdx + 1);
+    } else
+      VTmp2 = VTmp0;
+
+    if (DoHi16)
+      BuildMI(*MBB, MI, DL, TII->get(SystemZ::VLEIH), DstReg)
+        .addReg(VTmp2)
+        .addImm(Hi16)
+        .addImm(VLEIH_HighIdx);
+  }
+
+  return DstReg;
+}
+
+/////// Instruction converters
+// A converter is defined for each scalar opcode that can be converted into
+// the vector domain.
+
+/// Abstract instruction converter base class.
+class InstrConverterBase {
+protected:
+  unsigned SrcOpcode;
+
+public:
+  InstrConverterBase(unsigned SrcOpcode) : SrcOpcode(SrcOpcode) {}
+
+  virtual ~InstrConverterBase() {}
+
+  /// \returns true if \p MI is legal to convert.
+  virtual bool isLegal(const MachineInstr *MI,
+                       const SystemZInstrInfo *TII,
+                       const MachineRegisterInfo *MRI) const {
+    assert(MI->getOpcode() == SrcOpcode &&
+           "Wrong instruction passed to converter");
+
+    // Don't convert if any non-memory operand contains an address (an
+    // OPERAND_MEMORY register operand should always have an ADDR.. regclass
+    // and is therefore never reassigned and always ok here).
+    const MCInstrDesc &MCID = MI->getDesc();
+    for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
+      const MachineOperand &Op = MI->getOperand(I);
+      if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
+        continue;
+      const TargetRegisterClass *RC = MRI->getRegClass(Op.getReg());
+      if (RC != &SystemZ::ADDR64BitRegClass &&
+          RC != &SystemZ::ADDR32BitRegClass &&
+          RC != &SystemZ::ADDR128BitRegClass)
+        continue;
+      if (I >= MCID.getNumOperands() ||
+          MCID.OpInfo[I].OperandType != MCOI::OPERAND_MEMORY) {
+        LLVM_DEBUG(dumpEnclosureMsg("address ", MI););
+        return false;
+      }
+    }
+
+    // Memory: Reject all instructions with >12 bit displacements. Using a
+    // GPR to add a bigger offset would defeat the purpose of helping
+    // register pressure (that would only possibly be beneficial if the
+    // address register is killed and therefore could be used for this).
+    if (MI->mayLoad() || MI->mayStore()) {
+      assert(MI->hasOneMemOperand() && "Missing memory operand?");
+      unsigned NumOps = MI->getNumExplicitOperands();
+      int Displ = MI->getOperand(NumOps - 2).getImm();
+      if (!isUInt<12>(Displ)) {
+        LLVM_DEBUG(dumpEnclosureMsg("offset ", MI););
+        return false;
+      }
+    }
+
+    // Only deal with subregs in COPYs from GR64 to GR32.
+    for (auto &Op : MI->explicit_uses())
+      if (Op.isReg() && Op.getSubReg() &&
+          (!MI->isCopy() ||
+           (Op.getSubReg() != SystemZ::subreg_l32 ||
+            MRI->getRegClass(Op.getReg()) != &SystemZ::GR64BitRegClass))) {
+        LLVM_DEBUG(dumpEnclosureMsg("subreg ", MI););
+        return false;
+      }
+
+    return true;
+  }
+
+  /// In the first iteration (ToFinal == false), the set of possible vector
+  /// lanes is found for each operand. If ToFinal is true, a single lane for
+  /// each operand is selected. \returns true if anything changed.
+  virtual bool findLanes(const MachineInstr *MI,
+                         DenseMap<Register, unsigned> &Lanes,
+                         std::list<MachineInstr *> &Worklist,
+                         bool ToFinal,
+                         const MachineRegisterInfo *MRI) const {
+    return findLanesGeneric(MI, Lanes, Worklist, ToFinal, MRI);
+  }
+
+  /// Applies conversion to \p MI.
+  ///
+  /// \returns true if \p MI is no longer needed, and can be deleted.
+  virtual bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII,
+                            MachineRegisterInfo *MRI,
+                            DenseMap<Register, unsigned> &Lanes) = 0;
+
+  /// \returns the cost increment incurred by converting \p MI.
+ virtual double getExtraCost(const MachineInstr *MI, + MachineRegisterInfo *MRI) const { return 0; } +}; + +/// An Instruction Converter for pseudos like PHI instructions which are not +/// changed. +class PseudoConverter : public InstrConverterBase { +public: + PseudoConverter(unsigned SrcOpcode) : InstrConverterBase(SrcOpcode) {} + + bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII, + MachineRegisterInfo *MRI, + DenseMap &Lanes) override { + return false; // Don't erase + } +}; + +// If MI is a COPY of the low 32 bits of a VLGV instruction return the VLGV, +// otherwise nullptr. +static const MachineInstr *getVLGVDefMIFromCopyLow32(const MachineInstr *MI, + const MachineRegisterInfo *MRI) { + if (!MI->isCopy()) + return nullptr; + const MachineOperand &SrcMO = MI->getOperand(1); + if (SrcMO.getSubReg() != SystemZ::subreg_l32 || + !MRI->hasOneDef(SrcMO.getReg())) + return nullptr; + MachineInstr *DefMI = MRI->getVRegDef(SrcMO.getReg()); + if (DefMI->getOpcode() == SystemZ::VLGVF || + DefMI->getOpcode() == SystemZ::VLGVH || + DefMI->getOpcode() == SystemZ::VLGVB) + return DefMI; + return nullptr; +} + +// If Reg was defined by or copied from a VLGV instruction, return the VLGV, +// otherwise nullptr. +static const MachineInstr* +getVLGVDefMIFromReg(Register Reg, const MachineRegisterInfo *MRI) { + if (!MRI->hasOneDef(Reg)) + return nullptr; + MachineInstr *MI = MRI->getVRegDef(Reg); + if (MI->getOpcode() == SystemZ::VLGVG) + return MI; + while (MI->isCopy()) { + Register DstReg = MI->getOperand(0).getReg(); + Register SrcReg = MI->getOperand(1).getReg(); + if (!is32BitReg(DstReg, MRI) || !is32BitReg(SrcReg, MRI) || + !MRI->hasOneDef(SrcReg)) + break; + MI = MRI->getVRegDef(SrcReg); + } + return getVLGVDefMIFromCopyLow32(MI, MRI); +} + +/// An instruction converter for replacing COPY instructions. +class COPYConverter : public InstrConverterBase { +public: + + COPYConverter() : InstrConverterBase(SystemZ::COPY) {} + + bool isLegal(const MachineInstr *MI, + const SystemZInstrInfo *TII, + const MachineRegisterInfo *MRI) const override { + if (!InstrConverterBase::isLegal(MI, TII, MRI)) + return false; + + const MachineOperand &DstMO = MI->getOperand(0); + const MachineOperand &SrcMO = MI->getOperand(1); + if (Register::isPhysicalRegister(DstMO.getReg()) || + Register::isPhysicalRegister(SrcMO.getReg())) { + // Don't convert a COPY involving a phys-reg. + LLVM_DEBUG(dumpEnclosureMsg("physreg ", MI);); + return false; + } + + return true; + } + + virtual bool findLanes(const MachineInstr *MI, + DenseMap &Lanes, + std::list &Worklist, + bool ToFinal, + const MachineRegisterInfo *MRI) const override { + const MachineOperand &DstMO = MI->getOperand(0); + const MachineOperand &SrcMO = MI->getOperand(1); + if (!SrcMO.getSubReg()) + return InstrConverterBase::findLanes(MI, Lanes, Worklist, ToFinal, MRI); + + // VLGVF/VGLVH/VLGVB cases. + if (const MachineInstr *VLGV_MI = getVLGVDefMIFromCopyLow32(MI, MRI)) { + // The COPY source reg will be replaced by the vector (VLGV) source reg + // and will/can not have any lane assigned to it. Find the lane for the + // extracted element and assign it to DstLanes. + unsigned DstLanes = VLGVElt2Lane(VLGV_MI); + return updateLanes(DstMO.getReg(), DstLanes, Lanes, MRI, Worklist, MI); + } + + // General case of copying low32 subreg. 
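+    // The low 32 bits of a doubleword in lane G0 are read as fullword lane
+    // F1, and of G1 as F3 (asserted below), so the two lane sets are tied
+    // together pairwise.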
+ if (ToFinal) { + bool Change = selectLanesGeneric(MI, Lanes, Worklist, MRI); + assert(((Lanes[SrcMO.getReg()] == G0 && Lanes[DstMO.getReg()] == F1) || + (Lanes[SrcMO.getReg()] == G1 && Lanes[DstMO.getReg()] == F3)) && + "Bad lanes for COPY of subreg_l32."); + return Change; + } + + unsigned DstLanes = Lanes[DstMO.getReg()] & (F1 | F3); + unsigned SrcLanes = Lanes[SrcMO.getReg()]; + applyLaneDeps(SrcLanes, G0, DstLanes, F1); + applyLaneDeps(SrcLanes, G1, DstLanes, F3); + return updateLanes(DstMO.getReg(), DstLanes, SrcMO.getReg(), SrcLanes, + Lanes,MRI, Worklist, MI); + } + + bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII, + MachineRegisterInfo *MRI, + DenseMap &Lanes) override { + MachineOperand &SrcMO = MI->getOperand(1); + + // VLGVF/VGLVH/VLGVB cases. + if (const MachineInstr *VLGV_MI = getVLGVDefMIFromCopyLow32(MI, MRI)) { + const MachineOperand &VecMO = VLGV_MI->getOperand(1); + SrcMO.setReg(VecMO.getReg()); + SrcMO.setIsKill(VecMO.isKill()); + } + + if (SrcMO.getSubReg()) { + // Remove subreg of COPY source and rely on using the right vector + // element (lane) in users. + LLVM_DEBUG(dbgs() << "From : "; MI->dump();); + SrcMO.setSubReg(0); + LLVM_DEBUG(dbgs() << " To "; MI->dump();); + } + + // Don't erase the COPY. + return false; + } +}; + +/// An instruction converter for element extractions (VLGV). The 32/16/8 bit +/// cases COPY the low 32 bits out of the defined 64 bits and therefore their +/// conversions are handled in part by the COPYConverter. +class VLGVConverter : public InstrConverterBase { + + // Returns true if the UserMI is only using the extracted bits of the DefMI + // element, and not the full zero extended value. + bool isUsingOnlyExtractedBits(const MachineInstr *UserMI, + const MachineInstr *DefMI, + const MachineRegisterInfo *MRI) const { + assert((DefMI->getOpcode() == SystemZ::VLGVH || + DefMI->getOpcode() == SystemZ::VLGVB) && "Bad DefMI opcode."); + if (UserMI->isCopy()) { + Register CopyDefReg = UserMI->getOperand(0).getReg(); + for (auto &CopyUseMI : MRI->use_nodbg_instructions(CopyDefReg)) + if (!isUsingOnlyExtractedBits(&CopyUseMI, DefMI, MRI)) + return false; + return true; + } + else if (UserMI->getOpcode() == SystemZ::TMLMux) { + assert(getVLGVDefMIFromReg(UserMI->getOperand(0).getReg(), MRI) == DefMI && + "Could not trace back to DefMI?"); + if (DefMI->getOpcode() == SystemZ::VLGVB) + return isUInt<8>(UserMI->getOperand(1).getImm()); + return true; + } + + return false; + } + +public: + VLGVConverter(unsigned SrcOpcode) : InstrConverterBase(SrcOpcode) {} + + bool isLegal(const MachineInstr *MI, + const SystemZInstrInfo *TII, + const MachineRegisterInfo *MRI) const override { + if (!InstrConverterBase::isLegal(MI, TII, MRI)) + return false; + + // Can only deal with a constant element index. + if (MI->getOperand(2).getReg()) { + LLVM_DEBUG(dumpEnclosureMsg("variable elt", MI);); + return false; + } + + Register DefReg = MI->getOperand(0).getReg(); + if (MI->getOpcode() != SystemZ::VLGVG) { + // All users should be a COPY of the low32 subreg, and all those COPYs + // must be able to find their way to MI as well. 
+ for (auto &UseMI : MRI->use_nodbg_instructions(DefReg)) + if (getVLGVDefMIFromCopyLow32(&UseMI, MRI) != MI) { + LLVM_DEBUG(dumpEnclosureMsg("context ", MI);); + return false; + } + } + + if (MI->getOpcode() == SystemZ::VLGVH || MI->getOpcode() == SystemZ::VLGVB) { + // Since extracting a halfword/byte element zero extends it to 32 bits, + // using that subelement without extraction can only be done directly + // when the extension is not needed. + for (auto &UseMI : MRI->use_nodbg_instructions(DefReg)) + if (!isUsingOnlyExtractedBits(&UseMI, MI, MRI)) { + LLVM_DEBUG(dumpEnclosureMsg("context ", MI);); + return false; + } + } + + return true; + } + + virtual bool findLanes(const MachineInstr *MI, + DenseMap &Lanes, + std::list &Worklist, + bool ToFinal, + const MachineRegisterInfo *MRI) const override { + if (MI->getOpcode() == SystemZ::VLGVG) { + Register DstReg = MI->getOperand(0).getReg(); + unsigned DstLanes = Lanes[DstReg] & VLGVElt2Lane(MI); + return updateLanes(DstReg, DstLanes, Lanes, MRI, Worklist, MI); + } + // VLGVF/VLGVH/VLGVB: These define a gr64bit reg which have the + // subreg_l32 COPY:ed from it. The dst-reg of that COPY will get the lane + // of the extracted element directly. + return false; + } + + bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII, + MachineRegisterInfo *MRI, + DenseMap &Lanes) override { + if (MI->getOpcode() == SystemZ::VLGVG) { + // Replace with a COPY. + MachineBasicBlock *MBB = MI->getParent(); + DebugLoc DL = MI->getDebugLoc(); + MachineBasicBlock::iterator Start = getPrevOrEnd(MI); + BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::COPY)) + .add(MI->getOperand(0)) + .add(MI->getOperand(1)); + LLVM_DEBUG(dumpConversion(MI, Start)); + } else { + // VLGVF/VLGVH/VLGVB: The COPY source operand will be reset to the VLGV + // source, so just remove this. + LLVM_DEBUG(dbgs() << "From : "; MI->dump();); + LLVM_DEBUG(dbgs() << " To (removed)\n";); + } + return true; + } + + double getExtraCost(const MachineInstr *MI, + MachineRegisterInfo *MRI) const override { + LLVM_DEBUG(dbgs() << "Removed element extraction cost : \t"; MI->dump();); + return -3; + } +}; + +// A converter for loads of various immediates. +class ImmLoadConverter : public InstrConverterBase { + ImmediateType ImmType; + +public: + ImmLoadConverter(unsigned SrcOpcode, ImmediateType ImmT) + : InstrConverterBase(SrcOpcode), ImmType(ImmT) {} + + bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII, + MachineRegisterInfo *MRI, + DenseMap &Lanes) override { + MachineBasicBlock::iterator Start = getPrevOrEnd(MI); + loadImmIntoVecElt(MI, TII, MRI, + lane2EltIdx(Lanes[MI->getOperand(0).getReg()]), + MI->getOperand(1).getImm(), ImmType, + MI->getOperand(0).getReg()); + LLVM_DEBUG(dumpConversion(MI, Start)); + return true; + } + + // TODO: Return a cost based on number of instructions needed. +}; + +/// An instruction converter which replaces an instruction with another. +class InstrReplacer : public InstrConverterBase { +protected: + // If the vector lanes call for it, the "low" or "odd" opcode needs to be + // used. For example, unpacking F0 to G0 needs a VUPHF, while unpacking F2 + // to G0 would require VUPLF. In such a case DstOpcodeAlt is set for that + // particular MI during conversion. + unsigned DstOpcodeAlt; + +private: + // If a derived converter has set DstOpcodeAlt, return it this time and + // clear it. Otherwise return the regular DstOpcode. 
+  unsigned getDstOpcodeToUse() {
+    unsigned Opc = DstOpcode;
+    if (DstOpcodeAlt) {
+      Opc = DstOpcodeAlt;
+      DstOpcodeAlt = 0;
+    }
+    return Opc;
+  }
+
+public:
+  /// Opcode of the destination instruction.
+  unsigned DstOpcode;
+
+  InstrReplacer(unsigned SrcOpcode, unsigned DstOpcode)
+    : InstrConverterBase(SrcOpcode), DstOpcodeAlt(0), DstOpcode(DstOpcode) {}
+
+  bool isLegal(const MachineInstr *MI,
+               const SystemZInstrInfo *TII,
+               const MachineRegisterInfo *MRI) const override {
+    if (!InstrConverterBase::isLegal(MI, TII, MRI))
+      return false;
+
+    // It's illegal to replace an instruction that implicitly defines a
+    // register with an instruction that doesn't, unless that register def
+    // is dead.
+    for (auto &MO : MI->implicit_operands())
+      if (MO.isReg() && MO.isDef() && !MO.isDead() &&
+          !TII->get(DstOpcode).hasImplicitDefOfPhysReg(MO.getReg())) {
+        LLVM_DEBUG(dumpEnclosureMsg("implicit reg", MI););
+        return false;
+      }
+
+    // DstOpcode should not implicitly define any register if MI doesn't.
+    const MCInstrDesc &DstMCID = TII->get(DstOpcode);
+    if (DstMCID.getNumImplicitDefs() > 0)
+      for (const MCPhysReg *Regs = DstMCID.getImplicitDefs(); *Regs; ++Regs)
+        if (!MI->definesRegister(*Regs)) {
+          LLVM_DEBUG(dumpEnclosureMsg("implicit reg", MI););
+          return false;
+        }
+
+    return true;
+  }
+
+protected:
+  MachineInstr *replaceMI(MachineInstr *MI, const SystemZInstrInfo *TII,
+                          MachineRegisterInfo *MRI) {
+    MachineBasicBlock *MBB = MI->getParent();
+    DebugLoc DL = MI->getDebugLoc();
+    unsigned Opc = getDstOpcodeToUse();
+    MachineInstrBuilder Bld = BuildMI(*MBB, MI, DL, TII->get(Opc));
+
+    // Transfer explicit operands from original instruction.
+    for (auto &Op : MI->explicit_operands())
+      Bld.add(Op);
+
+    if (MI->hasOneMemOperand())
+      Bld.setMemRefs(MI->memoperands());
+
+    return Bld;
+  }
+
+public:
+  bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII,
+                    MachineRegisterInfo *MRI,
+                    DenseMap<Register, unsigned> &Lanes) override {
+    MachineBasicBlock::iterator Start = getPrevOrEnd(MI);
+    replaceMI(MI, TII, MRI);
+    LLVM_DEBUG(dumpConversion(MI, Start));
+    return true;
+  }
+};
+
+// A converter to replace a shift instruction where the immediate shift
+// amount needs to be converted.
+class ShiftReplacer : public InstrReplacer {
+public:
+  ShiftReplacer(unsigned SrcOpcode, unsigned DstOpcode)
+    : InstrReplacer(SrcOpcode, DstOpcode) {}
+
+  bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII,
+                    MachineRegisterInfo *MRI,
+                    DenseMap<Register, unsigned> &Lanes) override {
+    MachineBasicBlock::iterator Start = getPrevOrEnd(MI);
+    MachineInstr *BuiltMI = InstrReplacer::replaceMI(MI, TII, MRI);
+    MachineOperand &ImmMO = BuiltMI->getOperand(3);
+    ImmMO.setImm(ImmMO.getImm() & 0xfff);
+    LLVM_DEBUG(dumpConversion(MI, Start));
+    return true;
+  }
+};
+
+// A converter to replace a scalar load with a load into a vector element.
+class LoadReplacer : public InstrReplacer {
+public:
+  LoadReplacer(unsigned SrcOpcode, unsigned DstOpcode)
+    : InstrReplacer(SrcOpcode, DstOpcode) {}
+
+  bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII,
+                    MachineRegisterInfo *MRI,
+                    DenseMap<Register, unsigned> &Lanes) override {
+    MachineBasicBlock::iterator Start = getPrevOrEnd(MI);
+    loadMemIntoVecElt(MI, TII, MRI,
+                      lane2EltIdx(Lanes[MI->getOperand(0).getReg()]),
+                      MI->getOperand(0).getReg());
+    LLVM_DEBUG(dumpConversion(MI, Start));
+    return true;
+  }
+};
+
+// A converter to replace a scalar store with a store of a vector element.
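+// E.g. a word kept in fullword lane F1 becomes a vector-element store of
+// element 1; a truncating 2-byte store of the same value uses halfword
+// element 3 (the low half of F1), per the index scaling below.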
+class StoreReplacer : public InstrReplacer { +public: + StoreReplacer(unsigned SrcOpcode, unsigned DstOpcode) + : InstrReplacer(SrcOpcode, DstOpcode) {} + + bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII, + MachineRegisterInfo *MRI, + DenseMap &Lanes) override { + MachineBasicBlock *MBB = MI->getParent(); + DebugLoc DL = MI->getDebugLoc(); + MachineBasicBlock::iterator Start = getPrevOrEnd(MI); + + Register SavedReg = MI->getOperand(0).getReg(); + unsigned EltIdx = lane2EltIdx(Lanes[SavedReg]); + MachineMemOperand *MMO = *MI->memoperands_begin(); + unsigned SavedBytes = MMO->getSize(); + if (SavedBytes < 4) { + assert((is32BitReg(SavedReg, MRI) || + MRI->getVRegDef(SavedReg)->getOpcode() == SystemZ::VLVGF) + && "Expected truncating store from 32 bit register only."); + unsigned SubElts = 4 / SavedBytes; + EltIdx = ((EltIdx + 1) * SubElts) - 1; + } + + BuildMI(*MBB, MI, DL, TII->get(DstOpcode)) + .add(MI->getOperand(0)) + .add(MI->getOperand(1)) + .add(MI->getOperand(2)) + .add(MI->getOperand(3)) + .addImm(EltIdx) + .setMemRefs(MI->memoperands()); + LLVM_DEBUG(dumpConversion(MI, Start)); + return true; + } +}; + +// A converter to replace a scalar sign-extending load with a load into a +// vector element followed by an unpack. +class SExtLoadReplacer : public InstrReplacer { +public: + SExtLoadReplacer(unsigned SrcOpcode, unsigned DstOpcode) + : InstrReplacer(SrcOpcode, DstOpcode) {} + + bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII, + MachineRegisterInfo *MRI, + DenseMap &Lanes) override { + // Load into the same element index as that of the result and then unpack. + // TODO: sext of a loaded imm could do just vleig (subregliveness-04.ll). + MachineBasicBlock::iterator Start = getPrevOrEnd(MI); + Register VTmp1 = loadMemIntoVecElt(MI, TII, MRI, + lane2EltIdx(Lanes[MI->getOperand(0).getReg()])); + BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(DstOpcode)) + .add(MI->getOperand(0)) + .addReg(VTmp1); + LLVM_DEBUG(dumpConversion(MI, Start)); + return true; + } + + double getExtraCost(const MachineInstr *MI, + MachineRegisterInfo *MRI) const override { + LLVM_DEBUG(dbgs() << "Extra cost for unpack : \t"; MI->dump();); + return 1; + } +}; + +// A converter to replace a scalar zero-extending load with a VLLEZ. +class ZExtLoadReplacer : public InstrReplacer { +public: + ZExtLoadReplacer(unsigned SrcOpcode, unsigned DstOpcode) + : InstrReplacer(SrcOpcode, DstOpcode) {} + + bool findLanes(const MachineInstr *MI, + DenseMap &Lanes, + std::list &Worklist, + bool ToFinal, + const MachineRegisterInfo *MRI) const override { + // A VLLEZx always loads into (the rightmost subelement of) G0. + Register DstReg = MI->getOperand(0).getReg(); + unsigned DstLanes = Lanes[DstReg] & (is64BitReg(DstReg, MRI) ? G0 : F1); + return updateLanes(DstReg, DstLanes, Lanes, MRI, Worklist, MI); + } +}; + +// A converter to replace a scalar register extension from 32 to 64 or 16 to +// 32 bits. 
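+// E.g. extending fullword lane F0 into doubleword lane G0 uses the regular
+// "high" unpack opcode, while extending from F2 into G0 needs the "low"
+// variant (LowOpcode), selected via DstOpcodeAlt in convertInstr().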
+class RegExtReplacer : public InstrReplacer { + unsigned LowOpcode; + +public: + RegExtReplacer(unsigned SrcOpcode, unsigned DstOpcode, unsigned LOpc) + : InstrReplacer(SrcOpcode, DstOpcode), LowOpcode(LOpc) {} + + bool findLanes(const MachineInstr *MI, + DenseMap &Lanes, + std::list &Worklist, + bool ToFinal, + const MachineRegisterInfo *MRI) const override { + Register DstReg = MI->getOperand(0).getReg(); + Register SrcReg = MI->getOperand(1).getReg(); + unsigned DstLanes = Lanes[DstReg]; + unsigned SrcLanes = Lanes[SrcReg]; + // 32 to 64 bits: F0/F2 -> G0 F1/F3 -> G1 + // 16 to 32 bits: F0/F2 -> F1 F1/F3 -> F3 + bool ResultIs64Bit = is64BitReg(DstReg, MRI); + unsigned ResLane0 = ResultIs64Bit ? G0 : F1; + unsigned ResLane1 = ResultIs64Bit ? G1 : F3; + + if (ToFinal) { + selectLane(DstLanes); + // Make sure to select source idx 0 or 2 for dst idx 0. + if (DstLanes & ResLane0) + SrcLanes &= (F0 | F2); + selectLane(SrcLanes); + } + else { + DstLanes &= (ResLane0 | ResLane1); + applyLaneDeps(SrcLanes, (F0 | F2), DstLanes, ResLane0); + applyLaneDeps(SrcLanes, (F1 | F3), DstLanes, ResLane1); + } + assert((!ToFinal || + ((DstLanes == ResLane0 && (SrcLanes == F0 || SrcLanes == F2)) || + (DstLanes == ResLane1 && (SrcLanes == F1 || SrcLanes == F3)))) + && "Bad lanes for register extension."); + return updateLanes(DstReg, DstLanes, SrcReg, SrcLanes, + Lanes, MRI, Worklist, MI); + } + + bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII, + MachineRegisterInfo *MRI, + DenseMap &Lanes) override { + // Use unpack "low" opcode instead if called for. + if (lane2EltIdx(Lanes[MI->getOperand(1).getReg()]) >= 2) + DstOpcodeAlt = LowOpcode; + return InstrReplacer::convertInstr(MI, TII, MRI, Lanes); + } +}; + +// A converter for a reg/mem instruction. The memory operand is first loaded +// into a vector element. +class RegMemReplacer : public InstrReplacer { +public: + RegMemReplacer(unsigned SrcOpcode, unsigned DstOpcode) + : InstrReplacer(SrcOpcode, DstOpcode) {} + + bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII, + MachineRegisterInfo *MRI, + DenseMap &Lanes) override { + MachineBasicBlock *MBB = MI->getParent(); + DebugLoc DL = MI->getDebugLoc(); + MachineBasicBlock::iterator Start = getPrevOrEnd(MI); + + Register VTmp1 = loadMemIntoVecElt(MI, TII, MRI, + lane2EltIdx(Lanes[MI->getOperand(1).getReg()])); + BuildMI(*MBB, MI, DL, TII->get(DstOpcode)) + .add(MI->getOperand(0)) + .add(MI->getOperand(1)) + .addReg(VTmp1); + LLVM_DEBUG(dumpConversion(MI, Start)); + return true; + } + + double getExtraCost(const MachineInstr *MI, + MachineRegisterInfo *MRI) const override { + LLVM_DEBUG(dbgs() << "Extra cost for needed VLE : \t"; MI->dump();); + return 1; + } +}; + +// A converter for a reg/imm instruction. The immediate operand is first loaded +// into a vector element. 
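+// E.g. an add-with-immediate would become a VLEI* materializing the
+// immediate in the source operand's lane, followed by the corresponding
+// vector register-register operation; the extra load is charged in
+// getExtraCost().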
+class RegImmReplacer : public InstrReplacer { + ImmediateType ImmType; + +public: + RegImmReplacer(unsigned SrcOpcode, unsigned DstOpcode, ImmediateType ImmT) + : InstrReplacer(SrcOpcode, DstOpcode), ImmType(ImmT) {} + + bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII, + MachineRegisterInfo *MRI, + DenseMap &Lanes) override { + MachineBasicBlock *MBB = MI->getParent(); + DebugLoc DL = MI->getDebugLoc(); + MachineBasicBlock::iterator Start = getPrevOrEnd(MI); + + Register VTmp1 = loadImmIntoVecElt(MI, TII, MRI, + lane2EltIdx(Lanes[MI->getOperand(1).getReg()]), + MI->getOperand(2).getImm(), ImmType); + BuildMI(*MBB, MI, DL, TII->get(DstOpcode)) + .add(MI->getOperand(0)) + .add(MI->getOperand(1)) + .addReg(VTmp1); + LLVM_DEBUG(dumpConversion(MI, Start)); + return true; + } + + double getExtraCost(const MachineInstr *MI, + MachineRegisterInfo *MRI) const override { + LLVM_DEBUG(dbgs() << "Extra cost for immediate load : \t"; MI->dump();); + return 1; + } +}; + +// A converter for a multiply with reg/mem/imm instructions. A +// memory/immediate operand is first loaded into a vector element. +class MulReplacer : public InstrReplacer { + ImmediateType ImmType; +public: + MulReplacer(unsigned SrcOpcode, unsigned DstOpcode, ImmediateType ImmT) + : InstrReplacer(SrcOpcode, DstOpcode), ImmType(ImmT) {} + + bool findLanes(const MachineInstr *MI, + DenseMap &Lanes, + std::list &Worklist, + bool ToFinal, + const MachineRegisterInfo *MRI) const override { + Register DstReg = MI->getOperand(0).getReg(); + Register Src1Reg = MI->getOperand(1).getReg(); + Register Src2Reg = 0; + const MachineOperand &Src2MO = MI->getOperand(2); + if (Src2MO.isReg() && Lanes.find(Src2MO.getReg()) != Lanes.end()) + Src2Reg = Src2MO.getReg(); + + if (ToFinal) { + bool Change = selectLanesGeneric(MI, Lanes, Worklist, MRI); + assert(((((Lanes[Src1Reg] == F0 || Lanes[Src1Reg] == F1) && + Lanes[DstReg] == F1) || + ((Lanes[Src1Reg] == F2 || Lanes[Src1Reg] == F3) && + Lanes[DstReg] == F3)) && + (!Src2Reg || Lanes[Src1Reg] == Lanes[Src2Reg])) && + "Bad vector lanes for VMEF/VMOF"); + return Change; + } + + // VMEF (Vector Multiply Even Fullword) works on the even indexed + // elements. The result has double width, so its lane is the odd lane of + // the double sized element. VMOF works on the odd elements instead. + unsigned DstLanes = (F1 | F3) & Lanes[DstReg]; + unsigned SrcLanes = Lanes[Src1Reg]; + if (Src2Reg) + SrcLanes &= Lanes[Src2Reg]; + applyLaneDeps(SrcLanes, (F0 | F1), DstLanes, F1); + applyLaneDeps(SrcLanes, (F2 | F3), DstLanes, F3); + bool Change = updateLanes(DstReg, DstLanes, Src1Reg, SrcLanes, + Lanes, MRI, Worklist, MI); + if (Src2Reg) + Change |= updateLanes(Src2Reg, SrcLanes, Lanes, MRI, Worklist, MI); + return Change; + } + + bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII, + MachineRegisterInfo *MRI, + DenseMap &Lanes) override { + MachineBasicBlock *MBB = MI->getParent(); + DebugLoc DL = MI->getDebugLoc(); + MachineBasicBlock::iterator Start = getPrevOrEnd(MI); + + Register RHSReg = SystemZ::NoRegister; + MachineOperand &Src2MO = MI->getOperand(2); + if (Src2MO.isImm()) + RHSReg = loadImmIntoVecElt(MI, TII, MRI, + lane2EltIdx(Lanes[MI->getOperand(1).getReg()]), + MI->getOperand(2).getImm(), ImmType); + else if (MI->getDesc().OpInfo[2].OperandType == MCOI::OPERAND_MEMORY) + RHSReg = loadMemIntoVecElt(MI, TII, MRI, + lane2EltIdx(Lanes[MI->getOperand(1).getReg()])); + unsigned Opc = DstOpcode; + // Use "odd" opcode instead if called for. 
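+    // (VMEF multiplies the even-indexed elements, VMOF the odd-indexed
+    // ones, so the operands' lane parity decides which opcode matches.)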
+    if (lane2EltIdx(Lanes[MI->getOperand(1).getReg()]) % 2 != 0)
+      Opc = SystemZ::VMOF;
+    MachineInstrBuilder Bld = BuildMI(*MBB, MI, DL, TII->get(Opc));
+    Bld.add(MI->getOperand(0));
+    Bld.add(MI->getOperand(1));
+    if (RHSReg)
+      Bld.addReg(RHSReg);
+    else
+      Bld.add(Src2MO);
+    LLVM_DEBUG(dumpConversion(MI, Start));
+    return true;
+  }
+};
+
+// A converter for fp<->int conversions.
+class FPIntConvReplacer : public InstrReplacer {
+public:
+  FPIntConvReplacer(unsigned SrcOpcode, unsigned DstOpcode)
+    : InstrReplacer(SrcOpcode, DstOpcode) {}
+
+  bool isInt2FP() const {
+    return (DstOpcode == SystemZ::WCDGB || DstOpcode == SystemZ::WCEFB ||
+            DstOpcode == SystemZ::WCDLGB || DstOpcode == SystemZ::WCELFB);
+  }
+
+  bool findLanes(const MachineInstr *MI,
+                 DenseMap<Register, unsigned> &Lanes,
+                 std::list<MachineInstr *> &Worklist,
+                 bool ToFinal,
+                 const MachineRegisterInfo *MRI) const override {
+    // The scalar FP conversion instructions correspond to the first lane.
+    unsigned SrcRegOpNo = MI->getOperand(1).isReg() ? 1 : 2;
+    Register Reg = MI->getOperand(isInt2FP() ? SrcRegOpNo : 0).getReg();
+    unsigned RegLanes = Lanes[Reg] & (is64BitReg(Reg, MRI) ? G0 : F0);
+    return updateLanes(Reg, RegLanes, Lanes, MRI, Worklist, MI);
+  }
+
+  bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII,
+                    MachineRegisterInfo *MRI,
+                    DenseMap<Register, unsigned> &Lanes) override {
+    const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
+    MachineBasicBlock *MBB = MI->getParent();
+    DebugLoc DL = MI->getDebugLoc();
+    MachineBasicBlock::iterator Start = getPrevOrEnd(MI);
+
+    const TargetRegisterClass *RC =
+      TRI->getRegClass(TII->get(DstOpcode).OpInfo[0].RegClass);
+    unsigned SubRegIdx = (RC == &SystemZ::VR64BitRegClass ? SystemZ::subreg_h64
+                          : SystemZ::subreg_h32);
+    Register VTmp0 = MRI->createVirtualRegister(RC);
+    Register VTmp1 = MRI->createVirtualRegister(RC);
+    unsigned M4 = 0; // XxC
+
+    if (isInt2FP()) {
+      unsigned M5 = 0; // Rounding method
+      unsigned SrcRegOpNo = 1;
+      if (DstOpcode == SystemZ::WCDLGB || DstOpcode == SystemZ::WCELFB) {
+        M4 = MI->getOperand(3).getImm();
+        M5 = MI->getOperand(1).getImm();
+        SrcRegOpNo = 2;
+      }
+
+      BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::COPY), VTmp0)
+        .addReg(MI->getOperand(SrcRegOpNo).getReg(), 0, SubRegIdx);
+      BuildMI(*MBB, MI, DL, TII->get(DstOpcode), VTmp1)
+        .addReg(VTmp0).addImm(M4).addImm(M5);
+      BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::COPY))
+        .add(MI->getOperand(0))
+        .addReg(VTmp1);
+    }
+    else {
+      Register VTmp2 = MRI->createVirtualRegister(RC);
+      unsigned M5 = MI->getOperand(1).getImm(); // Rounding method
+      if (DstOpcode == SystemZ::WCLGDB || DstOpcode == SystemZ::WCLFEB)
+        M4 = MI->getOperand(3).getImm();
+
+      BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::COPY), VTmp0)
+        .add(MI->getOperand(2));
+      BuildMI(*MBB, MI, DL, TII->get(DstOpcode), VTmp1)
+        .addReg(VTmp0).addImm(M4).addImm(M5);
+      BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), VTmp2);
+      BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG))
+        .add(MI->getOperand(0))
+        .addReg(VTmp2).addReg(VTmp1).addImm(SubRegIdx);
+    }
+
+    LLVM_DEBUG(dumpConversion(MI, Start));
+    return true;
+  }
+};
+
+// A converter that eliminates a vector element extraction followed by a
+// compare with 0/-1 or a test-under-mask. A VTM can be used instead.
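+//
+// Roughly (illustrative sequence), instead of
+//   %r = VLGVB %v, 3;  TMLMux %r, 255;  BRC ...
+// the element is tested in place:
+//   VZERO %m;  %m2 = VLEIB %m, -1, 3;  VTM %v, %m2;  BRC ...
+// with the CC users' masks rewritten for VTM.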
+class CompareTMReplacer : public InstrReplacer { + bool isCompareImm() const { + switch(SrcOpcode) { + case SystemZ::CHIMux: + case SystemZ::CGHI: + return true; + case SystemZ::TMLMux: + return false; + default: break; + } + llvm_unreachable("Unhandled opcode."); + return false; + } + + // Converts a scalar TM CC mask to fit VTM, or 0. + unsigned TMCCMask2VTM(unsigned CCMask) const { + switch (CCMask) { + case SystemZ::CCMASK_TM_ALL_0: + case SystemZ::CCMASK_TM_ALL_1: + return CCMask; + case SystemZ::CCMASK_TM_SOME_0: + return SystemZ::CCMASK_VTM_SOME_0; + case SystemZ::CCMASK_TM_SOME_1: + return SystemZ::CCMASK_VTM_SOME_1; + default: break; + } + return 0; + } + + unsigned getCCMaskForVTM(unsigned CCMask, int64_t Imm) const { + if (isCompareImm()) { + if (Imm == 0 && CCMask == SystemZ::CCMASK_CMP_EQ) + return SystemZ::CCMASK_TM_ALL_0; + if (Imm == 0 && CCMask == SystemZ::CCMASK_CMP_NE) + return SystemZ::CCMASK_VTM_SOME_1; + if (Imm == -1 && CCMask == SystemZ::CCMASK_CMP_EQ) + return SystemZ::CCMASK_TM_ALL_1; + if (Imm == -1 && CCMask == SystemZ::CCMASK_CMP_NE) + return SystemZ::CCMASK_VTM_SOME_0; + } else { + assert(TMCCMask2VTM(CCMask) && "No VTM mask possible?"); + return TMCCMask2VTM(CCMask); + } + llvm_unreachable("No VTM mask found."); + } + +public: + CompareTMReplacer(unsigned SrcOpcode, unsigned DstOpcode) + : InstrReplacer(SrcOpcode, DstOpcode) {} + + bool isLegal(const MachineInstr *MI, + const SystemZInstrInfo *TII, + const MachineRegisterInfo *MRI) const override { + if (!InstrReplacer::isLegal(MI, TII, MRI)) + return false; + + Register Reg = MI->getOperand(0).getReg(); + if (getVLGVDefMIFromReg(Reg, MRI) == nullptr) { + // Only do a compare w/0 in cases that eliminates an element + // extraction. This case is generally optimized in GPRs (by + // SystemZElimCompare), and it is potentially on the critical path of + // the function. + LLVM_DEBUG(dumpEnclosureMsg("not extract ", MI);); + return false; + } + + int64_t Imm = MI->getOperand(1).getImm(); + SmallVector CCUsers; + if (!TII->findCCUsers(MI, CCUsers)) { + LLVM_DEBUG(dumpEnclosureMsg("CC users ", MI);); + return false; + } + + if (isCompareImm()) { + if (Imm != 0 && Imm != -1) { + LLVM_DEBUG(dumpEnclosureMsg("immediate ", MI);); + return false; + } + + // All CC users must check for either EQ or NE. + for (unsigned Idx = 0; Idx < CCUsers.size(); ++Idx) { + const MachineOperand &CCMaskMO = TII->getCCMaskMO(CCUsers[Idx]); + if (CCMaskMO.getImm() != SystemZ::CCMASK_CMP_EQ && + CCMaskMO.getImm() != SystemZ::CCMASK_CMP_NE) { + LLVM_DEBUG(dumpEnclosureMsg("CC user mask", MI);); + return false; + } + } + } + else if (SrcOpcode == SystemZ::TMLMux) { + // All TM CC users must accept VTM as replacement. + for (unsigned Idx = 0; Idx < CCUsers.size(); ++Idx) { + if (!TMCCMask2VTM(TII->getCCMaskMO(CCUsers[Idx]).getImm())) { + LLVM_DEBUG(dumpEnclosureMsg("CC user mask", MI);); + return false; + } + } + } + + return true; + } + + bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII, + MachineRegisterInfo *MRI, + DenseMap &Lanes) override { + MachineBasicBlock *MBB = MI->getParent(); + DebugLoc DL = MI->getDebugLoc(); + MachineBasicBlock::iterator Start = getPrevOrEnd(MI); + + Register Reg = MI->getOperand(0).getReg(); + int64_t Imm = MI->getOperand(1).getImm(); + SmallVector CCUsers; + bool Success = TII->findCCUsers(MI, CCUsers); + assert(Success && "Expected to find the CC users."); + + // Load VTM mask. 
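+    // The single-element mask is built as VZERO + VLEI: all ones in the
+    // element for a compare, or just the tested bits for a TM (with -1
+    // meaning "all bits" below).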
+ unsigned RegLanes = Lanes[Reg]; + unsigned VLEI_Opc; + int16_t VLEI_Imm = -1; + unsigned VLEI_EltIdx = lane2EltIdx(RegLanes); + if (SrcOpcode == SystemZ::TMLMux) { + int64_t AllOnes; + if (isByteLane(RegLanes)) { + assert(isUInt<8>(Imm) && "Impossible VLGVB/TM immediate for VTM."); + AllOnes = 0xff; + VLEI_Opc = SystemZ::VLEIB; + } else { + AllOnes = 0xffff; + VLEI_Opc = SystemZ::VLEIH; + if (isFullWordLane(RegLanes)) + VLEI_EltIdx = (VLEI_EltIdx + 1) * 2 - 1; + } + if (Imm != AllOnes) + VLEI_Imm = Imm; + } else + VLEI_Opc = is64BitReg(Reg, MRI) ? SystemZ::VLEIG : SystemZ::VLEIF; + Register VTmp0 = MRI->createVirtualRegister(&SystemZ::VR128BitRegClass); + BuildMI(*MBB, MI, DL, TII->get(SystemZ::VZERO), VTmp0); + Register VTmp1 = MRI->createVirtualRegister(&SystemZ::VR128BitRegClass); + BuildMI(*MBB, MI, DL, TII->get(VLEI_Opc), VTmp1) + .addReg(VTmp0) + .addImm(VLEI_Imm) + .addImm(VLEI_EltIdx); + + BuildMI(*MBB, MI, DL, TII->get(SystemZ::VTM)) + .addReg(Reg) + .addReg(VTmp1); + + // Update CC users. + for (unsigned Idx = 0; Idx < CCUsers.size(); ++Idx) { + unsigned Flags = CCUsers[Idx]->getDesc().TSFlags; + unsigned FirstOpNum = ((Flags & SystemZII::CCMaskFirst) ? + 0 : CCUsers[Idx]->getNumExplicitOperands() - 2); + MachineOperand &CCValid = CCUsers[Idx]->getOperand(FirstOpNum); + MachineOperand &CCMask = CCUsers[Idx]->getOperand(FirstOpNum + 1); + CCValid.setImm(SystemZ::CCMASK_VTM); + CCMask.setImm(getCCMaskForVTM(CCMask.getImm(), Imm)); + } + + LLVM_DEBUG(dumpConversion(MI, Start)); + return true; + } + + double getExtraCost(const MachineInstr *MI, + MachineRegisterInfo *MRI) const override { + LLVM_DEBUG(dbgs() << "Loading VTM mask : \t"; MI->dump();); + return 1; + } +}; + +// A converter that can insert a scalar value into a vector element when +// there is no other converter available. +class ScalarInserter : public InstrConverterBase { +public: + ScalarInserter() : InstrConverterBase(SystemZ::INSTRUCTION_LIST_END) {} + + bool isLegal(const MachineInstr *MI, + const SystemZInstrInfo *TII, + const MachineRegisterInfo *MRI) const override { + llvm_unreachable("ScalarInserter is separate during legality phase."); + return false; + } + + bool isLegalInsertion(const MachineInstr *MI, + const SystemZInstrInfo *TII, + const MachineRegisterInfo *MRI, + SmallPtrSet &ClosureInstrs) const { + // Insert the result of MI into a vector element when it is used (only) by + // the closure. + if (!EnableGPRInsertion) + return false; + + for (auto &Op : MI->explicit_operands()) + if (Op.isReg() && Op.isUse() && + Register::isVirtualRegister(Op.getReg()) && + ClosureInstrs.count(MRI->getVRegDef(Op.getReg()))) + return false; // Can't use a reassigned register. + + Register DefReg = getDefedGPRReg(MI, MRI); + if (!DefReg) + return false; + if (MRI->getRegClass(DefReg) == &SystemZ::GRH32BitRegClass) + return false; // Inserting can only be done from the low 32. 
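+    // Finally, require at least one user, and expect all users to be inside
+    // the closure (asserted below), so that the inserted VLVG is justified.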
+    bool UsedByClosure = false;
+    for (auto &UseMI : MRI->use_nodbg_instructions(DefReg)) {
+      assert(ClosureInstrs.count(&UseMI) && "Expected user in closure.");
+      UsedByClosure = true;
+    }
+    return UsedByClosure;
+  }
+
+  bool convertInstr(MachineInstr *MI, const SystemZInstrInfo *TII,
+                    MachineRegisterInfo *MRI,
+                    DenseMap<Register, unsigned> &Lanes) override {
+    MachineBasicBlock *MBB = MI->getParent();
+    MachineBasicBlock::iterator InsPt = std::next(MI->getIterator());
+    DebugLoc DL = MI->getDebugLoc();
+    Register OrigScalReg = MI->getOperand(0).getReg();
+    const TargetRegisterClass *RC = MRI->getRegClass(OrigScalReg);
+    if (RC == &SystemZ::GRX32BitRegClass)
+      RC = &SystemZ::GR32BitRegClass;
+    Register NewScalReg = MRI->createVirtualRegister(RC);
+    MI->getOperand(0).setReg(NewScalReg);
+
+    unsigned EltIdx = lane2EltIdx(Lanes[OrigScalReg]);
+    unsigned VLVGOpc = is64BitReg(OrigScalReg, MRI) ? SystemZ::VLVGG
+                                                    : SystemZ::VLVGF;
+    Register VTmp0 = MRI->createVirtualRegister(&SystemZ::VR128BitRegClass);
+    MachineBasicBlock::iterator Built_IMPLDEF =
+      BuildMI(*MBB, InsPt, DL, TII->get(TargetOpcode::IMPLICIT_DEF), VTmp0);
+    // The original register is redefined by the VLVG so that the closure's
+    // users now see a vector value.
+    MachineBasicBlock::iterator Built_VLVG =
+      BuildMI(*MBB, InsPt, DL, TII->get(VLVGOpc), OrigScalReg)
+        .addReg(VTmp0)
+        .addReg(NewScalReg)
+        .addReg(SystemZ::NoRegister)
+        .addImm(EltIdx);
+
+    LLVM_DEBUG(dbgs() << "From : "; MI->dump(););
+    LLVM_DEBUG(dbgs() << " Ins "; Built_IMPLDEF->dump(););
+    LLVM_DEBUG(dbgs() << " "; Built_VLVG->dump(););
+
+    return false; // Don't erase.
+  }
+
+  double getExtraCost(const MachineInstr *MI,
+                      MachineRegisterInfo *MRI) const override {
+    LLVM_DEBUG(dbgs() << "Extra cost for insertion : \t"; MI->dump(););
+    return 1;
+  }
+};
+
+///// Closure: a set of connected virtual registers (edges) and instructions.
+struct Closure {
+  /// Virtual registers in the closure.
+  DenseSet<Register> Edges;
+
+  /// Instructions in the closure.
+  SmallPtrSet<MachineInstr *, 16> Instrs;
+
+  /// Instructions in the closure that do not have a converter and therefore
+  /// would have to be inserted into a vector element with a VLVG.
+  SmallPtrSet<MachineInstr *, 16> NonConvertible;
+
+  /// A set of possible vector lanes for each reassigned register.
+  /// LSB represents lane (vector element) 0.
+  DenseMap<Register, unsigned> Lanes;
+
+  /// True if all enclosed instructions can legally be reassigned.
+  bool Legal;
+
+  /// True if the enclosure should be reassigned.
+  bool Profitable;
+
+  // Costs
+  unsigned NumCalls;
+
+  Closure() : Legal(true), Profitable(true), NumCalls(0) {}
+
+  bool empty() const { return Edges.empty(); }
+  using const_edge_iterator = DenseSet<Register>::const_iterator;
+  iterator_range<const_edge_iterator> edges() const {
+    return iterator_range<const_edge_iterator>(Edges.begin(), Edges.end());
+  }
+
+  LLVM_DUMP_METHOD void dump(const MachineRegisterInfo *MRI) const {
+    dbgs() << "Registers: ";
+    bool First = true;
+    for (Register Reg : Edges) {
+      if (!First)
+        dbgs() << ", ";
+      First = false;
+      dbgs() << printReg(Reg, MRI->getTargetRegisterInfo(), 0, MRI);
+    }
+    dbgs() << "\n" << "Instructions:\n";
+    for (MachineInstr *MI : Instrs)
+      MI->print(dbgs());
+    dbgs() << "\n";
+  }
+};
+
+/// A class for traversing the function while keeping track of live virtual
+/// registers and closures. Depends on Closures not being modified.
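+/// Its main purpose is to count the calls across which each closure is
+/// live (see preScan): vector registers are call-clobbered, so a
+/// reassigned value that is live across a call would have to be spilled
+/// around it.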
+class LiveClosuresTracker {
+  std::vector<Closure> &Closures;
+  VirtRegLiveness &VRLiveness;
+  std::map<Register, unsigned> &Reg2ClosureIdx;
+
+  std::set<Closure *> LiveClosures;
+  std::map<Closure *, std::set<Register>> ClosureLiveRegs;
+
+  Closure *getRegClosure(Register Reg) {
+    if (Reg2ClosureIdx.find(Reg) == Reg2ClosureIdx.end())
+      return nullptr;
+    unsigned Idx = Reg2ClosureIdx[Reg];
+    return &Closures[Idx];
+  }
+
+  void addLiveClosureReg(Closure *C, Register Reg) {
+    LiveClosures.insert(C);
+    ClosureLiveRegs[C].insert(Reg);
+  }
+
+public:
+  LiveClosuresTracker(std::vector<Closure> &C, VirtRegLiveness &VRL,
+                      std::map<Register, unsigned> &R2CIdx)
+    : Closures(C), VRLiveness(VRL), Reg2ClosureIdx(R2CIdx) {}
+
+  void enterMBB(MachineBasicBlock *MBB);
+  void advance(const MachineInstr *MI);
+  void preScan(const MachineInstr *MI);
+};
+
+void LiveClosuresTracker::enterMBB(MachineBasicBlock *MBB) {
+  for (Register Reg : VRLiveness.VRegLiveIns[MBB])
+    if (Closure *C = getRegClosure(Reg))
+      addLiveClosureReg(C, Reg);
+}
+
+void LiveClosuresTracker::advance(const MachineInstr *MI) {
+  const MachineRegisterInfo *MRI = &MI->getParent()->getParent()->getRegInfo();
+
+  for (const MachineOperand &MO : MI->uses())
+    if (MO.isReg() && MO.isKill()) {
+      if (Closure *C = getRegClosure(MO.getReg())) {
+        assert(LiveClosures.count(C) && "Closure was live.");
+        assert(ClosureLiveRegs[C].count(MO.getReg()) && "Enclosed reg was live.");
+        ClosureLiveRegs[C].erase(MO.getReg());
+        if (ClosureLiveRegs[C].empty())
+          LiveClosures.erase(C);
+      }
+    }
+
+  if (Register DefReg = getDefedGPRReg(MI, MRI))
+    if (Closure *C = getRegClosure(DefReg))
+      addLiveClosureReg(C, DefReg);
+}
+
+void LiveClosuresTracker::preScan(const MachineInstr *MI) {
+  if (MI->isCall())
+    for (auto *C : LiveClosures)
+      C->NumCalls++;
+}
+
+class SystemZDomainReassignment : public MachineFunctionPass {
+  const SystemZSubtarget *STI = nullptr;
+  MachineRegisterInfo *MRI = nullptr;
+  const SystemZInstrInfo *TII = nullptr;
+  const MachineDominatorTree *MDT = nullptr;
+
+  /// A map of available instruction converters. Since the only destination
+  /// domain is vector, a converter is identified by the source opcode.
+  DenseMap<unsigned, std::unique_ptr<InstrConverterBase>> Converters;
+
+  /// A converter that can insert a scalar value into a vector element if
+  /// there is otherwise no converter available.
+  std::unique_ptr<InstrConverterBase> ScalarInsertion;
+
+  /// Finds the instruction converter for Opcode.
+  const std::unique_ptr<InstrConverterBase> &
+  findConverter(unsigned Opcode) const;
+
+  /// Initialize the Converters map.
+  void initConverters();
+
+  /// Return true if this register is a candidate for GPR->Vector reassignment.
+  bool isGPRDomain(Register Reg) const { return isGPRDomainReg(Reg, MRI); }
+
+  /// Starting from \p Reg, expand the closure as much as possible.
+  void buildClosure(Closure &, Register Reg);
+
+  /// Iterate over the closure and find the vector lanes for all registers.
+  void findVectorLanes(Closure &C, bool ToFinal, const MachineRegisterInfo *MRI);
+
+  /// Reassign the closure to the vector domain.
+  void reassign(Closure &C) const;
+
+  /// \returns true if it is profitable to reassign the closure.
+  bool isReassignmentProfitable(const Closure &C);
+
+  /// Scan Reg from definition to users and collect information.
+  void scanRegister(Register Reg, unsigned &Calls);
+
+public:
+  static char ID;
+
+  SystemZDomainReassignment() : MachineFunctionPass(ID) {
+    initializeSystemZDomainReassignmentPass(*PassRegistry::getPassRegistry());
+  }
+
+  StringRef getPassName() const override { return SYSTEMZ_DOMAINREASSIGN_NAME; }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    AU.addRequired<MachineDominatorTree>();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+};
+
+char SystemZDomainReassignment::ID = 0;
+
+} // End anonymous namespace.
+
+INITIALIZE_PASS(SystemZDomainReassignment, "systemz-domain-reassignment",
+                SYSTEMZ_DOMAINREASSIGN_NAME, false, false)
+
+/// Returns an instance of the Domain Reassignment pass.
+FunctionPass *llvm::
+createSystemZDomainReassignmentPass(SystemZTargetMachine &TM) {
+  return new SystemZDomainReassignment();
+}
+
+// Returns the converter for Opcode, or the ScalarInsertion converter if
+// there is none.
+const std::unique_ptr<InstrConverterBase> &SystemZDomainReassignment::
+findConverter(unsigned Opcode) const {
+  auto I = Converters.find(Opcode);
+  if (I != Converters.end())
+    return I->second;
+  return ScalarInsertion;
+}
+
+void SystemZDomainReassignment::initConverters() {
+  // Define some utility functions for building the Converters map.
+  auto createPseudoConverter = [&](unsigned Opc) {
+    Converters[Opc] = std::make_unique(Opc);
+  };
+  auto createCOPYConverter = [&]() {
+    Converters[SystemZ::COPY] = std::make_unique();
+  };
+  auto createVLGVConverter = [&](unsigned Opc) {
+    Converters[Opc] = std::make_unique(Opc);
+  };
+  auto createImmLoadConverter = [&](unsigned From, ImmediateType ImmT) {
+    Converters[From] = std::make_unique(From, ImmT);
+  };
+  auto createReplacer = [&](unsigned From, unsigned To) {
+    Converters[From] = std::make_unique<InstrReplacer>(From, To);
+  };
+  auto createShiftReplacer = [&](unsigned From, unsigned To) {
+    Converters[From] = std::make_unique(From, To);
+  };
+  auto createLoadReplacer = [&](unsigned From, unsigned To) {
+    Converters[From] = std::make_unique(From, To);
+  };
+  auto createStoreReplacer = [&](unsigned From, unsigned To) {
+    Converters[From] = std::make_unique(From, To);
+  };
+  auto createSExtLoadReplacer = [&](unsigned From, unsigned To) {
+    Converters[From] = std::make_unique(From, To);
+  };
+  auto createZExtLoadReplacer = [&](unsigned From, unsigned To) {
+    Converters[From] = std::make_unique(From, To);
+  };
+  auto createRegExtReplacer = [&](unsigned From, unsigned To, unsigned LowOpc) {
+    Converters[From] = std::make_unique(From, To, LowOpc);
+  };
+  auto createRegMemReplacer = [&](unsigned From, unsigned To) {
+    Converters[From] = std::make_unique(From, To);
+  };
+  auto createRegImmReplacer = [&](unsigned From, unsigned To,
+                                  ImmediateType ImmT) {
+    Converters[From] = std::make_unique(From, To, ImmT);
+  };
+  auto createMulReplacer = [&](unsigned From, unsigned To, ImmediateType ImmT) {
+    Converters[From] = std::make_unique(From, To, ImmT);
+  };
+  auto createFPIntConvReplacer = [&](unsigned From, unsigned To) {
+    Converters[From] = std::make_unique(From, To);
+  };
+  auto createCompareTMReplacer = [&](unsigned From, unsigned To) {
+    Converters[From] = std::make_unique<CompareTMReplacer>(From, To);
+  };
+
+  // Pseudo converters.
+  createPseudoConverter(SystemZ::PHI);
+  createPseudoConverter(SystemZ::IMPLICIT_DEF);
+  createCOPYConverter();
+
+  // Vector element extractions.
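+  // Reassigning the users of an extracted element removes the VLGV
+  // itself: the value simply remains in its vector lane.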
+ createVLGVConverter(SystemZ::VLGVG); + createVLGVConverter(SystemZ::VLGVF); + createVLGVConverter(SystemZ::VLGVH); + createVLGVConverter(SystemZ::VLGVB); + + // Immediate loads + createImmLoadConverter(SystemZ::LGHI, SE16); + createImmLoadConverter(SystemZ::LHIMux, SE16); + createImmLoadConverter(SystemZ::LLILL, ZE16); + createImmLoadConverter(SystemZ::LLILH, ZELH16); + createImmLoadConverter(SystemZ::LLILF, ZE32); + createImmLoadConverter(SystemZ::LGFI, SE32); + createImmLoadConverter(SystemZ::IIFMux, UInt32); + + // Register with register instructions + createReplacer(SystemZ::AGRK, SystemZ::VAG); + createReplacer(SystemZ::ARK, SystemZ::VAF); + createReplacer(SystemZ::SGRK, SystemZ::VSG); + createReplacer(SystemZ::SRK, SystemZ::VSF); + createReplacer(SystemZ::LCGR, SystemZ::VLCG); + createReplacer(SystemZ::LCR, SystemZ::VLCF); + createReplacer(SystemZ::NRK, SystemZ::VN); + createReplacer(SystemZ::ORK, SystemZ::VO); + createReplacer(SystemZ::XRK, SystemZ::VX); + createReplacer(SystemZ::NGRK, SystemZ::VN); + createReplacer(SystemZ::OGRK, SystemZ::VO); + createReplacer(SystemZ::XGRK, SystemZ::VX); + + // Shifts + createShiftReplacer(SystemZ::SLLG, SystemZ::VESLG); + createShiftReplacer(SystemZ::SLLK, SystemZ::VESLF); + createShiftReplacer(SystemZ::SRLG, SystemZ::VESRLG); + createShiftReplacer(SystemZ::SRLK, SystemZ::VESRLF); + createShiftReplacer(SystemZ::SRAG, SystemZ::VESRAG); + createShiftReplacer(SystemZ::SRAK, SystemZ::VESRAF); + + // Loads and stores + createLoadReplacer(SystemZ::LG, SystemZ::VLEG); + createLoadReplacer(SystemZ::LMux, SystemZ::VLEF); + createStoreReplacer(SystemZ::STG, SystemZ::VSTEG); + createStoreReplacer(SystemZ::STMux, SystemZ::VSTEF); + createStoreReplacer(SystemZ::ST, SystemZ::VSTEF); + createStoreReplacer(SystemZ::STHMux, SystemZ::VSTEH); + createStoreReplacer(SystemZ::STH, SystemZ::VSTEH); + createStoreReplacer(SystemZ::STCMux, SystemZ::VSTEB); + createStoreReplacer(SystemZ::STC, SystemZ::VSTEB); + // VSTER (z15, rare)? 
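+  // Truncating stores need no extra instruction: the narrower VSTE*
+  // simply stores the low part of the value's lane.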
+ + // Extensions from memory + createSExtLoadReplacer(SystemZ::LGF, SystemZ::VUPHF); + createSExtLoadReplacer(SystemZ::LHMux, SystemZ::VUPHH); + createZExtLoadReplacer(SystemZ::LLGF, SystemZ::VLLEZF); + createZExtLoadReplacer(SystemZ::LLGH, SystemZ::VLLEZH); + createZExtLoadReplacer(SystemZ::LLGC, SystemZ::VLLEZB); + createZExtLoadReplacer(SystemZ::LLHMux, SystemZ::VLLEZH); + createZExtLoadReplacer(SystemZ::LLCMux, SystemZ::VLLEZB); + + // Extensions of register + createRegExtReplacer(SystemZ::LGFR, SystemZ::VUPHF, SystemZ::VUPLF); + createRegExtReplacer(SystemZ::LLGFR, SystemZ::VUPLHF, SystemZ::VUPLLF); + createRegExtReplacer(SystemZ::LHR, SystemZ::VUPHH, SystemZ::VUPLHW); + createRegExtReplacer(SystemZ::LLHRMux, SystemZ::VUPLHH, SystemZ::VUPLLH); + + // Register with memory instructions + createRegMemReplacer(SystemZ::AG, SystemZ::VAG); + createRegMemReplacer(SystemZ::A, SystemZ::VAF); + createRegMemReplacer(SystemZ::SG, SystemZ::VSG); + createRegMemReplacer(SystemZ::S, SystemZ::VSF); + createRegMemReplacer(SystemZ::N, SystemZ::VN); + createRegMemReplacer(SystemZ::NG, SystemZ::VN); + createRegMemReplacer(SystemZ::O, SystemZ::VO); + createRegMemReplacer(SystemZ::OG, SystemZ::VO); + createRegMemReplacer(SystemZ::X, SystemZ::VX); + createRegMemReplacer(SystemZ::XG, SystemZ::VX); + + // Register with immediate instructions + createRegImmReplacer(SystemZ::AGHIK, SystemZ::VAG, SE16); + createRegImmReplacer(SystemZ::AHIMuxK, SystemZ::VAF, SE16); + createRegImmReplacer(SystemZ::AFIMux, SystemZ::VAF, SInt32); + createRegImmReplacer(SystemZ::OILMux, SystemZ::VO, ZE16); + createRegImmReplacer(SystemZ::OILL64, SystemZ::VO, ZE16); + createRegImmReplacer(SystemZ::NILMux, SystemZ::VN, ANDLow16); + createRegImmReplacer(SystemZ::NILL64, SystemZ::VN, ANDLow16); + createRegImmReplacer(SystemZ::NIFMux, SystemZ::VN, UInt32); + createRegImmReplacer(SystemZ::XIFMux, SystemZ::VX, UInt32); + // XXX: XILF64 + XILHF64 (-1) -> VLEIG + VX + + // Integer multiplication + createMulReplacer(SystemZ::MS, SystemZ::VMEF, NoImmTy); + createMulReplacer(SystemZ::MSRKC, SystemZ::VMEF, NoImmTy); + createMulReplacer(SystemZ::MHI, SystemZ::VMEF, SE16); + createMulReplacer(SystemZ::MSFI, SystemZ::VMEF, SInt32); + + // Integer to FP conversions + createFPIntConvReplacer(SystemZ::CDGBR, SystemZ::WCDGB); + createFPIntConvReplacer(SystemZ::CDLGBR, SystemZ::WCDLGB); + if (STI->hasVectorEnhancements2()) { + createFPIntConvReplacer(SystemZ::CEFBR, SystemZ::WCEFB); + createFPIntConvReplacer(SystemZ::CELFBR, SystemZ::WCELFB); + } + + // FP to integer conversions + // TODO: Conversions with an extended/truncated result? 
+  createFPIntConvReplacer(SystemZ::CGDBR, SystemZ::WCGDB);
+  createFPIntConvReplacer(SystemZ::CLGDBR, SystemZ::WCLGDB);
+  if (STI->hasVectorEnhancements2()) {
+    createFPIntConvReplacer(SystemZ::CFEBR, SystemZ::WCFEB);
+    createFPIntConvReplacer(SystemZ::CLFEBR, SystemZ::WCLFEB);
+  }
+
+  // Comparisons / test-under-mask
+  createCompareTMReplacer(SystemZ::TMLMux, SystemZ::VTM);
+  createCompareTMReplacer(SystemZ::CHIMux, SystemZ::VTM);
+  createCompareTMReplacer(SystemZ::CGHI, SystemZ::VTM);
+
+  ScalarInsertion = std::make_unique<ScalarInserter>();
+}
+
+void SystemZDomainReassignment::buildClosure(Closure &C, Register Reg) {
+  SmallPtrSet<MachineInstr *, 16> SeenInstrs;
+  SmallVector<MachineInstr *, 8> Worklist;
+  MachineInstr *DefMI = MRI->getVRegDef(Reg);
+  assert(DefMI && "Expected a def of virt reg.");
+  Worklist.push_back(DefMI);
+  while (!Worklist.empty()) {
+    MachineInstr *CurrMI = Worklist.pop_back_val();
+    if (!SeenInstrs.insert(CurrMI).second)
+      continue;
+
+    // Check if CurrMI can be converted.
+    const std::unique_ptr<InstrConverterBase> &Converter =
+      findConverter(CurrMI->getOpcode());
+    if (Converter == ScalarInsertion) {
+      LLVM_DEBUG(dumpEnclosureMsg("no converter", CurrMI););
+      C.NonConvertible.insert(CurrMI); // These will be revisited below.
+    } else if (Converter->isLegal(CurrMI, TII, MRI)) {
+      C.Instrs.insert(CurrMI);
+    } else {
+      C.Legal = false;
+      continue;
+    }
+
+    // Add defining instructions of use-operands to Worklist.
+    if (Converter != ScalarInsertion)
+      for (unsigned OpIdx = 0; OpIdx < CurrMI->getNumExplicitOperands();
+           ++OpIdx) {
+        auto &Op = CurrMI->getOperand(OpIdx);
+        if (Op.isReg() && Op.isUse() && Op.getReg() &&
+            isGPRDomain(Op.getReg())) {
+          MachineInstr *UseDefMI = MRI->getVRegDef(Op.getReg());
+          assert(UseDefMI && "Expected a def of virt reg.");
+          Worklist.push_back(UseDefMI);
+        }
+        assert((!Op.isReg() || !Op.getReg() ||
+                Register::isVirtualRegister(Op.getReg()) || CurrMI->isCopy()) &&
+               "Expected only a COPY to use/define a phys-reg explicitly.");
+      }
+
+    // If CurrMI defines a register, insert it into the closure and add its
+    // users to the Worklist.
+    if (Register DefReg = getDefedGPRReg(CurrMI, MRI)) {
+      C.Edges.insert(DefReg);
+      for (auto &UseMI : MRI->use_nodbg_instructions(DefReg))
+        if (findConverter(UseMI.getOpcode()) != ScalarInsertion)
+          Worklist.push_back(&UseMI);
+        else
+          C.Legal = false;
+    }
+  }
+
+  if (C.Legal && !C.NonConvertible.empty()) {
+    // Revisit non-convertible instructions and check if they can all be
+    // inserted.
+    ScalarInserter ScInserter;
+    for (MachineInstr *MI : C.NonConvertible)
+      if (!ScInserter.isLegalInsertion(MI, TII, MRI, C.Instrs)) {
+        C.Legal = false;
+        break;
+      }
+
+    if (C.Legal)
+      for (MachineInstr *MI : C.NonConvertible) {
+        LLVM_DEBUG(dumpEnclosureMsg("inserting ", MI););
+        Register DefReg = MI->getOperand(0).getReg();
+        C.Instrs.insert(MI);
+        C.Edges.insert(DefReg);
+      }
+  }
+
+  if (!C.Legal) {
+    LLVM_DEBUG(dbgs() << "--- Invalidated Closure:\n";
+               C.dump(MRI););
+    return;
+  }
+
+  // Find the vector lane to use for each reassigned register.
+  // Initialize each register to live in any lane.
+  for (Register Reg : C.Edges)
+    C.Lanes[Reg] = is64BitReg(Reg, MRI) ? (G0 | G1) : (F0 | F1 | F2 | F3);
+  // First iteration finds all possible lanes for each register.
+  LLVM_DEBUG(dbgs() << "--- Constraining vector lanes:\n";);
+  findVectorLanes(C, false/*ToFinal*/, MRI);
+  // Second iteration decides on the lane to use for each register.
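+  // E.g. a 64-bit register starts out with lanes (G0 | G1) and may still
+  // be placed in either doubleword element after the first pass; the
+  // second pass below narrows each mask down to a single lane.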
+  if (C.Legal) {
+    LLVM_DEBUG(dbgs() << "--- Selecting vector lanes:\n";);
+    findVectorLanes(C, true/*ToFinal*/, MRI);
+  }
+
+  LLVM_DEBUG(if (!C.Legal) {
+      dbgs() << "--- Invalidated Closure (lanes):\n";
+      C.dump(MRI);});
+}
+
+void SystemZDomainReassignment::findVectorLanes(Closure &C, bool ToFinal,
+                                                const MachineRegisterInfo *MRI) {
+  bool Change = true;
+  while (Change) {
+    Change = false;
+    for (MachineInstr *MI : C.Instrs) {
+      std::list<MachineInstr *> WList;
+      WList.push_back(MI);
+      while (!WList.empty()) {
+        MachineInstr *CurrMI = WList.front();
+        WList.pop_front();
+        assert(C.Instrs.count(CurrMI) && "Expected MI inside Closure.");
+
+        if (findConverter(CurrMI->getOpcode())
+              ->findLanes(CurrMI, C.Lanes, WList, ToFinal, MRI))
+          Change = true;
+
+        // Check that all operands have at least one possible lane.
+        for (auto &Op : CurrMI->explicit_operands())
+          if (Op.isReg() && C.Lanes.find(Op.getReg()) != C.Lanes.end() &&
+              !C.Lanes[Op.getReg()]) {
+            C.Legal = false;
+            assert(!ToFinal && "Could not select lane.");
+            LLVM_DEBUG(dbgs() << "No lanes:";
+                       CurrMI->dump(););
+            return;
+          }
+      }
+    }
+  }
+}
+
+void SystemZDomainReassignment::reassign(Closure &C) const {
+  assert(C.Legal && "Cannot convert illegal closure");
+
+  // Iterate over all instructions in the closure and convert each one using
+  // the appropriate converter.
+  SmallVector<MachineInstr *, 8> ToErase;
+  for (auto *MI : C.Instrs)
+    if (findConverter(MI->getOpcode())->convertInstr(MI, TII, MRI, C.Lanes))
+      ToErase.push_back(MI);
+
+  // Iterate over all registers in the closure and replace them with
+  // registers in the destination domain.
+  for (Register Reg : C.edges()) {
+    for (auto &MO : MRI->use_operands(Reg)) {
+      if (!MO.getSubReg())
+        continue;
+
+      // Allow a COPY of a subreg if it was inserted during conversion, for
+      // instance with a WCDGB that uses the subreg_h64 of the vector reg.
+      // These combinations of RC/SubReg should only occur as part of such
+      // a conversion.
+      const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg());
+      unsigned SRIdx = MO.getSubReg();
+      if (MO.getParent()->isCopy() &&
+          ((RC == &SystemZ::GR64BitRegClass && SRIdx == SystemZ::subreg_h64) ||
+           (RC == &SystemZ::GR32BitRegClass && SRIdx == SystemZ::subreg_h32)))
+        continue;
+
+      // Remove all subregister references as they are not valid in the
+      // destination domain.
+      MO.setSubReg(0);
+    }
+    assert(isGPRDomain(Reg) && "Expected all regs in closure to be GPRs.");
+    MRI->setRegClass(Reg, &SystemZ::VR128BitRegClass);
+  }
+
+  for (auto *MI : ToErase)
+    MI->eraseFromParent();
+}
+
+bool SystemZDomainReassignment::
+isReassignmentProfitable(const Closure &C) {
+  assert(C.Legal && "Cannot calculate cost for illegal closure");
+  double ConversionCost = 0.0;
+  for (auto *MI : C.Instrs)
+    ConversionCost += findConverter(MI->getOpcode())->getExtraCost(MI, MRI);
+
+  LLVM_DEBUG(if (C.NumCalls)
+               dbgs() << "Calls inside closure: " << C.NumCalls << "\n";);
+  double TotalCost = ConversionCost + C.NumCalls * 2;
+  LLVM_DEBUG(dbgs() << "Total extra cost: " << TotalCost << "\n\n");
+  (void)TotalCost; // Only used for the cost model TODO below.
+
+  // An intervening call would force any assigned vector registers to be
+  // spilled around it.
+  if (C.NumCalls)
+    return false;
+
+  // TODO: Base the decision on the computed cost instead:
+  //   return TotalCost < 0.0;
+  return true;
+}
+
+bool SystemZDomainReassignment::runOnMachineFunction(MachineFunction &MF) {
+  bool Changed = false;
+  STI = &MF.getSubtarget<SystemZSubtarget>();
+  TII = static_cast<const SystemZInstrInfo *>(STI->getInstrInfo());
+  MRI = &MF.getRegInfo();
+  MDT = &getAnalysis<MachineDominatorTree>();
+
+  if (skipFunction(MF.getFunction()) || !STI->hasVector() || DisableDomReass)
+    return false;
+  assert(MRI->isSSA() && "Expected MIR to be in SSA form");
+
+  VirtRegLiveness VRLiveness;
+  VRLiveness.compute_and_setkills(MRI, MDT, &MF);
+  if (DumpVRLiveness)
+    VRLiveness.dumpMF(&MF);
+
+  LLVM_DEBUG(
+      dbgs() << "***** Machine Function before Domain Reassignment *****\n");
+  LLVM_DEBUG(MF.print(dbgs()));
+
+  // All edges that are included in some closure.
+  DenseSet<Register> EnclosedEdges;
+#ifndef NDEBUG
+  std::set<MachineInstr *> EnclosedInstrs;
+#endif
+
+  std::vector<Closure> Closures;               // All legal closures found.
+  std::map<Register, unsigned> Reg2ClosureIdx; // Regs in legal (saved) closures.
+  initConverters();
+
+  // Go over all virtual registers and calculate a closure.
+  for (unsigned Idx = 0; Idx < MRI->getNumVirtRegs(); ++Idx) {
+    Register Reg = Register::index2VirtReg(Idx);
+
+    // Skip uninteresting regs.
+    if (MRI->reg_nodbg_empty(Reg) || EnclosedEdges.count(Reg) ||
+        !isGPRDomain(Reg))
+      continue;
+
+    // Calculate the closure starting with Reg.
+    LLVM_DEBUG(dbgs() << "--- Calculating closure beginning with "
+                      << "virtual register %" << Idx << ". ---\n";);
+    Closure C;
+    buildClosure(C, Reg);
+
+    // Keep track of all enclosed edges and instructions.
+    for (Register E : C.edges()) {
+      assert(!EnclosedEdges.count(E) && "Edge already in other closure.");
+      EnclosedEdges.insert(E);
+    }
+#ifndef NDEBUG
+    for (MachineInstr *MI : C.Instrs) {
+      assert(!EnclosedInstrs.count(MI) && "Instruction already in other closure.");
+      EnclosedInstrs.insert(MI);
+    }
+#endif
+
+    // Collect all closures that can potentially be converted.
+    if (!C.empty() && C.Legal) {
+      LLVM_DEBUG(dbgs() << "--- Legal closure found:\n";);
+      LLVM_DEBUG(C.dump(MRI));
+      Closures.push_back(std::move(C));
+      unsigned ClosureIdx = Closures.size() - 1;
+      for (Register E : Closures.back().edges())
+        Reg2ClosureIdx[E] = ClosureIdx;
+    }
+  }
+
+  // Pre-scan the function.
+  for (auto &MBB : MF) {
+    LiveClosuresTracker LCT(Closures, VRLiveness, Reg2ClosureIdx);
+    LCT.enterMBB(&MBB);
+    for (const MachineInstr &MI : MBB) {
+      LCT.advance(&MI);
+      LCT.preScan(&MI);
+    }
+  }
+
+  // Decide which legal closures to reassign.
+  for (Closure &C : Closures) {
+    if (!isReassignmentProfitable(C)) {
+      LLVM_DEBUG(dbgs() << "--- Closure not profitable. ---\n";);
+      C.Profitable = false;
+    }
+  }
+
+  // Reassign profitable closures.
+  for (Closure &C : Closures) {
+    if (C.Profitable) {
+      LLVM_DEBUG(dbgs() << "--- Reassigning closure: ---\n";);
+      LLVM_DEBUG(C.dump(MRI));
+      reassign(C);
+      LLVM_DEBUG(dbgs() << "--- Closure reassigned. ---\n";);
+      Changed = true;
+    }
+  }
+
+  LLVM_DEBUG(
+      dbgs() << "\n***** Machine Function after Domain Reassignment *****\n");
+  LLVM_DEBUG(MF.print(dbgs()));
+
+  // TODO: Make this optional or remove?
+  MF.verify(this, "After SystemZ Domain reassignment.");
+
+  return Changed;
+}
+
+
+// Findings:
+// - fp2int conversion + store seems to be done in GPRs.
+// - LA seemed potentially interesting, but seems mostly used with compares.
+
+// - Not sure X86 is iterating correctly over the "use operands of the users".
+
+// - Overflow implications?
Index: llvm/lib/Target/SystemZ/SystemZInstrInfo.h
===================================================================
--- llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -334,6 +334,23 @@
   // handled. The compare instruction is *not* changed.
   bool prepareCompareSwapOperands(MachineBasicBlock::iterator MBBI) const;
 
+  // Returns a reference to the MachineOperand of MI containing the CC mask.
+  MachineOperand& getCCMaskMO(MachineInstr *MI) const;
+
+  // Same function but for const MachineInstrs.
+  const MachineOperand& getCCMaskMO(const MachineInstr *MI) const {
+    return getCCMaskMO(const_cast<MachineInstr *>(MI));
+  }
+
+  // Find the users of CC defined by MBBI and put them in CCUsers. Return
+  // false if the analysis fails (for instance in case of a live-out CC).
+  bool findCCUsers(MachineBasicBlock::iterator MBBI,
+                   SmallVectorImpl<MachineInstr *> &CCUsers) const;
+
+  // Same function but for const MachineInstrs.
+  bool findCCUsers(const MachineInstr *MI,
+                   SmallVectorImpl<const MachineInstr *> &CCUsers) const;
+
   // If Opcode is a LOAD opcode for with an associated LOAD AND TRAP
   // operation exists, returh the opcode for the latter, otherwise return 0.
   unsigned getLoadAndTrap(unsigned Opcode) const;
Index: llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
===================================================================
--- llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -1828,9 +1828,35 @@
          MBBI->getOperand(1).isReg() && !MBBI->mayLoad() &&
          "Not a compare reg/reg.");
 
+  SmallVector<MachineInstr *, 4> CCUsers;
+  if (!findCCUsers(MBBI, CCUsers))
+    return false;
+
+  // Update all CC users.
+  for (unsigned Idx = 0; Idx < CCUsers.size(); ++Idx) {
+    MachineOperand &CCMaskMO = getCCMaskMO(CCUsers[Idx]);
+    unsigned NewCCMask = SystemZ::reverseCCMask(CCMaskMO.getImm());
+    CCMaskMO.setImm(NewCCMask);
+  }
+
+  return true;
+}
+
+MachineOperand& SystemZInstrInfo::getCCMaskMO(MachineInstr *MI) const {
+  assert(MI->readsRegister(SystemZ::CC) && "Expected CC use");
+  unsigned Flags = MI->getDesc().TSFlags;
+  unsigned FirstOpNum = ((Flags & SystemZII::CCMaskFirst) ?
+                         0 : MI->getNumExplicitOperands() - 2);
+  MachineOperand &CCMaskMO = MI->getOperand(FirstOpNum + 1);
+  return CCMaskMO;
+}
+
+bool SystemZInstrInfo::
+findCCUsers(MachineBasicBlock::iterator MBBI,
+            SmallVectorImpl<MachineInstr *> &CCUsers) const {
+  assert(MBBI->definesRegister(SystemZ::CC) && "Expected CC def");
   MachineBasicBlock *MBB = MBBI->getParent();
   bool CCLive = true;
-  SmallVector<MachineInstr *, 4> CCUsers;
   for (MachineBasicBlock::iterator Itr = std::next(MBBI);
        Itr != MBB->end(); ++Itr) {
     if (Itr->readsRegister(SystemZ::CC)) {
@@ -1851,17 +1877,17 @@
     if (LiveRegs.contains(SystemZ::CC))
       return false;
   }
+  return true;
+}
 
-  // Update all CC users.
-  for (unsigned Idx = 0; Idx < CCUsers.size(); ++Idx) {
-    unsigned Flags = CCUsers[Idx]->getDesc().TSFlags;
-    unsigned FirstOpNum = ((Flags & SystemZII::CCMaskFirst) ?
-                           0 : CCUsers[Idx]->getNumExplicitOperands() - 2);
-    MachineOperand &CCMaskMO = CCUsers[Idx]->getOperand(FirstOpNum + 1);
-    unsigned NewCCMask = SystemZ::reverseCCMask(CCMaskMO.getImm());
-    CCMaskMO.setImm(NewCCMask);
-  }
-
+bool SystemZInstrInfo::
+findCCUsers(const MachineInstr *MI,
+            SmallVectorImpl<const MachineInstr *> &CCUsers) const {
+  SmallVector<MachineInstr *, 4> CCUsers_tmp;
+  if (!findCCUsers(const_cast<MachineInstr *>(MI), CCUsers_tmp))
+    return false;
+  for (auto *I : CCUsers_tmp)
+    CCUsers.push_back(I);
   return true;
 }
Index: llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
===================================================================
--- llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
+++ llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
@@ -253,6 +253,17 @@
 }
 
 void SystemZPassConfig::addPreRegAlloc() {
+  if (getOptLevel() != CodeGenOpt::None) {
+    addPass(createSystemZDomainReassignmentPass(getSystemZTargetMachine()));
+    // Hoist immediate loads out of loops.
+    addPass(&EarlyMachineLICMID);
+
+    // Unfortunately none of these passes can remove identical immediate load
+    // instructions.
+    // addPass(&MachineCSEID);
+    // addPass(&MachineSinkingID);
+    // addPass(&DeadMachineInstructionElimID);
+  }
   addPass(createSystemZCopyPhysRegsPass(getSystemZTargetMachine()));
 }
Index: llvm/test/CodeGen/SystemZ/buildvector-00.ll
===================================================================
--- llvm/test/CodeGen/SystemZ/buildvector-00.ll
+++ llvm/test/CodeGen/SystemZ/buildvector-00.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -disable-domreass | FileCheck %s
 
 ; Test that the dag combiner can understand that some vector operands are
 ; all-zeros and then optimize the logical operations.
Index: llvm/test/CodeGen/SystemZ/dag-combine-01.ll
===================================================================
--- llvm/test/CodeGen/SystemZ/dag-combine-01.ll
+++ llvm/test/CodeGen/SystemZ/dag-combine-01.ll
@@ -2,7 +2,7 @@
 ; incorrectly drop a chain dependency to a store previously chained to
 ; one of two combined loads.
 ;
-; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -disable-domreass | FileCheck %s
 
 @A = common global [2048 x float] zeroinitializer, align 4
Index: llvm/test/CodeGen/SystemZ/dag-combine-03.ll
===================================================================
--- llvm/test/CodeGen/SystemZ/dag-combine-03.ll
+++ llvm/test/CodeGen/SystemZ/dag-combine-03.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=s390x-linux-gnu -mcpu=z13 < %s | FileCheck %s
+; RUN: llc -mtriple=s390x-linux-gnu -mcpu=z13 -disable-domreass < %s | FileCheck %s
 
 ; Test that DAGCombiner gets helped by getKnownBitsForTargetNode() when
 ; BITCAST nodes are involved on a big-endian target.
Index: llvm/test/CodeGen/SystemZ/domain-reassignment-01.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/SystemZ/domain-reassignment-01.ll
@@ -0,0 +1,238 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 -debug-only=systemz-domain-reassignment \
+; RUN:   -verify-machineinstrs 2>&1 | FileCheck %s
+; REQUIRES: asserts
+;
+; Test domain reassignments of loads and stores.
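+;
+; VLE*/VSTE* only have a 12-bit displacement, so closures whose load or
+; store needs a larger offset (or whose value escapes into a physical
+; register) are invalidated rather than reassigned.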
+ +define void @fun0(i64* %Src, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun0: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VSTEG {{.*}}noreg, 0 + %Val = load i64, i64* %Src + store i64 %Val, i64* %Dst + ret void +} + +define void @fun1(i64* %Src, i64* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK: # Machine code for function fun1: IsSSA, TracksLiveness +; CHECK: --- offset +; CHECK: Invalidated Closure: + %Ptr = getelementptr i64, i64* %Src, i64 1000 + %Val = load i64, i64* %Ptr + store i64 %Val, i64* %Dst + ret void +} + +define void @fun2(i64* %Src, i64* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK: # Machine code for function fun2: IsSSA, TracksLiveness +; CHECK: --- offset +; CHECK: Invalidated Closure: + %Val = load i64, i64* %Src + %Ptr = getelementptr i64, i64* %Dst, i64 1000 + store i64 %Val, i64* %Ptr + ret void +} + +define i64 @fun3(i64* %Src, i64* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK: # Machine code for function fun3: IsSSA, TracksLiveness +; CHECK: --- physreg +; CHECK: Invalidated Closure: + %Val = load i64, i64* %Src + ret i64 %Val +} + +define void @fun4(i32* %Src, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun4: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VSTEF {{.*}}noreg, 0 + %Val = load i32, i32* %Src + store i32 %Val, i32* %Dst + ret void +} + +define void @fun5(i32* %Src, i32* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK: # Machine code for function fun5: IsSSA, TracksLiveness +; CHECK: --- offset +; CHECK: Invalidated Closure: + %Ptr = getelementptr i32, i32* %Src, i32 2000 + %Val = load i32, i32* %Ptr + store i32 %Val, i32* %Dst + ret void +} + +define void @fun6(i32* %Src, i32* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK: # Machine code for function fun6: IsSSA, TracksLiveness +; CHECK: --- offset +; CHECK: Invalidated Closure: + %Val = load i32, i32* %Src + %Ptr = getelementptr i32, i32* %Dst, i32 2000 + store i32 %Val, i32* %Ptr + ret void +} + +define i32 @fun7(i32* %Src, i32* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK: # Machine code for function fun7: IsSSA, TracksLiveness +; CHECK: --- physreg +; CHECK: Invalidated Closure: + %Val = load i32, i32* %Src + ret i32 %Val +} + +;;; Truncating stores + +define void @fun8(i32* %Src, i16* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun8: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEF {{.*}}noreg, 0 +; CHECK-NEXT: VSTEH killed [[REG0]]{{.*}}noreg, 1 + %Val = load i32, i32* %Src + %Res = trunc i32 %Val to i16 + store i16 %Res, i16* %Dst + ret void +} + +define void @fun9(i32* %Src, i8* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun9: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEF {{.*}}noreg, 0 +; CHECK-NEXT: VSTEB killed [[REG0]]{{.*}}noreg, 3 + %Val = load i32, i32* %Src + %Res = trunc i32 %Val to i8 + store i8 %Res, i8* %Dst + ret void +} + +define void @fun10(i64* %Src, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun10: IsSSA, TracksLiveness +; CHECK: 
[[REG0:%[0-9]+]]:vr128bit = VLEG {{.*}}noreg, 0
+; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = COPY killed [[REG0]]
+; CHECK-NEXT: VSTEF killed [[REG1]]{{.*}}noreg, 1
+  %Val = load i64, i64* %Src
+  %Res = trunc i64 %Val to i32
+  store i32 %Res, i32* %Dst
+  ret void
+}
+
+define void @fun11(i64* %Src, i16* %Dst) {
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK: # Machine code for function fun11: IsSSA, TracksLiveness
+; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEG {{.*}}noreg, 0
+; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = COPY killed [[REG0]]
+; CHECK-NEXT: VSTEH killed [[REG1]]{{.*}}noreg, 3
+  %Val = load i64, i64* %Src
+  %Res = trunc i64 %Val to i16
+  store i16 %Res, i16* %Dst
+  ret void
+}
+
+define void @fun12(i64* %Src, i8* %Dst) {
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK: # Machine code for function fun12: IsSSA, TracksLiveness
+; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEG {{.*}}noreg, 0
+; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = COPY killed [[REG0]]
+; CHECK-NEXT: VSTEB killed [[REG1]]{{.*}}noreg, 7
+  %Val = load i64, i64* %Src
+  %Res = trunc i64 %Val to i8
+  store i8 %Res, i8* %Dst
+  ret void
+}
+
+;;; sign extending loads (allows just one unpack)
+
+; LGF
+define void @fun13(i32* %Src, i64* %Dst) {
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun13: IsSSA, TracksLiveness
+; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEF {{.*}}noreg, 0
+; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VUPHF [[REG0]]
+; CHECK-NEXT: VSTEG killed [[REG1]]{{.*}}noreg, 0
+  %Val = load i32, i32* %Src
+  %Res = sext i32 %Val to i64
+  store i64 %Res, i64* %Dst
+  ret void
+}
+
+; LHMux
+define void @fun14(i16* %Src, i32* %Dst) {
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun14: IsSSA, TracksLiveness
+; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEH {{.*}}noreg, 0
+; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VUPHH [[REG0]]
+; CHECK-NEXT: VSTEF killed [[REG1]]{{.*}}noreg, 0
+  %Val = load i16, i16* %Src
+  %Res = sext i16 %Val to i32
+  store i32 %Res, i32* %Dst
+  ret void
+}
+
+;;; zero extending loads (VLLEZ)
+
+; LLGF
+define void @fun15(i32* %Src, i64* %Dst) {
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun15: IsSSA, TracksLiveness
+; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLLEZF
+; CHECK-NEXT: VSTEG killed [[REG0]]{{.*}}noreg, 0
+  %Val = load i32, i32* %Src
+  %Res = zext i32 %Val to i64
+  store i64 %Res, i64* %Dst
+  ret void
+}
+
+; LLGH
+define void @fun16(i16* %Src, i64* %Dst) {
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun16: IsSSA, TracksLiveness
+; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLLEZH
+; CHECK-NEXT: VSTEG killed [[REG0]]{{.*}}noreg, 0
+  %Val = load i16, i16* %Src
+  %Res = zext i16 %Val to i64
+  store i64 %Res, i64* %Dst
+  ret void
+}
+
+; LLGC
+define void @fun17(i8* %Src, i64* %Dst) {
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun17: IsSSA, TracksLiveness
+; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLLEZB
+; CHECK-NEXT: VSTEG killed [[REG0]]{{.*}}noreg, 0
+  %Val = load i8, i8* %Src
+  %Res = zext i8 %Val to i64
+  store i64 %Res, i64* %Dst
+  ret void
+}
+
+; LLHMux
+define void @fun18(i16* %Src, i32* %Dst) {
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun18: IsSSA, TracksLiveness
+; CHECK:
[[REG:%[0-9]+]]:vr128bit = VLLEZH +; CHECK-NEXT: VSTEF killed [[REG]]{{.*}}noreg, 1 + %Val = load i16, i16* %Src + %Res = zext i16 %Val to i32 + store i32 %Res, i32* %Dst + ret void +} + +; LLCMux +define void @fun19(i8* %Src, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun19: IsSSA, TracksLiveness +; CHECK: [[REG:%[0-9]+]]:vr128bit = VLLEZB +; CHECK-NEXT: VSTEF killed [[REG]]{{.*}}noreg, 1 + %Val = load i8, i8* %Src + %Res = zext i8 %Val to i32 + store i32 %Res, i32* %Dst + ret void +} + Index: llvm/test/CodeGen/SystemZ/domain-reassignment-02.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/SystemZ/domain-reassignment-02.ll @@ -0,0 +1,704 @@ + +; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 -debug-only=systemz-domain-reassignment \ +; RUN: -verify-machineinstrs 2>&1 | FileCheck %s +; REQUIRES: asserts +; +; Test domain reassignments for arithmetic instructions + +; AG, AGRK +define void @fun0(i64* %Src0, i64* %Src1, i64* %Src2, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun0: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VAG +; CHECK: VAG +; CHECK: VSTEG {{.*}}noreg, 0 + %LHS = load i64, i64* %Src0 + %C = load volatile i64, i64* %Src2 + %RHS = load i64, i64* %Src1 + %Sum = add i64 %LHS, %RHS + %Res = add i64 %Sum, %C + store i64 %Res, i64* %Dst + ret void +} + +define void @fun1(i64* %Src0, i64* %Src1, i64* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun1: IsSSA, TracksLiveness +; CHECK: --- offset +; CHECK: Invalidated Closure: +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun1: IsSSA, TracksLiveness + %Ptr = getelementptr i64, i64* %Src0, i64 1000 + %LHS = load i64, i64* %Src0 + %RHS = load i64, i64* %Ptr + %Sum = add i64 %LHS, %RHS + store i64 %Sum, i64* %Dst + ret void +} + +; A, ARK +define void @fun2(i32* %Src0, i32* %Src1, i32* %Src2, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun2: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VAF +; CHECK: VAF +; CHECK: VSTEF {{.*}}noreg, 0 + %LHS = load i32, i32* %Src0 + %C = load volatile i32, i32* %Src2 + %RHS = load i32, i32* %Src1 + %Sum = add i32 %LHS, %RHS + %Res = add i32 %Sum, %C + store i32 %Res, i32* %Dst + ret void +} + +define void @fun3(i32* %Src0, i32* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun3: IsSSA, TracksLiveness +; CHECK: --- no converter: {{.*}}AY +; CHECK: Invalidated Closure: +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun3: IsSSA, TracksLiveness + %Ptr = getelementptr i32, i32* %Src0, i32 2000 + %LHS = load i32, i32* %Src0 + %RHS = load i32, i32* %Ptr + %Sum = add i32 %LHS, %RHS + store i32 %Sum, i32* %Dst + ret void +} + +; SG, SGRK +define void @fun4(i64* %Src0, i64* %Src1, i64* %Src2, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun4: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; 
CHECK: VLEG {{.*}}noreg, 0
+; CHECK: VSG
+; CHECK: VSG
+; CHECK: VSTEG {{.*}}noreg, 0
+  %LHS = load i64, i64* %Src0
+  %C = load volatile i64, i64* %Src2
+  %RHS = load i64, i64* %Src1
+  %Dff = sub i64 %LHS, %RHS
+  %Res = sub i64 %Dff, %C
+  store i64 %Res, i64* %Dst
+  ret void
+}
+
+define void @fun5(i64* %Src0, i64* %Src1, i64* %Dst) {
+; CHECK: ***** Machine Function before Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun5: IsSSA, TracksLiveness
+; CHECK: --- offset
+; CHECK: Invalidated Closure:
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun5: IsSSA, TracksLiveness
+  %Ptr = getelementptr i64, i64* %Src0, i64 1000
+  %LHS = load i64, i64* %Src0
+  %RHS = load i64, i64* %Ptr
+  %Dff = sub i64 %LHS, %RHS
+  store i64 %Dff, i64* %Dst
+  ret void
+}
+
+; S, SRK
+define void @fun6(i32* %Src0, i32* %Src1, i32* %Src2, i32* %Dst) {
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun6: IsSSA, TracksLiveness
+; CHECK: VLEF {{.*}}noreg, 0
+; CHECK: VLEF {{.*}}noreg, 0
+; CHECK: VLEF {{.*}}noreg, 0
+; CHECK: VSF
+; CHECK: VSF
+; CHECK: VSTEF {{.*}}noreg, 0
+  %LHS = load i32, i32* %Src0
+  %C = load volatile i32, i32* %Src2
+  %RHS = load i32, i32* %Src1
+  %Dff = sub i32 %LHS, %RHS
+  %Res = sub i32 %Dff, %C
+  store i32 %Res, i32* %Dst
+  ret void
+}
+
+define void @fun7(i32* %Src0, i32* %Src1, i32* %Dst) {
+; CHECK: ***** Machine Function before Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun7: IsSSA, TracksLiveness
+; CHECK: --- no converter: {{.*}}SY
+; CHECK: Invalidated Closure:
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun7: IsSSA, TracksLiveness
+  %Ptr = getelementptr i32, i32* %Src0, i32 2000
+  %LHS = load i32, i32* %Src0
+  %RHS = load i32, i32* %Ptr
+  %Dff = sub i32 %LHS, %RHS
+  store i32 %Dff, i32* %Dst
+  ret void
+}
+
+; MS, MSRKC
+define void @fun8(i32* %Src0, i32* %Src1, i32* %Src2, i32* %Dst) {
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun8: IsSSA, TracksLiveness
+; CHECK: [[REG0:%[0-9]+:vr128bit]] = VLEF {{.*}}noreg, 0
+; CHECK: [[REG1:%[0-9]+:vr128bit]] = VLEF {{.*}}noreg, 1
+; CHECK: [[REG2:%[0-9]+:vr128bit]] = VLEF {{.*}}noreg, 0
+; CHECK: [[REG3:%[0-9]+:vr128bit]] = VMEF killed [[REG0]], [[REG2]]
+; CHECK: [[REG4:%[0-9]+:vr128bit]] = VMOF killed [[REG3]], killed [[REG1]]
+; CHECK: VSTEF killed [[REG4]]{{.*}}noreg, 1
+  %LHS = load i32, i32* %Src0
+  %C = load volatile i32, i32* %Src2
+  %RHS = load i32, i32* %Src1
+  %Prd = mul i32 %LHS, %RHS
+  %Res = mul i32 %Prd, %C
+  store i32 %Res, i32* %Dst
+  ret void
+}
+
+define void @fun9(i32* %Src0, i32* %Src1, i32* %Dst) {
+; CHECK: ***** Machine Function before Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun9: IsSSA, TracksLiveness
+; CHECK: --- no converter: {{.*}}MSY
+; CHECK: Invalidated Closure:
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun9: IsSSA, TracksLiveness
+  %Ptr = getelementptr i32, i32* %Src0, i32 2000
+  %LHS = load i32, i32* %Src0
+  %RHS = load i32, i32* %Ptr
+  %Prd = mul i32 %LHS, %RHS
+  store i32 %Prd, i32* %Dst
+  ret void
+}
+
+; AGHIK
+define void @fun10(i64* %Src0, i64* %Src1, i64* %Dst) {
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun10: IsSSA, TracksLiveness
+; CHECK: VLEG
{{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VSG +; CHECK: VLEIG {{.*}}-16, 0 +; CHECK: VSTEG {{.*}}noreg, 0 + %LHS = load i64, i64* %Src0 + %RHS = load i64, i64* %Src1 + %Sum = sub i64 %LHS, %RHS + %Res = add i64 %Sum, -16 + store i64 %Res, i64* %Dst + ret void +} + +; AHIMuxK +define void @fun11(i32* %Src0, i32* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun11: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VAF +; CHECK: VLEIF {{.*}}-16, 0 +; CHECK: VAF +; CHECK: VSTEF {{.*}}noreg, 0 + %LHS = load i32, i32* %Src0 + %RHS = load i32, i32* %Src1 + %Sum = add i32 %LHS, %RHS + %Res = add i32 %Sum, -16 + store i32 %Res, i32* %Dst + ret void +} + +; AFIMux +define void @fun11b(i32* %Src0, i32* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun11b: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VAF +; CHECK: VLEIH {{.*}} -17297, 1 +; CHECK: VLEIH {{.*}} 1, 0 +; CHECK: VAF +; CHECK: VSTEF {{.*}}noreg, 0 + %LHS = load i32, i32* %Src0 + %RHS = load i32, i32* %Src1 + %Sum = add i32 %LHS, %RHS + %Res = add i32 %Sum, 113775 + store i32 %Res, i32* %Dst + ret void +} + +; N +define void @fun12(i32* %Src0, i32* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun12: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VN +; CHECK: VSTEF {{.*}}noreg, 0 + %LHS = load i32, i32* %Src0 + %RHS = load i32, i32* %Src1 + %Res = and i32 %LHS, %RHS + store i32 %Res, i32* %Dst + ret void +} + +; NG +define void @fun13(i64* %Src0, i64* %Src1, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun13: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VN +; CHECK: VSTEG {{.*}}noreg, 0 + %LHS = load i64, i64* %Src0 + %RHS = load i64, i64* %Src1 + %Res = and i64 %LHS, %RHS + store i64 %Res, i64* %Dst + ret void +} + +; O +define void @fun14(i32* %Src0, i32* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun14: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VO +; CHECK: VSTEF {{.*}}noreg, 0 + %LHS = load i32, i32* %Src0 + %RHS = load i32, i32* %Src1 + %Res = or i32 %LHS, %RHS + store i32 %Res, i32* %Dst + ret void +} + +; OG +define void @fun15(i64* %Src0, i64* %Src1, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun15: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VO +; CHECK: VSTEG {{.*}}noreg, 0 + %LHS = load i64, i64* %Src0 + %RHS = load i64, i64* %Src1 + %Res = or i64 %LHS, %RHS + store i64 %Res, i64* %Dst + ret void +} + +; X +define void @fun16(i32* %Src0, i32* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun16: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VX +; CHECK: VSTEF {{.*}}noreg, 0 + %LHS = load i32, i32* %Src0 + %RHS = load i32, i32* %Src1 + %Res = xor i32 %LHS, %RHS + store i32 %Res, i32* %Dst + ret void +} + +; XG +define void @fun17(i64* %Src0, 
i64* %Src1, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun17: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VX +; CHECK: VSTEG {{.*}}noreg, 0 + %LHS = load i64, i64* %Src0 + %RHS = load i64, i64* %Src1 + %Res = xor i64 %LHS, %RHS + store i64 %Res, i64* %Dst + ret void +} + +; OILMux +define void @fun18(i32* %Src0, i32* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun18: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VAF +; CHECK: VLEIF {{.*}} 1, 0 +; CHECK: VO +; CHECK: VSTEF {{.*}}noreg, 0 + %LHS = load i32, i32* %Src0 + %RHS = load i32, i32* %Src1 + %Sum = add i32 %LHS, %RHS + %Res = or i32 %Sum, 1 + store i32 %Res, i32* %Dst + ret void +} + +define void @fun19(i32* %Src0, i32* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun19: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VAF +; CHECK: VLEIH {{.*}} -32768, 1 +; CHECK: VLEIH {{.*}} 0, 0 +; CHECK: VO +; CHECK: VSTEF {{.*}}noreg, 0 + %LHS = load i32, i32* %Src0 + %RHS = load i32, i32* %Src1 + %Sum = add i32 %LHS, %RHS + %Res = or i32 %Sum, 32768 + store i32 %Res, i32* %Dst + ret void +} + +; OILL64 +define void @fun20(i64* %Src0, i64* %Src1, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun20: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VAG +; CHECK: VLEIG {{.*}}, 1, 0 +; CHECK: VO +; CHECK: VSTEG {{.*}}noreg, 0 + %LHS = load i64, i64* %Src0 + %RHS = load i64, i64* %Src1 + %Sum = add i64 %LHS, %RHS + %Res = or i64 %Sum, 1 + store i64 %Res, i64* %Dst + ret void +} + +; Immediate load with VLEIG 0 (Hi16 == 0, Lo16:15 == 1) +define void @fun20_b(i64* %Src0, i64* %Src1, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun20_b: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VAG +; CHECK: VLEIG {{.*}}, 0, 0 +; CHECK: VLEIH {{.*}}, -32752, 3 +; CHECK: VO +; CHECK: VSTEG {{.*}}noreg, 0 + %LHS = load i64, i64* %Src0 + %RHS = load i64, i64* %Src1 + %Sum = add i64 %LHS, %RHS + %Res = or i64 %Sum, 32784 + store i64 %Res, i64* %Dst + ret void +} + +; NILMux +define void @fun21(i32* %Src0, i32* %Dst, i32* %Dst1) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun21: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK-NEXT: VSTEF +; CHECK: VLEIF {{.*}} -3, 0 +; CHECK: VN +; CHECK: VSTEF {{.*}}noreg, 0 + %i = load i32, i32* %Src0 + store i32 %i, i32* %Dst1 + %i5 = and i32 %i, -3 + store i32 %i5, i32* %Dst + ret void +} + +define void @fun22(i32* %Src0, i32* %Dst, i32* %Dst1) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun22: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK-NEXT: VSTEF +; CHECK: VLEIH {{.*}} 16, 1 +; CHECK: VLEIH {{.*}} -1, 0 +; CHECK: VN +; CHECK: VSTEF {{.*}}noreg, 0 + %i = load i32, i32* %Src0 + store i32 %i, i32* %Dst1 + %i5 = and i32 %i, -65520 + store i32 %i5, i32* %Dst + ret void +} + +; NILL64 +define void @fun23(i64* %Src0, i64* %Dst, i64* %Dst1) { +; CHECK: ***** Machine 
Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun23: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK-NEXT: VSTEG +; CHECK: VLEIG {{.*}} -3, 0 +; CHECK: VN +; CHECK: VSTEG {{.*}}noreg, 0 + %i = load i64, i64* %Src0 + store i64 %i, i64* %Dst1 + %i5 = and i64 %i, -3 + store i64 %i5, i64* %Dst + ret void +} + +; Immediate load with VLEIG -1 (Hi16 == -1, Lo16:15 == 0) +define void @fun24(i64* %Src0, i64* %Dst, i64* %Dst1) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun24: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK-NEXT: VSTEG +; CHECK: VLEIG {{.*}} -1, 0 +; CHECK: VLEIH {{.*}} 16, 3 +; CHECK: VN +; CHECK: VSTEG {{.*}}noreg, 0 + %i = load i64, i64* %Src0 + store i64 %i, i64* %Dst1 + %i5 = and i64 %i, -65520 + store i64 %i5, i64* %Dst + ret void +} + +; NIFMux +define void @fun25(i16* %Src, i16* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun25: IsSSA, TracksLiveness +; CHECK: VLLEZH +; CHECK: VLEIF {{.*}} 1, 1 +; CHECK: VN +; CHECK: VSTEH {{.*}}noreg, 3 + %i = load i16, i16* %Src, align 2 + %i2 = and i16 %i, 1 + store i16 %i2, i16* %Dst + ret void +} + +define void @fun26(i32* %Src, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun26: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEIH {{.*}} -256, 1 +; CHECK: VLEIH {{.*}} 0, 0 +; CHECK: VN +; CHECK: VSTEF {{.*}}noreg, 0 +bb: + %i = load i32, i32* %Src + br label %bb1 + +bb1: + %i2 = and i32 %i, 65280 + store i32 %i2, i32* %Dst + ret void +} + +; ORK +define void @fun27(i32* %Src0, i32* %Src1, i32* %Src2, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun27: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VAF +; CHECK: VO +; CHECK: VSTEF {{.*}}noreg, 0 + %LHS = load i32, i32* %Src0 + %C = load volatile i32, i32* %Src2 + %RHS = load i32, i32* %Src1 + %Sum = add i32 %LHS, %RHS + %Res = or i32 %Sum, %C + store i32 %Res, i32* %Dst + ret void +} + +; NRK +define void @fun28(i32* %Src0, i32* %Src1, i32* %Src2, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun28: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VAF +; CHECK: VN +; CHECK: VSTEF {{.*}}noreg, 0 + %LHS = load i32, i32* %Src0 + %C = load volatile i32, i32* %Src2 + %RHS = load i32, i32* %Src1 + %Sum = add i32 %LHS, %RHS + %Res = and i32 %Sum, %C + store i32 %Res, i32* %Dst + ret void +} + +; XRK +define void @fun028(i32* %Src0, i32* %Src1, i32* %Src2, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun028: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VAF +; CHECK: VX +; CHECK: VSTEF {{.*}}noreg, 0 + %LHS = load i32, i32* %Src0 + %C = load volatile i32, i32* %Src2 + %RHS = load i32, i32* %Src1 + %Sum = add i32 %LHS, %RHS + %Res = xor i32 %Sum, %C + store i32 %Res, i32* %Dst + ret void +} + +; XIFMux +define void @fun29(i32* %Src0, i32* %Src1, i32* %Src2, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code 
for function fun29: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VAF +; CHECK: VLEIF {{.*}}, -1, 0 +; CHECK: VX +; CHECK: VSTEF {{.*}}noreg, 0 + %LHS = load i32, i32* %Src0 + %C = load volatile i32, i32* %Src2 + %RHS = load i32, i32* %Src1 + %Sum = add i32 %LHS, %RHS + %Res = xor i32 %Sum, -1 + store i32 %Res, i32* %Dst + ret void +} + +; NGRK +define void @fun30(i64* %Src0, i64* %Src1, i64* %Src2, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun30: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VAG +; CHECK: VN +; CHECK: VSTEG {{.*}}noreg, 0 + %LHS = load i64, i64* %Src0 + %C = load volatile i64, i64* %Src2 + %RHS = load i64, i64* %Src1 + %Sum = add i64 %LHS, %RHS + %Res = and i64 %Sum, %C + store i64 %Res, i64* %Dst + ret void +} + +; OGRK +define void @fun31(i64* %Src0, i64* %Src1, i64* %Src2, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun31: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VAG +; CHECK: VO +; CHECK: VSTEG {{.*}}noreg, 0 + %LHS = load i64, i64* %Src0 + %C = load volatile i64, i64* %Src2 + %RHS = load i64, i64* %Src1 + %Sum = add i64 %LHS, %RHS + %Res = or i64 %Sum, %C + store i64 %Res, i64* %Dst + ret void +} + +; XGRK +define void @fun32(i64* %Src0, i64* %Src1, i64* %Src2, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun32: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VAG +; CHECK: VX +; CHECK: VSTEG {{.*}}noreg, 0 + %LHS = load i64, i64* %Src0 + %C = load volatile i64, i64* %Src2 + %RHS = load i64, i64* %Src1 + %Sum = add i64 %LHS, %RHS + %Res = xor i64 %Sum, %C + store i64 %Res, i64* %Dst + ret void +} + +; MHI -> VMEF +define void @fun33(i32* %Src0, i32* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun33: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+:vr128bit]] = VLEF {{.*}}noreg, 0 +; CHECK: [[REG1:%[0-9]+:vr128bit]] = VLEF {{.*}}noreg, 0 +; CHECK: [[REG2:%[0-9]+:vr128bit]] = VAF killed [[REG0]], [[REG1]] +; CHECK: [[REG3:%[0-9]+:vr128bit]] = VLEIF {{.*}}-3, 0 +; CHECK: [[REG4:%[0-9]+:vr128bit]] = VMEF killed [[REG2]], [[REG3]] +; CHECK: VSTEF killed [[REG4]]{{.*}}noreg, 1 + %LHS = load i32, i32* %Src0 + %RHS = load i32, i32* %Src1 + %Sum = add i32 %LHS, %RHS + %Res = mul i32 %Sum, -3 + store i32 %Res, i32* %Dst + ret void +} + +; MHI -> VMOF +define void @fun34(i64* %Src0, i32* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun34: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+:vr128bit]] = VLEG {{.*}}noreg, 0 +; CHECK: [[REG1:%[0-9]+:vr128bit]] = COPY killed [[REG0]] +; CHECK: [[REG2:%[0-9]+:vr128bit]] = VLEF {{.*}}noreg, 1 +; CHECK: [[REG3:%[0-9]+:vr128bit]] = VAF killed [[REG1]], [[REG2]] +; CHECK: [[REG4:%[0-9]+:vr128bit]] = VLEIF {{.*}}-3, 1 +; CHECK: [[REG5:%[0-9]+:vr128bit]] = VMOF killed [[REG3]], [[REG4]] +; CHECK: VSTEF killed [[REG5]]{{.*}}noreg, 1 + %L0 = load volatile i64, i64* %Src0 + %T0 = trunc i64 %L0 to i32 + + %RHS = load i32, i32* %Src1 + %Sum = add i32 %T0, 
%RHS + %Res = mul i32 %Sum, -3 + store i32 %Res, i32* %Dst + ret void +} + +; MSFI -> VMEF +define void @fun35(i32* %Src0, i32* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun35: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+:vr128bit]] = VLEF {{.*}}noreg, 0 +; CHECK: [[REG1:%[0-9]+:vr128bit]] = VLEF {{.*}}noreg, 0 +; CHECK: [[REG2:%[0-9]+:vr128bit]] = VAF killed [[REG0]], [[REG1]] +; CHECK: [[REG3:%[0-9]+:vr128bit]] = VLEIH {{.*}}vr128bit(tied-def 0), -1, 1 +; CHECK: [[REG4:%[0-9]+:vr128bit]] = VLEIH [[REG3]](tied-def 0), 15, 0 +; CHECK: [[REG5:%[0-9]+:vr128bit]] = VMEF killed [[REG2]], [[REG4]] +; CHECK: VSTEF killed [[REG5]]{{.*}}noreg, 1 + %LHS = load i32, i32* %Src0 + %RHS = load i32, i32* %Src1 + %Sum = add i32 %LHS, %RHS + %Res = mul i32 %Sum, 1048575 + store i32 %Res, i32* %Dst + ret void +} + +; MSFI -> VMOF +define void @fun36(i64* %Src0, i32* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun36: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+:vr128bit]] = VLEG {{.*}}noreg, 0 +; CHECK: [[REG1:%[0-9]+:vr128bit]] = COPY killed [[REG0]] +; CHECK: [[REG2:%[0-9]+:vr128bit]] = VLEF {{.*}}noreg, 1 +; CHECK: [[REG3:%[0-9]+:vr128bit]] = VAF killed [[REG1]], [[REG2]] +; CHECK: [[REG4:%[0-9]+:vr128bit]] = VLEIH {{.*}}vr128bit(tied-def 0), -7616, 3 +; CHECK: [[REG5:%[0-9]+:vr128bit]] = VLEIH [[REG4]](tied-def 0), 1, 2 +; CHECK: [[REG6:%[0-9]+:vr128bit]] = VMOF killed [[REG3]], [[REG5]] +; CHECK: VSTEF killed [[REG6]]{{.*}}noreg, 1 + %L0 = load volatile i64, i64* %Src0 + %T0 = trunc i64 %L0 to i32 + + %RHS = load i32, i32* %Src1 + %Sum = add i32 %T0, %RHS + %Res = mul i32 %Sum, 123456 + store i32 %Res, i32* %Dst + ret void +} Index: llvm/test/CodeGen/SystemZ/domain-reassignment-03.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/SystemZ/domain-reassignment-03.ll @@ -0,0 +1,75 @@ +; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 -debug-only=systemz-domain-reassignment \ +; RUN: -verify-machineinstrs 2>&1 | FileCheck %s +; REQUIRES: asserts +; +; Test domain reassignments for register extensions (one unpack only). 
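+; +; Informal summary (comment only, not FileCheck-verified): the sign extensions below are expected to map +; to Vector Unpack High (VUPHF for i32 -> i64, VUPHH for i16 -> i32) and the zero extensions to Vector +; Unpack Logical High (VUPLHF / VUPLHH); "one unpack only" presumably excludes chains that would need +; more than one unpack.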
+ +; LGFR +define void @fun0(i32* %Src1, i32* %Src2, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun0: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK-NEXT: [[REG0:%[0-9]+]]:vr128bit = VAF +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VUPHF killed [[REG0]] +; CHECK-NEXT: VSTEG killed [[REG1]]{{.*}}noreg, 0 + %LHS = load i32, i32* %Src1 + %RHS = load i32, i32* %Src2 + %Sum = add i32 %LHS, %RHS + %ext = sext i32 %Sum to i64 + store i64 %ext, i64* %Dst + ret void +} + +; LHR +define void @fun1(i32* %Src, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun1: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEF {{.*}}noreg, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VUPHH killed [[REG0]] +; CHECK-NEXT: VSTEF killed [[REG1]]{{.*}}noreg, 1 + %L = load volatile i32, i32* %Src + %T = trunc i32 %L to i16 + %ext = sext i16 %T to i32 + store i32 %ext, i32* %Dst + ret void +} + +; LLGFR +define void @fun2(i32* %Src1, i32* %Src2, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun2: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK-NEXT: [[REG0:%[0-9]+]]:vr128bit = VAF +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VUPLHF killed [[REG0]] +; CHECK-NEXT: VSTEG killed [[REG1]]{{.*}}noreg, 0 + %LHS = load i32, i32* %Src1 + %RHS = load i32, i32* %Src2 + %Sum = add i32 %LHS, %RHS + %ext = zext i32 %Sum to i64 + store i64 %ext, i64* %Dst + ret void +} + +; LLHRMux +define void @fun3(i32* %Src0, i32* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun3: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VAF +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEIF {{.*}}-16, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VAF {{.*}} [[REG0]] +; CHECK-NEXT: [[REG2:%[0-9]+]]:vr128bit = VUPLHH killed [[REG1]] +; CHECK-NEXT: VSTEF killed [[REG2]]{{.*}}noreg, 1 + %LHS = load i32, i32* %Src0 + %RHS = load i32, i32* %Src1 + %Sum = add i32 %LHS, %RHS + %AddI = add i32 %Sum, -16 + %T = trunc i32 %AddI to i16 + %ext = zext i16 %T to i32 + store i32 %ext, i32* %Dst + ret void +} + Index: llvm/test/CodeGen/SystemZ/domain-reassignment-04.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/SystemZ/domain-reassignment-04.ll @@ -0,0 +1,215 @@ +; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 -debug-only=systemz-domain-reassignment \ +; RUN: -verify-machineinstrs 2>&1 | FileCheck %s +; REQUIRES: asserts +; +; Test domain reassignments of immediate loads. 
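+; +; Worked example of the Hi16/Lo16 notation below (comment only): for fun7, 134152192 = 2047 * 65536, so +; Hi16 = 2047 and Lo16 = 0, and the value is built by VLEIG 0 followed by VLEIH 2047 into halfword 2; the +; b15 annotations track bit 15 of Lo16, which determines whether the sign-extending VLEIG immediate can +; supply Lo16 directly.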
+ +; 16 bits sign extended to 64 bits: LGHI +define void @fun0(i64* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun0: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEIG {{.*}}, 0, 0 +; CHECK-NEXT: VSTEG killed [[REG0]]{{.*}} 0,{{.*}}, 0 + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i64* + store i64 0, i64* %tmp1 + ret void +} + +define void @fun1(i64* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun1: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEIG {{.*}}, 32767, 0 +; CHECK-NEXT: VSTEG killed [[REG0]]{{.*}} 0,{{.*}}, 0 + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i64* + store i64 32767, i64* %tmp1 + ret void +} + +define void @fun2(i64* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun2: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEIG {{.*}}, -32768, 0 +; CHECK-NEXT: VSTEG killed [[REG0]]{{.*}} 0,{{.*}}, 0 + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i64* + store i64 -32768, i64* %tmp1 + ret void +} + +; 16 bits sign extended to 32 bits: LHIMux +define void @fun3(i32* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun3: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEIF {{.*}}, 0, 0 +; CHECK-NEXT: VSTEF killed [[REG0]]{{.*}} 0,{{.*}}, 0 + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i32* + store i32 0, i32* %tmp1 + ret void +} + +define void @fun4(i32* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun4: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEIF {{.*}}, 32767, 0 +; CHECK-NEXT: VSTEF killed [[REG0]]{{.*}} 0,{{.*}}, 0 + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i32* + store i32 32767, i32* %tmp1 + ret void +} + +define void @fun5(i32* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun5: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEIF {{.*}}, -32768, 0 +; CHECK-NEXT: VSTEF killed [[REG0]]{{.*}} 0,{{.*}}, 0 + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i32* + store i32 -32768, i32* %tmp1 + ret void +} + +; 32 bits sign extended to 64 bits +; High32=0, Hi16=0, Lo16:b15=1 +define void @fun6(i64* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun6: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEIG {{.*}}, 0, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VLEIH [[REG0]]{{.*}}, -32768, 3 +; CHECK-NEXT: VSTEG killed [[REG1]]{{.*}} 0,{{.*}}, 0 + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i64* + store i64 32768, i64* %tmp1 + ret void +} + +; High32=0, Hi16=2047, Lo16=0 +define void @fun7(i64* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun7: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEIG {{.*}}, 0, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VLEIH [[REG0]]{{.*}}, 2047, 2 +; CHECK-NEXT: VSTEG killed [[REG1]]{{.*}} 0,{{.*}}, 0 + %tmp = add nsw i64 %Base,
%Idx + %tmp1 = inttoptr i64 %tmp to i64* + store i64 134152192, i64* %tmp1 + ret void +} + +; High32=0, Hi16=2047, Lo16:b15=0 +define void @fun8(i64* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun8: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEIG {{.*}}, 16, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VLEIH [[REG0]]{{.*}}, 2047, 2 +; CHECK-NEXT: VSTEG killed [[REG1]]{{.*}} 0,{{.*}}, 0 + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i64* + store i64 134152208, i64* %tmp1 + ret void +} + +; High32=-1, Hi16=-1, Lo16:b15=0 +define void @fun9(i64* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun9: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEIG {{.*}}, -1, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VLEIH [[REG0]]{{.*}}, 32767, 3 +; CHECK-NEXT: VSTEG killed [[REG1]]{{.*}} 0,{{.*}}, 0 + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i64* + store i64 -32769, i64* %tmp1 + ret void +} + +; High32=-1, Hi16=1, Lo16=-1 +define void @fun10(i64* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK: # Machine code for function fun10: IsSSA, TracksLiveness +; CHECK: no converter:{{.*}}LLIHF + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i64* + store i64 -4294836225, i64* %tmp1 + ret void +} + +; High32=-1, Hi16=1, Lo16:b15=1 +define void @fun11(i64* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK: # Machine code for function fun11: IsSSA, TracksLiveness +; CHECK: no converter:{{.*}}LLIHF + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i64* + store i64 -4294868992, i64* %tmp1 + ret void +} + +; High32=0, Hi16=1, Lo16:b15=1 +define void @fun12(i64* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun12: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEIF {{.*}}, 0, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VLEIH [[REG0]]{{.*}}, -32768, 3 +; CHECK: [[REG2:%[0-9]+]]:vr128bit = VLEIH [[REG1]]{{.*}}, 1, 2 +; CHECK-NEXT: VSTEG killed [[REG2]]{{.*}} 0,{{.*}}, 0 + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i64* + store i64 98304, i64* %tmp1 + ret void +} + +; High32=-1, Hi16=1, Lo16:b15=0 +define void @fun13(i64* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK: # Machine code for function fun13: IsSSA, TracksLiveness +; CHECK: no converter:{{.*}}LLIHF + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i64* + store i64 -4294873088, i64* %tmp1 + ret void +} + +; 32 bits zero extended to 64 bits +; Hi16=-1 Lo16=-4 +define void @fun14(i64* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun14: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEIF {{.*}}, 0, 0 +; CHECK: [[REG1:%[0-9]+]]:vr128bit = VLEIF [[REG0]]{{.*}}, -4, 1 +; CHECK-NEXT: VSTEG killed [[REG1]]{{.*}} 0,{{.*}}, 0 + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i64* + store i64 4294967292, i64* %tmp1 + ret void +} + +; Hi16=-16 Lo16=-4 +define void @fun15(i64* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for 
function fun15: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEIF {{.*}}, 0, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VLEIH [[REG0]]{{.*}}, -4, 3 +; CHECK-NEXT: [[REG2:%[0-9]+]]:vr128bit = VLEIH [[REG1]]{{.*}}, -16, 2 +; CHECK-NEXT: VSTEG killed [[REG2]]{{.*}} 0,{{.*}}, 0 + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i64* + store i64 4293984252, i64* %tmp1 + ret void +} + +; 32 bit immediate +; Hi16 = 16384, Lo16 = 0 +define void @fun16(i64* %Dst, i64 %Base, i64 %Idx) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun16: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEIH{{.*}}, 0, 1 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VLEIH [[REG0]]{{.*}}, 16384, 0 +; CHECK-NEXT: VSTEF killed [[REG1]]{{.*}} 0,{{.*}}, 0 + %tmp = add nsw i64 %Base, %Idx + %tmp1 = inttoptr i64 %tmp to i32* + store i32 1073741824, i32* %tmp1 + ret void +} Index: llvm/test/CodeGen/SystemZ/domain-reassignment-05.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/SystemZ/domain-reassignment-05.ll @@ -0,0 +1,117 @@ +; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 -debug-only=systemz-domain-reassignment \ +; RUN: -verify-machineinstrs 2>&1 | FileCheck %s +; REQUIRES: asserts +; +; Test domain reassignments for logical instructions. + +; SLLK +define void @fun0(i32* %Src0, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun0: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEF {{.*}}noreg, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VESLF killed [[REG0]] +; CHECK-NEXT: VSTEF killed [[REG1]]{{.*}}noreg, 0 + %Val = load i32, i32* %Src0 + %Res = shl i32 %Val, 2 + store i32 %Res, i32* %Dst + ret void +} + +; SLLG +define void @fun1(i64* %Src0, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun1: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEG {{.*}}noreg, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VESLG killed [[REG0]] +; CHECK-NEXT: VSTEG killed [[REG1]]{{.*}}noreg, 0 + %Val = load i64, i64* %Src0 + %Res = shl i64 %Val, 2 + store i64 %Res, i64* %Dst + ret void +} + +; SRLK +define void @fun2(i32* %Src0, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun2: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEF {{.*}}noreg, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VESRLF killed [[REG0]] +; CHECK-NEXT: VSTEF killed [[REG1]]{{.*}}noreg, 0 + %Val = load i32, i32* %Src0 + %Res = lshr i32 %Val, 2 + store i32 %Res, i32* %Dst + ret void +} + +; SRLG +define void @fun3(i64* %Src0, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun3: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEG {{.*}}noreg, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VESRLG killed [[REG0]] +; CHECK-NEXT: VSTEG killed [[REG1]]{{.*}}noreg, 0 + %Val = load i64, i64* %Src0 + %Res = lshr i64 %Val, 2 + store i64 %Res, i64* %Dst + ret void +} + +; SRAK +define void @fun4(i32* %Src0, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun4: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEF {{.*}}noreg, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VESRAF killed [[REG0]] +; CHECK-NEXT: VSTEF killed 
[[REG1]]{{.*}}noreg, 0 + %Val = load i32, i32* %Src0 + %Res = ashr i32 %Val, 2 + store i32 %Res, i32* %Dst + ret void +} + +; SRAG +define void @fun5(i64* %Src0, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun5: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEG {{.*}}noreg, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VESRAG killed [[REG0]] +; CHECK-NEXT: VSTEG killed [[REG1]]{{.*}}noreg, 0 + %Val = load i64, i64* %Src0 + %Res = ashr i64 %Val, 2 + store i64 %Res, i64* %Dst + ret void +} + +; LCGR +define void @fun6(i64* %Src0, i64* %Src1, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun6: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VAG +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VLCG killed [[REG0]] +; CHECK-NEXT: VSTEG killed [[REG1]]{{.*}}noreg, 0 + %LHS = load i64, i64* %Src0 + %RHS = load i64, i64* %Src1 + %Val = add i64 %LHS, %RHS + %Res = sub i64 0, %Val + store i64 %Res, i64* %Dst + ret void +} + +; LCR +define void @fun7(i32* %Src0, i32* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK: # Machine code for function fun7: IsSSA, TracksLiveness +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 0 +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VAF +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VLCF killed [[REG0]] +; CHECK-NEXT: VSTEF killed [[REG1]]{{.*}}noreg, 0 + %LHS = load i32, i32* %Src0 + %RHS = load i32, i32* %Src1 + %Val = add i32 %LHS, %RHS + %Res = sub i32 0, %Val + store i32 %Res, i32* %Dst + ret void +} Index: llvm/test/CodeGen/SystemZ/domain-reassignment-06.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/SystemZ/domain-reassignment-06.ll @@ -0,0 +1,158 @@ +; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 -debug-only=systemz-domain-reassignment \ +; RUN: -verify-machineinstrs 2>&1 | FileCheck %s --check-prefixes=CHECK,Z14 +; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z15 -debug-only=systemz-domain-reassignment \ +; RUN: -verify-machineinstrs 2>&1 | FileCheck %s --check-prefixes=CHECK,Z15 +; REQUIRES: asserts +; +; Test domain reassignments for fp <-> int conversions. 
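+; +; Note (comment only): the 64-bit scalar conversions (WCDGB, WCGDB, ...) are available from z14 on, while +; the 32-bit ones (WCEFB, WCFEB, ...) were only added with z15, which is presumably why the float cases +; below are only reassigned under the Z15 prefix and show an invalidated closure on Z14.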
+ +; CDGBR +define void @fun0(i64* %Src0, double* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun0: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEG {{.*}}noreg, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr64bit = COPY [[REG0]] +; CHECK-NEXT: [[REG2:%[0-9]+]]:vr64bit = WCDGB [[REG1]] +; CHECK-NEXT: [[REG3:%[0-9]+]]:fp64bit = COPY [[REG2]] +; CHECK-NEXT: VST64 killed [[REG3]] + %Val = load i64, i64* %Src0 + %Res = sitofp i64 %Val to double + store double %Res, double* %Dst + ret void +} + +; CEFBR +define void @fun1(i32* %Src0, float* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun1: IsSSA, TracksLiveness + +; Z14: --- Invalidated Closure + +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun1: IsSSA, TracksLiveness + +; Z15: [[REG0:%[0-9]+]]:vr128bit = VLEF{{.*}}noreg, 0 +; Z15-NEXT: [[REG1:%[0-9]+]]:vr32bit = COPY [[REG0]] +; Z15-NEXT: [[REG2:%[0-9]+]]:vr32bit = WCEFB [[REG1]] +; Z15-NEXT: [[REG3:%[0-9]+]]:fp32bit = COPY [[REG2]] +; Z15-NEXT: VST32 killed [[REG3]] + %Val = load i32, i32* %Src0 + %Res = sitofp i32 %Val to float + store float %Res, float* %Dst + ret void +} + +; CDLGBR +define void @fun2(i64* %Src0, double* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun2: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEG {{.*}}noreg, 0 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr64bit = COPY [[REG0]] +; CHECK-NEXT: [[REG2:%[0-9]+]]:vr64bit = WCDLGB [[REG1]] +; CHECK-NEXT: [[REG3:%[0-9]+]]:fp64bit = COPY [[REG2]] +; CHECK-NEXT: VST64 killed [[REG3]] + %Val = load i64, i64* %Src0 + %Res = uitofp i64 %Val to double + store double %Res, double* %Dst + ret void +} + +; CELFBR +define void @fun3(i32* %Src0, float* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun3: IsSSA, TracksLiveness + +; Z14: --- Invalidated Closure + +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun3: IsSSA, TracksLiveness + +; Z15: [[REG0:%[0-9]+]]:vr128bit = VLEF {{.*}}noreg, 0 +; Z15-NEXT: [[REG1:%[0-9]+]]:vr32bit = COPY [[REG0]] +; Z15-NEXT: [[REG2:%[0-9]+]]:vr32bit = WCELFB [[REG1]] +; Z15-NEXT: [[REG3:%[0-9]+]]:fp32bit = COPY [[REG2]] +; Z15-NEXT: VST32 killed [[REG3]] + %Val = load i32, i32* %Src0 + %Res = uitofp i32 %Val to float + store float %Res, float* %Dst + ret void +} + +; CGDBR +define void @fun4(double* %Src0, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun4: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:fp64bit = VL64 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr64bit = COPY killed [[REG0]] +; CHECK-NEXT: [[REG2:%[0-9]+]]:vr64bit = WCGDB [[REG1]]:vr64bit, 0, 5 +; CHECK-NEXT: [[REG3:%[0-9]+]]:vr64bit = IMPLICIT_DEF +; CHECK-NEXT: [[REG4:%[0-9]+]]:vr128bit = INSERT_SUBREG [[REG3]]:vr64bit(tied-def 0), [[REG2]] +; CHECK-NEXT: VSTEG killed [[REG4]]{{.*}}noreg, 0 + %Val = load double, double* %Src0 + %Res = fptosi double %Val to i64 + store i64 %Res, i64* %Dst + ret void +} + +; CFEBR +define void @fun5(float* %Src0, i32* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun5: IsSSA, TracksLiveness + +; Z14: --- Invalidated Closure + +; CHECK: ***** Machine 
Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun5: IsSSA, TracksLiveness + +; Z15: [[REG0:%[0-9]+]]:fp32bit = VL32 +; Z15-NEXT: [[REG1:%[0-9]+]]:vr32bit = COPY killed [[REG0]] +; Z15-NEXT: [[REG2:%[0-9]+]]:vr32bit = WCFEB [[REG1]]:vr32bit, 0, 5 +; Z15-NEXT: [[REG3:%[0-9]+]]:vr32bit = IMPLICIT_DEF +; Z15-NEXT: [[REG4:%[0-9]+]]:vr128bit = INSERT_SUBREG [[REG3]]:vr32bit(tied-def 0), [[REG2]] +; Z15-NEXT: VSTEF killed [[REG4]]{{.*}}noreg, 0 + + %Val = load float, float* %Src0 + %Res = fptosi float %Val to i32 + store i32 %Res, i32* %Dst + ret void +} + +; CLGDBR +define void @fun6(double* %Src0, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun6: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+]]:fp64bit = VL64 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr64bit = COPY killed [[REG0]] +; CHECK-NEXT: [[REG2:%[0-9]+]]:vr64bit = WCLGDB [[REG1]]:vr64bit, 0, 5 +; CHECK-NEXT: [[REG3:%[0-9]+]]:vr64bit = IMPLICIT_DEF +; CHECK-NEXT: [[REG4:%[0-9]+]]:vr128bit = INSERT_SUBREG [[REG3]]:vr64bit(tied-def 0), [[REG2]] +; CHECK-NEXT: VSTEG killed [[REG4]]{{.*}}noreg, 0 + %Val = load double, double* %Src0 + %Res = fptoui double %Val to i64 + store i64 %Res, i64* %Dst + ret void +} + +; CLFEBR +define void @fun7(float* %Src0, i32* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun7: IsSSA, TracksLiveness + +; Z14: --- no converter: %3:gr32bit = nofpexcept CLFEBR +; Z14: --- Invalidated Closure + +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun7: IsSSA, TracksLiveness + +; Z15: [[REG0:%[0-9]+]]:fp32bit = VL32 +; Z15-NEXT: [[REG1:%[0-9]+]]:vr32bit = COPY killed [[REG0]] +; Z15-NEXT: [[REG2:%[0-9]+]]:vr32bit = WCLFEB [[REG1]]:vr32bit, 0, 5 +; Z15-NEXT: [[REG3:%[0-9]+]]:vr32bit = IMPLICIT_DEF +; Z15-NEXT: [[REG4:%[0-9]+]]:vr128bit = INSERT_SUBREG [[REG3]]:vr32bit(tied-def 0), [[REG2]] +; Z15-NEXT: VSTEF killed [[REG4]]{{.*}}noreg, 0 + + %Val = load float, float* %Src0 + %Res = fptoui float %Val to i32 + store i32 %Res, i32* %Dst + ret void +} Index: llvm/test/CodeGen/SystemZ/domain-reassignment-07.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/SystemZ/domain-reassignment-07.ll @@ -0,0 +1,711 @@ +; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 -debug-only=systemz-domain-reassignment \ +; RUN: -verify-machineinstrs 2>&1 | FileCheck %s +; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z15 -debug-only=systemz-domain-reassignment \ +; RUN: -verify-machineinstrs 2>&1 | FileCheck %s --check-prefix=Z15 + +; REQUIRES: asserts +; +; Test domain reassignments that have special vector lane requirements. + +;; LGF in G1 / LHMux in F1/F3 missing + +; Truncate i64 -> i32 puts result in lane 1. 
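+; +; Lane notation used in these comments (informal): G0/G1 are the two i64 elements of a 128-bit vector +; register and F0-F3 its four i32 elements, so truncating a value in G0 yields F1 and truncating one in +; G1 yields F3 (the low half of the respective doubleword).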
+define void @fun0(i64* %Src0, i64* %Src1, i32* %Src2, i32* %Dst, i16* %Dst1, i8* %Dst2) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun0: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VAG +; CHECK: VLEF {{.*}}noreg, 1 +; CHECK-NEXT: [[REG:%[0-9]+]]:vr128bit = VSF +; CHECK-NEXT: VSTEF [[REG]]{{.*}}noreg, 1 +; CHECK-NEXT: VSTEH [[REG]]{{.*}}noreg, 3 +; CHECK-NEXT: VSTEB killed [[REG]]{{.*}}noreg, 7 + %LHS = load i64, i64* %Src0 ; G0 + %RHS = load i64, i64* %Src1 + %Sum = add i64 %LHS, %RHS + %T = trunc i64 %Sum to i32 ; F1 + %RHS2 = load i32, i32* %Src2 + %Res = sub i32 %T, %RHS2 + store i32 %Res, i32* %Dst + %T2 = trunc i32 %Res to i16 + store i16 %T2, i16* %Dst1 + %T3 = trunc i16 %T2 to i8 + store i8 %T3, i8* %Dst2 + ret void +} + +; i32 sign extend puts result in lane 1. +define void @fun1(i64* %Src0, i64* %Src1, i32* %Src2, i64* %Dst, i64* %Dst2, + i32* %Dst3, i16* %Dst4, i8* %Dst5) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun1: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VUPHF +; CHECK-NEXT: VSTEG [[REG0]]{{.*}}noreg, 1 +; CHECK: VLEG {{.*}}noreg, 1 +; CHECK: VSG +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 +; CHECK: VLEF {{.*}}noreg, 3 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = VSF +; CHECK: VSTEF [[REG1]]{{.*}}noreg, 3 +; CHECK: VSTEH [[REG1]]{{.*}}noreg, 7 +; CHECK: VSTEB killed [[REG1]]{{.*}}noreg, 15 + + %L0 = load volatile i64, i64* %Src0 ; G0 + %T = trunc i64 %L0 to i32 + %S = sext i32 %T to i64 ; G1 + store i64 %S, i64* %Dst + + %L1 = load i64, i64* %Src1 ; G1 + %D = sub i64 %S, %L1 + store i64 %D, i64* %Dst2 + + %T2 = trunc i64 %L1 to i32 ; F3 + %L2 = load i32, i32* %Src2 + %D2 = sub i32 %T2, %L2 + store i32 %D2, i32* %Dst3 + %T3 = trunc i32 %D2 to i16 + store i16 %T3, i16* %Dst4 + %T4 = trunc i16 %T3 to i8 + store i8 %T4, i8* %Dst5 + + ret void +} + +; Extensions in G1 need a Vector Unpack Low. +define void @fun2(i64* %Src0, i64* %Src1, i32* %Src2, i64* %Dst, i64* %Dst2, + i64* %Dst3, i32* %Dst4, i32* %Dst5) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun2: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VUPHF +; CHECK: [[REG0:%[0-9]+]]:vr128bit = VLEG {{.*}}noreg, 1 +; CHECK-NEXT: VSG +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = COPY killed [[REG0]] +; CHECK-NEXT: [[REG2:%[0-9]+]]:vr128bit = VUPLF [[REG1]] +; CHECK-NEXT: VSTEG killed [[REG2]]{{.*}}noreg, 1 +; CHECK-NEXT: [[REG3:%[0-9]+]]:vr128bit = VUPLLF [[REG1]] +; CHECK-NEXT: VSTEG killed [[REG3]]{{.*}}noreg, 1 +; CHECK-NEXT: [[REG4:%[0-9]+]]:vr128bit = VUPLHW [[REG1]] +; CHECK-NEXT: VSTEF killed [[REG4]]{{.*}}noreg, 3 +; CHECK-NEXT: [[REG5:%[0-9]+]]:vr128bit = VUPLLH killed [[REG1]] +; CHECK-NEXT: VSTEF killed [[REG5]]{{.*}}noreg, 3 + + %L0 = load volatile i64, i64* %Src0 ; G0 + %T = trunc i64 %L0 to i32 + %S = sext i32 %T to i64 ; G1 + + %L1 = load volatile i64, i64* %Src1 ; G1 + %D = sub i64 %S, %L1 + store i64 %D, i64* %Dst + + %T2 = trunc i64 %L1 to i32 ; F3 + %S2 = sext i32 %T2 to i64 + store i64 %S2, i64* %Dst2 ; G1 + + %S3 = zext i32 %T2 to i64 + store i64 %S3, i64* %Dst3 ; G1 + + %T4 = trunc i32 %T2 to i16 ; F3 + %S4 = sext i16 %T4 to i32 + store i32 %S4, i32* %Dst4 + + %S5 = zext i16 %T4 to i32 ; F3 + store i32 %S5, i32* %Dst5 + + ret void +} + +; Vector Unpack High.
+; 16 -> 32 bit extensions from G0 (F1) end up in F3 +define void @fun3(i64* %Src1, i64* %Src2, i32* %Dst, i32* %Dst2) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun3: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK-NEXT: [[REG0:%[0-9]+]]:vr128bit = VAG +; CHECK-NEXT: [[REG1:%[0-9]+]]:vr128bit = COPY killed [[REG0]] +; CHECK-NEXT: [[REG2:%[0-9]+]]:vr128bit = VUPLHH [[REG1]] +; CHECK-NEXT: VSTEF killed [[REG2]]{{.*}}noreg, 3 +; CHECK-NEXT: [[REG3:%[0-9]+]]:vr128bit = VUPHH killed [[REG1]] +; CHECK-NEXT: VSTEF killed [[REG3]]{{.*}}noreg, 3 + %LHS = load i64, i64* %Src1 + %RHS = load i64, i64* %Src2 + %Sum = add i64 %LHS, %RHS ; G0 + %T = trunc i64 %Sum to i16 + + %ext = zext i16 %T to i32 + store i32 %ext, i32* %Dst ; F3 + + %S2 = sext i16 %T to i32 + store i32 %S2, i32* %Dst2 ; F3 + + ret void +} + +; Load immediate in G1 / F3 +define void @fun4(i64* %Src0, i64* %Dst, i32* %Dst2) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun4: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VUPHF +; CHECK: VLEIG {{.*}}, -3, 1 +; CHECK: VAG +; CHECK: VSTEG{{.*}}noreg, 1 +; CHECK: VLEIF {{.*}}, -3, 3 +; CHECK: VAF +; CHECK: VSTEF{{.*}}noreg, 3 + + %L0 = load volatile i64, i64* %Src0 ; G0 + %T = trunc i64 %L0 to i32 + %S = sext i32 %T to i64 ; G1 + + %Res = add i64 %S, -3 + store i64 %Res, i64* %Dst + + %T2 = trunc i64 %Res to i32 + %Res2 = add i32 %T2, -3 + store i32 %Res2, i32* %Dst2 + + ret void +} + +; i64 arithmetic in lane G1. +define void @fun5(i64* %Src0, i64* %Src1, i64* %Src2, i64* %Src3, i64* %Src4, + i64* %Src5, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun5: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VUPHF +; CHECK: VLEG {{.*}}noreg, 1 +; CHECK-NEXT: VAG +; CHECK: VLEG {{.*}}noreg, 1 +; CHECK-NEXT: VSG +; CHECK: VLEG {{.*}}noreg, 1 +; CHECK-NEXT: VN +; CHECK: VLEG {{.*}}noreg, 1 +; CHECK-NEXT: VO +; CHECK: VLEG {{.*}}noreg, 1 +; CHECK-NEXT: VX +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 + + %L0 = load volatile i64, i64* %Src0 ; G0 + %T = trunc i64 %L0 to i32 + %S = sext i32 %T to i64 ; G1 + + %L1 = load i64, i64* %Src1 + %R1 = add i64 %S, %L1 + + %L2 = load i64, i64* %Src2 + %R2 = sub i64 %R1, %L2 + + %L3 = load i64, i64* %Src3 + %R3 = and i64 %R2, %L3 + + %L4 = load i64, i64* %Src4 + %R4 = or i64 %R3, %L4 + + %L5 = load i64, i64* %Src5 + %R5 = xor i64 %R4, %L5 + + store i64 %R5, i64* %Dst + + ret void +} + +; i32 arithmetic in lane F1 +define void @fun6(i64* %Src0, i32* %Src1, i32* %Src2, i32* %Src3, i32* %Src4, + i32* %Src5, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun6: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 1 +; CHECK-NEXT: VAF +; CHECK: VLEF {{.*}}noreg, 1 +; CHECK-NEXT: VSF +; CHECK: VLEF {{.*}}noreg, 1 +; CHECK-NEXT: VN +; CHECK: VLEF {{.*}}noreg, 1 +; CHECK-NEXT: VO +; CHECK: VLEF {{.*}}noreg, 1 +; CHECK-NEXT: VX +; CHECK-NEXT: VSTEF {{.*}}noreg, 1 + + %L0 = load volatile i64, i64* %Src0 ; G0 + %T = trunc i64 %L0 to i32 ; F1 + + %L1 = load i32, i32* %Src1 + %R1 = add i32 %T, %L1 + + %L2 = load i32, i32* %Src2 + %R2 = sub i32 %R1, %L2 + + %L3 = load i32, i32* %Src3 + %R3 = and i32 %R2, %L3 + + %L4 = load i32, i32* %Src4 + %R4 = or i32 %R3, %L4 + + %L5 = load i32, i32* %Src5 + %R5 = xor i32 %R4, %L5 + + store i32 %R5, i32* %Dst + + ret void +}
+ +; i32 arithmetic in lane F3 +define void @fun7(i64* %Src0, i64* %Src1, i32* %Src2, i32* %Src3, i32* %Src4, + i32* %Src5, i32* %Src6, i64* %Dst, i32* %Dst2) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun7: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VUPHF +; CHECK: VLEG {{.*}}noreg, 1 +; CHECK: VSG +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 + +; CHECK: VLEF {{.*}}noreg, 3 +; CHECK-NEXT: VAF +; CHECK: VLEF {{.*}}noreg, 3 +; CHECK-NEXT: VSF +; CHECK: VLEF {{.*}}noreg, 3 +; CHECK-NEXT: VN +; CHECK: VLEF {{.*}}noreg, 3 +; CHECK-NEXT: VO +; CHECK: VLEF {{.*}}noreg, 3 +; CHECK-NEXT: VX +; CHECK-NEXT: VSTEF {{.*}}noreg, 3 + + %L0 = load volatile i64, i64* %Src0 ; G0 + %T0 = trunc i64 %L0 to i32 + %S0 = sext i32 %T0 to i64 ; G1 + + %L = load i64, i64* %Src1 ; G1 + %D = sub i64 %S0, %L + store i64 %D, i64* %Dst + + %T1 = trunc i64 %L to i32 ; F3 + %L1 = load i32, i32* %Src2 + %R1 = add i32 %T1, %L1 + + %L2 = load i32, i32* %Src3 + %R2 = sub i32 %R1, %L2 + + %L3 = load i32, i32* %Src4 + %R3 = and i32 %R2, %L3 + + %L4 = load i32, i32* %Src5 + %R4 = or i32 %R3, %L4 + + %L5 = load i32, i32* %Src6 + %R5 = xor i32 %R4, %L5 + + store i32 %R5, i32* %Dst2 + + ret void +} + +; AGRK and SGRK in G1. +define void @fun8(i64* %Src0, i64* %Src1, i64* %Src2, + i64* %Dst, i64* %Dst1, i64* %Dst2, i64* %Dst3) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun8: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VUPHF +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 +; CHECK: VLEG {{.*}}noreg, 1 +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 +; CHECK-NEXT: VSG +; CHECK: VLEG {{.*}}noreg, 1 +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 +; CHECK-NEXT: VAG +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 + + %L0 = load volatile i64, i64* %Src0 ; G0 + %T = trunc i64 %L0 to i32 + %S = sext i32 %T to i64 ; G1 + store i64 %S, i64* %Dst + + %L1 = load i64, i64* %Src1 + store i64 %L1, i64* %Dst1 + %S1 = sub i64 %S, %L1 + + %L2 = load i64, i64* %Src2 + store i64 %L2, i64* %Dst2 + %S2 = add i64 %S1, %L2 + + store i64 %S2, i64* %Dst3 + ret void +} + +; ARK and SRK in F1. +define void @fun9(i64* %Src0, i32* %Src1, i32* %Src2, + i32* %Dst, i32* %Dst1, i32* %Dst2, i32* %Dst3) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun9: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VSTEF {{.*}}noreg, 1 +; CHECK: VLEF {{.*}}noreg, 1 +; CHECK-NEXT: VSTEF {{.*}}noreg, 1 +; CHECK-NEXT: VSF +; CHECK: VLEF {{.*}}noreg, 1 +; CHECK-NEXT: VSTEF {{.*}}noreg, 1 +; CHECK-NEXT: VAF +; CHECK-NEXT: VSTEF {{.*}}noreg, 1 + + %L0 = load volatile i64, i64* %Src0 ; G0 + %T = trunc i64 %L0 to i32 ; F1 + store i32 %T, i32* %Dst + + %L1 = load i32, i32* %Src1 + store i32 %L1, i32* %Dst1 + %S1 = sub i32 %T, %L1 + + %L2 = load i32, i32* %Src2 + store i32 %L2, i32* %Dst2 + %S2 = add i32 %S1, %L2 + + store i32 %S2, i32* %Dst3 + ret void +} + +; ARK and SRK in F3.
+define void @fun10(i64* %Src0, i64* %Src00, i32* %Src1, i32* %Src2, + i64* %Dst, i64* %Dst00, i64* %Dst01, i32* %Dst1, i32* %Dst2, i32* %Dst3) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun10: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VUPHF +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 +; CHECK: VLEG {{.*}}noreg, 1 +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 +; CHECK-NEXT: VSG +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 + +; CHECK: VLEF {{.*}}noreg, 3 +; CHECK-NEXT: VSTEF {{.*}}noreg, 3 +; CHECK-NEXT: VSF +; CHECK: VLEF {{.*}}noreg, 3 +; CHECK-NEXT: VSTEF {{.*}}noreg, 3 +; CHECK-NEXT: VAF +; CHECK-NEXT: VSTEF {{.*}}noreg, 3 + + %L0 = load volatile i64, i64* %Src0 ; G0 + %T = trunc i64 %L0 to i32 + %S = sext i32 %T to i64 ; G1 + store i64 %S, i64* %Dst + + %L00 = load volatile i64, i64* %Src00 ; G1 + store i64 %L00, i64* %Dst00 + %D = sub i64 %S, %L00 + store i64 %D, i64* %Dst01 + + %T1 = trunc i64 %D to i32 ; F3 + %L1 = load i32, i32* %Src1 + store i32 %L1, i32* %Dst1 + %S1 = sub i32 %T1, %L1 + + %L2 = load i32, i32* %Src2 + store i32 %L2, i32* %Dst2 + %S2 = add i32 %S1, %L2 + + store i32 %S2, i32* %Dst3 + ret void +} + +; AGHIK and OILL64 in G1 +define void @fun11(i64* %Src0, i64* %Src1, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun11: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VUPHF +; CHECK: VLEG {{.*}}noreg, 1 +; CHECK: VSG +; CHECK: VLEIG {{.*}}-16, 1 +; CHECK-NEXT: VAG +; CHECK: VLEIG {{.*}}1, 1 +; CHECK-NEXT: VO +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 + %L0 = load volatile i64, i64* %Src0 ; G0 + %T = trunc i64 %L0 to i32 + %LHS = sext i32 %T to i64 ; G1 + %RHS = load i64, i64* %Src1 + %Sum = sub i64 %LHS, %RHS + %S2 = add i64 %Sum, -16 + %Res = or i64 %S2, 1 + store i64 %Res, i64* %Dst + ret void +} + +; AHIMuxK and OILMux in F1 +define void @fun12(i64* %Src0, i32* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun12: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VLEF {{.*}}noreg, 1 +; CHECK-NEXT: VAF +; CHECK: VLEIF {{.*}}-16, 1 +; CHECK-NEXT: VAF +; CHECK: VLEIF {{.*}}1, 1 +; CHECK-NEXT: VO +; CHECK: VSTEF {{.*}}noreg, 1 + %L0 = load volatile i64, i64* %Src0 + %LHS = trunc i64 %L0 to i32 ; F1 + %RHS = load i32, i32* %Src1 + %Sum = add i32 %LHS, %RHS + %S2 = add i32 %Sum, -16 + %Res = or i32 %S2, 1 + store i32 %Res, i32* %Dst + ret void +} + +; AHIMuxK and OILMux in F3 +define void @fun13(i64* %Src0, i64* %Src00, i32* %Src1, + i64* %Dst, i64* %Dst00, i64* %Dst01, i32* %Dst1) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun13: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VUPHF +; CHECK-NEXT: VSTEG{{.*}}noreg, 1 +; CHECK: VLEG {{.*}}noreg, 1 +; CHECK-NEXT: VSTEG{{.*}}noreg, 1 +; CHECK-NEXT: VSG +; CHECK-NEXT: VSTEG{{.*}}noreg, 1 +; CHECK: VLEF {{.*}}noreg, 3 +; CHECK-NEXT: VAF +; CHECK: VLEIF {{.*}}-16, 3 +; CHECK-NEXT: VAF +; CHECK: VLEIF {{.*}}1, 3 +; CHECK-NEXT: VO +; CHECK: VSTEF {{.*}}noreg, 3 + %L0 = load volatile i64, i64* %Src0 + %T = trunc i64 %L0 to i32 + %S = sext i32 %T to i64 ; G1 + store i64 %S, i64* %Dst + + %L00 = load volatile i64, i64* %Src00 ; G1 + store i64 %L00, i64* %Dst00 + %D = sub i64 %S, %L00 + store i64 %D, i64* %Dst01 + + %LHS = trunc i64 %D to i32 ; F3 + %RHS = load i32, i32* %Src1 + %Sum = add i32 %LHS, %RHS + %S2 = add i32 
%Sum, -16 + %Res = or i32 %S2, 1 + store i32 %Res, i32* %Dst1 + ret void +} + +; Logical instructions in G1. +define void @fun14(i64* %Src0, i64* %Dst, i64* %Dst1, i64* %Dst2, i64* %Dst3) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun14: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VUPHF +; CHECK-NEXT: VESLG +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 +; CHECK-NEXT: VESRLG +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 +; CHECK-NEXT: VESRAG +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 +; CHECK-NEXT: VLCG +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 + %L = load volatile i64, i64* %Src0 + %T = trunc i64 %L to i32 + %S = sext i32 %T to i64 ; G1 + %R0 = shl i64 %S, 2 + store i64 %R0, i64* %Dst + %R1 = lshr i64 %S, 2 + store i64 %R1, i64* %Dst1 + %R2 = ashr i64 %S, 2 + store i64 %R2, i64* %Dst2 + %R3 = sub i64 0, %R2 + store i64 %R3, i64* %Dst3 + ret void +} + +; Logical instructions in F1. +define void @fun15(i64* %Src0, i32* %Dst, i32* %Dst1, i32* %Dst2, i32* %Dst3) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun15: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VESLF +; CHECK-NEXT: VSTEF {{.*}}noreg, 1 +; CHECK-NEXT: VESRLF +; CHECK-NEXT: VSTEF {{.*}}noreg, 1 +; CHECK-NEXT: VESRAF +; CHECK-NEXT: VSTEF {{.*}}noreg, 1 +; CHECK-NEXT: VLCF +; CHECK-NEXT: VSTEF {{.*}}noreg, 1 + %L = load volatile i64, i64* %Src0 + %T = trunc i64 %L to i32 + + %R0 = shl i32 %T, 2 + store i32 %R0, i32* %Dst + %R1 = lshr i32 %T, 2 + store i32 %R1, i32* %Dst1 + %R2 = ashr i32 %T, 2 + store i32 %R2, i32* %Dst2 + %R3 = sub i32 0, %R2 + store i32 %R3, i32* %Dst3 + ret void +} + +; Logical instructions in F3. +define void @fun16(i64* %Src0, i64* %Src00, i32* %Src1, + i64* %Dst0, i64* %Dst00, i64* %Dst01, + i32* %Dst, i32* %Dst1, i32* %Dst2, i32* %Dst3) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun16: IsSSA, TracksLiveness + +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: VUPHF +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 +; CHECK: VLEG {{.*}}noreg, 1 +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 +; CHECK-NEXT: VSG +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 + +; CHECK: VESLF +; CHECK-NEXT: VSTEF {{.*}}noreg, 3 +; CHECK-NEXT: VESRLF +; CHECK-NEXT: VSTEF {{.*}}noreg, 3 +; CHECK-NEXT: VESRAF +; CHECK-NEXT: VSTEF {{.*}}noreg, 3 +; CHECK-NEXT: VLCF +; CHECK-NEXT: VSTEF {{.*}}noreg, 3 + + %L0 = load volatile i64, i64* %Src0 + %T0 = trunc i64 %L0 to i32 + %S = sext i32 %T0 to i64 ; G1 + store i64 %S, i64* %Dst0 + + %L00 = load volatile i64, i64* %Src00 ; G1 + store i64 %L00, i64* %Dst00 + %D = sub i64 %S, %L00 + store i64 %D, i64* %Dst01 + + %T = trunc i64 %D to i32 ; F3 + %R0 = shl i32 %T, 2 + store i32 %R0, i32* %Dst + %R1 = lshr i32 %T, 2 + store i32 %R1, i32* %Dst1 + %R2 = ashr i32 %T, 2 + store i32 %R2, i32* %Dst2 + %R3 = sub i32 0, %R2 + store i32 %R3, i32* %Dst3 + ret void +} + +; MS; MSRKC +define void @fun17(i32* %Src0, i64* %Src1, i32* %Src2, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun17: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+:vr128bit]] = VLEF {{.*}}noreg, 1 +; CHECK: [[REG1:%[0-9]+:vr128bit]] = VLEG {{.*}}noreg, 0 +; CHECK: [[REG2:%[0-9]+:vr128bit]] = COPY killed [[REG1]] +; CHECK: [[REG3:%[0-9]+:vr128bit]] = VMOF killed [[REG0]], killed [[REG2]] +; CHECK: [[REG4:%[0-9]+:vr128bit]] = VLEF {{.*}}noreg, 1 +; CHECK: [[REG5:%[0-9]+:vr128bit]] = VMOF killed [[REG3]], [[REG4]]
+; CHECK: VSTEF killed [[REG5]]{{.*}}noreg, 1 + %LHS = load i32, i32* %Src0 + %L = load volatile i64, i64* %Src1 + %RHS = trunc i64 %L to i32 + %C = load i32, i32* %Src2 + %Prd = mul i32 %LHS, %RHS + %Res = mul i32 %Prd, %C + store i32 %Res, i32* %Dst + ret void +} + +;;; Some tests with conflicting lanes + +; MSRKC +define void @fun18(i64* %Src0, double* %Src2, i64* %Dst0, i32* %Dst3) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun18: IsSSA, TracksLiveness +; CHECK: No lanes: + %L = load volatile i64, i64* %Src0 + %T = trunc i64 %L to i32 + %S = sext i32 %T to i64 + %Sh = shl i64 %S, 2 + store i64 %Sh, i64* %Dst0 + + %T1 = trunc i64 %Sh to i32 ; F3 + + %L2 = load volatile double, double* %Src2 + %C2 = fptosi double %L2 to i64 ; G0 + %T2 = trunc i64 %C2 to i32 ; F1 + + %Res3 = mul i32 %T1, %T2 + store i32 %Res3, i32* %Dst3 + ret void +} + +; VLEZ +define void @fun19(i64* %Src0, i8* %Src1, i64* %Dst, i64* %Dst1, i64* %Dst2, i64* %Dst3) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun19: IsSSA, TracksLiveness +; CHECK: No lanes: + %L = load volatile i64, i64* %Src0 + %T = trunc i64 %L to i32 + %S = sext i32 %T to i64 ; G1 + %Sh = shl i64 %S, 2 + + %L1 = load i8, i8* %Src1 + %Z1 = zext i8 %L1 to i64 ; G0 + + %Sum = add i64 %Sh, %Z1 ; conflict + store i64 %Sum, i64* %Dst + + ret void +} + +; CDGBR +define void @fun20(i64* %Src, double* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun20: IsSSA, TracksLiveness +; CHECK: No lanes: + %L = load volatile i64, i64* %Src + %T = trunc i64 %L to i32 + %S = sext i32 %T to i64 ; G1 + + %Res = sitofp i64 %S to double + store double %Res, double* %Dst + ret void +} + +; CGDBR +define void @fun21(i64* %Src, double* %Src1, i64* %Dst, i64* %Dst2) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun21: IsSSA, TracksLiveness +; CHECK: No lanes: + %L = load volatile i64, i64* %Src + %T = trunc i64 %L to i32 + %S = sext i32 %T to i64 ; G1 + %Sh = shl i64 %S, 2 + + %D = load double, double* %Src1 + %C = fptosi double %D to i64 ; G0 + + %Res = add i64 %Sh, %C ; conflict + store i64 %Res, i64* %Dst + ret void +} + +; CEFBR +define void @fun22(i64* %Src, float* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun22: IsSSA, TracksLiveness +; Z15: No lanes: + %L = load volatile i64, i64* %Src + %T = trunc i64 %L to i32 + + %Res = sitofp i32 %T to float + store float %Res, float* %Dst + ret void +} Index: llvm/test/CodeGen/SystemZ/domain-reassignment-08.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/SystemZ/domain-reassignment-08.ll @@ -0,0 +1,129 @@ +; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 -debug-only=systemz-domain-reassignment \ +; RUN: -verify-machineinstrs 2>&1 | FileCheck %s +; REQUIRES: asserts +; +; Test domain reassignments involving PHI nodes. 
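+; +; Note (comment only): a PHI joins values belonging to one closure, so all incoming values must be +; reassignable to the same vector lane; fun2 below shows the closure being invalidated ("No lanes") when +; the incoming lanes conflict.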
+ +define void @fun0(i64* %Dst, i64* %Src0, i64* %Src1, i64 %Val0, i64 %Val1) { +; CHECK-LABEL: bb.4.join: +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun0: IsSSA, TracksLiveness +; CHECK-LABEL: bb.0 (%ir-block.0): +; CHECK: [[REG0:%[0-9]+:vr128bit]] = VLEIG {{.*}}, 0, 0 + +; CHECK-LABEL: bb.2.bb1: +; CHECK: [[REG1:%[0-9]+:vr128bit]] = VLEG {{.*}}noreg, 0 + +; CHECK-LABEL: bb.3.bb2: +; CHECK: [[REG2:%[0-9]+:vr128bit]] = VLEG {{.*}}noreg, 0 + +; CHECK-LABEL: bb.4.join: +; CHECK: [[REG3:%[0-9]+:vr128bit]] = PHI [[REG0]], %bb.0, [[REG2]], %bb.3, [[REG1]], %bb.2 +; CHECK-NEXT: VSTEG killed [[REG3]]{{.*}}noreg, 0 +; CHECK-NEXT: Return + + %Cmp0 = icmp eq i64 %Val0, 0 + br i1 %Cmp0, label %bb0, label %bb3 + +bb0: + %Cmp1 = icmp eq i64 %Val1, 0 + br i1 %Cmp1, label %bb1, label %bb2 + +bb1: + %L0 = load i64, i64* %Src0 + br label %join + +bb2: + %L1 = load i64, i64* %Src1 + br label %join + +bb3: + br label %join + +join: + %Res = phi i64 [%L0, %bb1], [%L1, %bb2], [0, %bb3] + store i64 %Res, i64* %Dst + ret void +} + +; The unpack requires all instructions to use lane 1. +define void @fun1(i64* %Dst, i64* %Src0, i64* %Src1, i64 %Val0, i64 %Val1) { +; CHECK-LABEL: bb.4.join: +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun1: IsSSA, TracksLiveness +; CHECK-LABEL: bb.0 (%ir-block.0): +; CHECK: [[REG0:%[0-9]+:vr128bit]] = VLEIG {{.*}}, 0, 1 + +; CHECK-LABEL: bb.2.bb1: +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK-NEXT: COPY +; CHECK-NEXT: [[REG1:%[0-9]+:vr128bit]] = VUPHF + +; CHECK-LABEL: bb.3.bb2: +; CHECK: [[REG2:%[0-9]+:vr128bit]] = VLEG {{.*}}noreg, 1 + +; CHECK-LABEL: bb.4.join: +; CHECK: [[REG3:%[0-9]+:vr128bit]] = PHI [[REG0]], %bb.0, [[REG2]], %bb.3, [[REG1]], %bb.2 +; CHECK-NEXT: VSTEG killed [[REG3]]{{.*}}noreg, 1 +; CHECK-NEXT: Return + + %Cmp0 = icmp eq i64 %Val0, 0 + br i1 %Cmp0, label %bb0, label %bb3 + +bb0: + %Cmp1 = icmp eq i64 %Val1, 0 + br i1 %Cmp1, label %bb1, label %bb2 + +bb1: + %L0 = load volatile i64, i64* %Src0 + %T0 = trunc i64 %L0 to i32 + %S0 = sext i32 %T0 to i64 ; G1 + br label %join + +bb2: + %L1 = load i64, i64* %Src1 + br label %join + +bb3: + br label %join + +join: + %Res = phi i64 [%S0, %bb1], [%L1, %bb2], [0, %bb3] + store i64 %Res, i64* %Dst + ret void +} + +; Conflicting lanes +define void @fun2(i64* %Dst, i64* %Src0, double* %Src1, i64 %Val0, i64 %Val1) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun2: IsSSA, TracksLiveness +; CHECK: No lanes: {{.*}} COPY +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun2: IsSSA, TracksLiveness + + %Cmp0 = icmp eq i64 %Val0, 0 + br i1 %Cmp0, label %bb0, label %bb3 + +bb0: + %Cmp1 = icmp eq i64 %Val1, 0 + br i1 %Cmp1, label %bb1, label %bb2 + +bb1: + %L0 = load volatile i64, i64* %Src0 + %T0 = trunc i64 %L0 to i32 + %S0 = sext i32 %T0 to i64 ; G1 + br label %join + +bb2: + %L1 = load double, double* %Src1 + %I1 = fptosi double %L1 to i64 ; G0 + br label %join + +bb3: + br label %join + +join: + %Res = phi i64 [%S0, %bb1], [%I1, %bb2], [0, %bb3] + store i64 %Res, i64* %Dst + ret void +} Index: llvm/test/CodeGen/SystemZ/domain-reassignment-09.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/SystemZ/domain-reassignment-09.ll @@ -0,0 +1,381 @@ +; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 
-debug-only=systemz-domain-reassignment \ +; RUN: -verify-machineinstrs 2>&1 | FileCheck %s +; REQUIRES: asserts +; +; Test domain reassignments involving VLGV instructions. + +define void @fun0(<2 x i64>* %Src0, <2 x i64>* %Src1, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun0: IsSSA, TracksLiveness +; CHECK: VL +; CHECK-NEXT: COPY +; CHECK-NEXT: VL +; CHECK-NEXT: COPY +; CHECK-NEXT: VAG +; CHECK-NEXT: VSTEG {{.*}}noreg, 0 + %V0 = load volatile <2 x i64>, <2 x i64>* %Src0 + %EltA = extractelement <2 x i64> %V0, i32 0 + %V1 = load volatile <2 x i64>, <2 x i64>* %Src1 + %EltB = extractelement <2 x i64> %V1, i32 0 + %Res = add i64 %EltA, %EltB + store i64 %Res, i64* %Dst + ret void +} + +define void @fun1(<2 x i64>* %Src0, <2 x i64>* %Src1, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun1: IsSSA, TracksLiveness +; CHECK: VL +; CHECK-NEXT: COPY +; CHECK-NEXT: VL +; CHECK-NEXT: COPY +; CHECK-NEXT: VAG +; CHECK-NEXT: VSTEG {{.*}}noreg, 1 + %V0 = load volatile <2 x i64>, <2 x i64>* %Src0 + %EltA = extractelement <2 x i64> %V0, i32 1 + %V1 = load volatile <2 x i64>, <2 x i64>* %Src1 + %EltB = extractelement <2 x i64> %V1, i32 1 + %Res = add i64 %EltA, %EltB + store i64 %Res, i64* %Dst + ret void +} + +define void @fun2(<2 x i64>* %Src0, <2 x i64>* %Src1, i64* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun2: IsSSA, TracksLiveness +; CHECK: No lanes: +; CHECK-NEXT: --- Invalidated Closure +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun2: IsSSA, TracksLiveness + %V0 = load volatile <2 x i64>, <2 x i64>* %Src0 + %EltA = extractelement <2 x i64> %V0, i32 0 + %V1 = load volatile <2 x i64>, <2 x i64>* %Src1 + %EltB = extractelement <2 x i64> %V1, i32 1 + %Res = add i64 %EltA, %EltB + store i64 %Res, i64* %Dst + ret void +} + +define void @fun3(<4 x i32>* %Src0, <4 x i32>* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun3: IsSSA, TracksLiveness +; CHECK: VL +; CHECK-NEXT: COPY +; CHECK-NEXT: VL +; CHECK-NEXT: COPY +; CHECK-NEXT: VAF +; CHECK-NEXT: VSTEF {{.*}}noreg, 1 + %V0 = load volatile <4 x i32>, <4 x i32>* %Src0 + %EltA = extractelement <4 x i32> %V0, i32 1 + %V1 = load volatile <4 x i32>, <4 x i32>* %Src1 + %EltB = extractelement <4 x i32> %V1, i32 1 + %Res = add i32 %EltA, %EltB + store i32 %Res, i32* %Dst + ret void +} + +define void @fun4(<4 x i32>* %Src0, <4 x i32>* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun4: IsSSA, TracksLiveness +; CHECK: No lanes: {{.*}} ARK +; CHECK-NEXT: --- Invalidated Closure +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun4: IsSSA, TracksLiveness + %V0 = load volatile <4 x i32>, <4 x i32>* %Src0 + %EltA = extractelement <4 x i32> %V0, i32 0 + %V1 = load volatile <4 x i32>, <4 x i32>* %Src1 + %EltB = extractelement <4 x i32> %V1, i32 2 + %Res = add i32 %EltA, %EltB + store i32 %Res, i32* %Dst + ret void +} + +;;;; TM / Compare w/0 + +; Reassign the TMLMux of an extracted element, but not the CHIMux of a load. 
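+; +; Note (comment only): the mask operand is materialized with VZERO plus a VLEI* writing -1 (or a single +; mask bit) into the tested lane; VTM then sets CC 0 / 1 / 3 for all-zero / mixed / all-one selected +; bits (CC 2 is unused), and the branch condition masks below are adjusted to match.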
+define void @fun5(<2 x i1> %Cond, i32* %Src) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun5: IsSSA, TracksLiveness +; CHECK: TMLMux +; CHECK-NEXT: BRC 15, 7 +; CHECK: --- not extract {{.*}} CHIMux {{.*}}, 0 +; CHECK-NEXT: --- Invalidated Closure +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun5: IsSSA, TracksLiveness +; CHECK: VZERO +; CHECK-NEXT: VLEIH {{.*}}, 1, 3 +; CHECK-NEXT: VTM +; CHECK-NEXT: BRC 13, 5 +entry: + %i = load volatile i32, i32* %Src + %i2 = icmp eq i32 %i, 0 + br label %bb1 + +bb1: + %C = extractelement <2 x i1> %Cond, i32 0 + %C1 = and i1 %i2, %C + br i1 %C1, label %bb1, label %bb2 + +bb2: + ret void +} + +; Compare NE -1. +define void @fun6(<4 x i32>* %Src0, i32* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun6: IsSSA, TracksLiveness +; CHECK: VLGVF +; CHECK: CHIMux {{.*}}, -1 +; CHECK-NEXT: BRC 14, 8 +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun6: IsSSA, TracksLiveness +; CHECK-NOT: VLGVF +; CHECK: VZERO +; CHECK-NEXT: VLEIF {{.*}}, -1, 3 +; CHECK-NEXT: VTM +; CHECK-NEXT: BRC 13, 1 +; CHECK: VSTEF {{.*}}noreg, 3 + %V0 = load volatile <4 x i32>, <4 x i32>* %Src0 + %EltA = extractelement <4 x i32> %V0, i32 3 + %Cond = icmp ne i32 %EltA, -1 + br i1 %Cond, label %bb1, label %bb2 + +bb1: + store i32 %EltA , i32* %Dst + ret void + +bb2: + ret void +} + +; Compare EQ -1. +define void @fun7(<4 x i32>* %Src0, i32* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun7: IsSSA, TracksLiveness +; CHECK: VLGVF +; CHECK: CHIMux {{.*}}, -1 +; CHECK-NEXT: BRC 14, 6 +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun7: IsSSA, TracksLiveness +; CHECK-NOT: VLGVF +; CHECK: VZERO +; CHECK-NEXT: VLEIF {{.*}}, -1, 3 +; CHECK-NEXT: VTM +; CHECK-NEXT: BRC 13, 12 +; CHECK: VSTEF {{.*}}noreg, 3 + %V0 = load volatile <4 x i32>, <4 x i32>* %Src0 + %EltA = extractelement <4 x i32> %V0, i32 3 + %Cond = icmp eq i32 %EltA, -1 + br i1 %Cond, label %bb1, label %bb2 + +bb1: + store i32 %EltA , i32* %Dst + ret void + +bb2: + ret void +} + +; Compare NE 0. +define void @fun8(<4 x i32>* %Src0, i32* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun8: IsSSA, TracksLiveness +; CHECK: VLGVF +; CHECK: CHIMux {{.*}}, 0 +; CHECK-NEXT: BRC 14, 8 +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun8: IsSSA, TracksLiveness +; CHECK-NOT: VLGVF +; CHECK: VZERO +; CHECK-NEXT: VLEIF {{.*}}, -1, 1 +; CHECK-NEXT: VTM +; CHECK-NEXT: BRC 13, 8 +; CHECK: VSTEF {{.*}}noreg, 1 + %V0 = load volatile <4 x i32>, <4 x i32>* %Src0 + %EltA = extractelement <4 x i32> %V0, i32 1 + %Cond = icmp ne i32 %EltA, 0 + br i1 %Cond, label %bb1, label %bb2 + +bb1: + store i32 %EltA , i32* %Dst + ret void + +bb2: + ret void +} + +; Compare EQ 0. 
+define void @fun9(<4 x i32>* %Src0, i32* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun9: IsSSA, TracksLiveness +; CHECK: VLGVF +; CHECK: CHIMux {{.*}}, 0 +; CHECK-NEXT: BRC 14, 6 +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun9: IsSSA, TracksLiveness +; CHECK-NOT: VLGVF +; CHECK: VZERO +; CHECK-NEXT: VLEIF {{.*}}, -1, 0 +; CHECK-NEXT: VTM +; CHECK-NEXT: BRC 13, 5 +; CHECK: VSTEF {{.*}}noreg, 0 + %V0 = load volatile <4 x i32>, <4 x i32>* %Src0 + %EltA = extractelement <4 x i32> %V0, i32 0 + %Cond = icmp eq i32 %EltA, 0 + br i1 %Cond, label %bb1, label %bb2 + +bb1: + store i32 %EltA , i32* %Dst + ret void + +bb2: + ret void +} + +; Compare EQ 2. +define void @fun10(<4 x i32>* %Src0, i32* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun10: IsSSA, TracksLiveness +; CHECK: immediate {{.*}} CHIMux +; CHECK-NEXT: Invalidated Closure +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun10: IsSSA, TracksLiveness + %V0 = load volatile <4 x i32>, <4 x i32>* %Src0 + %EltA = extractelement <4 x i32> %V0, i32 0 + %Cond = icmp eq i32 %EltA, 2 + br i1 %Cond, label %bb1, label %bb2 + +bb1: + store i32 %EltA , i32* %Dst + ret void + +bb2: + ret void +} + +; Compare EQ 0, 64 bits. +define void @fun11(<2 x i64>* %Src0, i64* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun11: IsSSA, TracksLiveness +; CHECK: VLGVG +; CHECK: CGHI {{.*}}, 0 +; CHECK-NEXT: BRC 14, 6 +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun11: IsSSA, TracksLiveness +; CHECK-NOT: VLGVG +; CHECK: VZERO +; CHECK-NEXT: VLEIG {{.*}}, -1, 0 +; CHECK-NEXT: VTM +; CHECK-NEXT: BRC 13, 5 +; CHECK: VSTEG {{.*}}noreg, 0 + %V0 = load volatile <2 x i64>, <2 x i64>* %Src0 + %EltA = extractelement <2 x i64> %V0, i32 0 + %Cond = icmp eq i64 %EltA, 0 + br i1 %Cond, label %bb1, label %bb2 + +bb1: + store i64 %EltA , i64* %Dst + ret void + +bb2: + ret void +} + +; Compare NE -1, 64 bits. +define void @fun12(<2 x i64>* %Src0, i64* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun12: IsSSA, TracksLiveness +; CHECK: VLGVG +; CHECK: CGHI {{.*}}, -1 +; CHECK-NEXT: BRC 14, 8 +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun12: IsSSA, TracksLiveness +; CHECK-NOT: VLGVG +; CHECK: VZERO +; CHECK-NEXT: VLEIG {{.*}}, -1, 1 +; CHECK-NEXT: VTM +; CHECK-NEXT: BRC 13, 1 +; CHECK: VSTEG {{.*}}noreg, 1 + %V0 = load volatile <2 x i64>, <2 x i64>* %Src0 + %EltA = extractelement <2 x i64> %V0, i32 1 + %Cond = icmp ne i64 %EltA, -1 + br i1 %Cond, label %bb1, label %bb2 + +bb1: + store i64 %EltA , i64* %Dst + ret void + +bb2: + ret void +} + +; Compare NE -1, 64 bits, where the i1 compare result is also stored (so no reassignment is expected).
+define void @fun13(<2 x i64>* %Src0, i1* %Dst) {
+; CHECK: ***** Machine Function before Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun13: IsSSA, TracksLiveness
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun13: IsSSA, TracksLiveness
+  %V0 = load volatile <2 x i64>, <2 x i64>* %Src0
+  %EltA = extractelement <2 x i64> %V0, i32 0
+  %CondA = icmp ne i64 %EltA, -1
+  br i1 %CondA, label %bb0, label %bb1
+
+bb0:
+  store i1 %CondA, i1* %Dst
+  ret void
+
+bb1:
+  ret void
+}
+
+; TM involving i16 extraction.
+define void @fun14(<8 x i16>* %Src) {
+; CHECK: ***** Machine Function before Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun14: IsSSA, TracksLiveness
+; CHECK: VLGVH {{.*}}, 7
+; CHECK: TMLMux
+; CHECK-NEXT: BRC 15, 8
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun14: IsSSA, TracksLiveness
+; CHECK: VZERO
+; CHECK-NEXT: VLEIH {{.*}}, 1, 7
+; CHECK-NEXT: VTM
+; CHECK-NEXT: BRC 13, 8
+entry:
+  %i = load <8 x i16>, <8 x i16>* %Src
+  %i15 = icmp eq <8 x i16> %i, zeroinitializer
+  br label %bb2
+
+bb2:
+  %i17 = extractelement <8 x i1> %i15, i32 7
+  br i1 %i17, label %bb3, label %bb2
+
+bb3:
+  ret void
+}
+
+; TM involving i8 extraction.
+define void @fun15(<16 x i8>* %Src) {
+; CHECK: ***** Machine Function before Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun15: IsSSA, TracksLiveness
+; CHECK: VLGVB {{.*}}, 2
+; CHECK: TMLMux
+; CHECK-NEXT: BRC 15, 7
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun15: IsSSA, TracksLiveness
+; CHECK: VZERO
+; CHECK-NEXT: VLEIB {{.*}}, 1, 2
+; CHECK-NEXT: VTM
+; CHECK-NEXT: BRC 13, 5
+entry:
+  %i = load <16 x i8>, <16 x i8>* %Src
+  %i15 = icmp eq <16 x i8> %i, zeroinitializer
+  br label %bb2
+
+bb2:
+  %i17 = extractelement <16 x i1> %i15, i32 2
+  br i1 %i17, label %bb2, label %bb3
+
+bb3:
+  ret void
+}
Index: llvm/test/CodeGen/SystemZ/domain-reassignment-10.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/SystemZ/domain-reassignment-10.mir
@@ -0,0 +1,671 @@
+# RUN: llc -mtriple=s390x-linux-gnu -mcpu=z14 -O3 -o - -start-after=finalize-isel %s \
+# RUN:   -debug-only=systemz-domain-reassignment -verify-machineinstrs 2>&1 | FileCheck %s
+# REQUIRES: asserts
+#
+# Test domain reassignment of TMLMux in various (theoretical) cases.
+# Test extractions of halfword / byte elements.
+
+--- |
+  define void @fun0(<2 x i1> %Cond, i32* %Src, i32* %Dst) { ret void }
+  define void @fun1(<2 x i1> %Cond, i32* %Src, i32* %Dst) { ret void }
+  define void @fun2(<2 x i1> %Cond, i32* %Src, i32* %Dst) { ret void }
+  define void @fun3(<2 x i1> %Cond, i32* %Src, i32* %Dst) { ret void }
+  define void @fun4(<2 x i1> %Cond, i32* %Src, i32* %Dst) { ret void }
+  define void @fun5(<2 x i1> %Cond, i32* %Src, i32* %Dst) { ret void }
+  define void @fun6(<2 x i1> %Cond, i32* %Src, i32* %Dst) { ret void }
+  define void @fun7(<2 x i1> %Cond, i32* %Src, i32* %Dst) { ret void }
+  define void @fun8(<2 x i1> %Cond, i32* %Src, i32* %Dst) { ret void }
+  define void @fun9(<2 x i1> %Cond, i32* %Src, i32* %Dst) { ret void }
+  define void @fun10(<2 x i1> %Cond, i32* %Src, i32* %Dst) { ret void }
+  define void @fun11(<2 x i1> %Cond, i32* %Src, i32* %Dst) { ret void }
+  define void @fun12(<2 x i1> %Cond, i32* %Src, i32* %Dst) { ret void }
+...
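+
+# Note on the BRC operands in the checks: the first operand is the mask of CC
+# values the test can produce and the second is the mask of CC values that
+# take the branch (8, 4, 2 and 1 for CC0..CC3).  TM sets CC0 (all selected
+# bits zero), CC1/CC2 (mixed, with the leftmost tested bit zero/one) and CC3
+# (all ones), while VTM never produces CC2, which is why a reassigned test
+# branches under the valid-CC mask 13 instead of 15 or 14.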
+
+# CHECK: ***** Machine Function before Domain Reassignment *****
+# CHECK-NEXT: # Machine code for function fun0:
+# CHECK: VLGVF {{.*}}, 1
+# CHECK: TMLMux {{.*}}, 1,
+# CHECK-NEXT: BRC 15, 7
+# CHECK: ***** Machine Function after Domain Reassignment *****
+# CHECK-NEXT: # Machine code for function fun0:
+# CHECK: VZERO
+# CHECK-NEXT: VLEIH {{.*}}, 1, 3
+# CHECK-NEXT: VTM
+# CHECK-NEXT: BRC 13, 5
+# CHECK: VSTEF {{.*}}noreg, 1
+---
+name: fun0
+alignment: 16
+tracksRegLiveness: true
+registers:
+  - { id: 1, class: vr128bit }
+  - { id: 2, class: addr64bit }
+  - { id: 4, class: grx32bit }
+  - { id: 5, class: gr64bit }
+liveins:
+  - { reg: '$r2d', virtual-reg: '%2' }
+  - { reg: '$v24', virtual-reg: '%1' }
+frameInfo:
+  maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $r2d, $v24
+
+    %2:addr64bit = COPY $r2d
+    %1:vr128bit = COPY $v24
+
+  bb.1:
+    successors: %bb.2(0x7e000000), %bb.3(0x02000000)
+
+    %5:gr64bit = VLGVF %1, $noreg, 1
+    %4:grx32bit = COPY %5.subreg_l32
+    TMLMux %4, 1, implicit-def $cc
+    BRC 15, 7, %bb.2, implicit $cc
+    J %bb.3
+
+  bb.2:
+    STMux %4, %2, 0, $noreg :: (store 4 into %ir.Dst)
+
+  bb.3:
+    Return
+
+...
+
+
+# Test that all 16 bits are zero.
+# CHECK: ***** Machine Function before Domain Reassignment *****
+# CHECK-NEXT: # Machine code for function fun1:
+# CHECK: VLGVF {{.*}}, 0
+# CHECK: TMLMux {{.*}}, 65535,
+# CHECK-NEXT: BRC 15, 8
+# CHECK: ***** Machine Function after Domain Reassignment *****
+# CHECK-NEXT: # Machine code for function fun1:
+# CHECK: VZERO
+# CHECK-NEXT: VLEIH {{.*}}, -1, 1
+# CHECK-NEXT: VTM
+# CHECK-NEXT: BRC 13, 8
+# CHECK: VSTEF {{.*}}noreg, 0
+---
+name: fun1
+alignment: 16
+tracksRegLiveness: true
+registers:
+  - { id: 1, class: vr128bit }
+  - { id: 2, class: addr64bit }
+  - { id: 4, class: grx32bit }
+  - { id: 5, class: gr64bit }
+liveins:
+  - { reg: '$r2d', virtual-reg: '%2' }
+  - { reg: '$v24', virtual-reg: '%1' }
+frameInfo:
+  maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $r2d, $v24
+
+    %2:addr64bit = COPY $r2d
+    %1:vr128bit = COPY $v24
+
+  bb.1:
+    successors: %bb.2(0x7e000000), %bb.3(0x02000000)
+
+    %5:gr64bit = VLGVF %1, $noreg, 0
+    %4:grx32bit = COPY %5.subreg_l32
+    TMLMux %4, 65535, implicit-def $cc
+    BRC 15, 8, %bb.2, implicit $cc
+    J %bb.3
+
+  bb.2:
+    STMux %4, %2, 0, $noreg :: (store 4 into %ir.Dst)
+
+  bb.3:
+    Return
+
+...
+
+
+# Test that all 16 bits are one.
+# CHECK: ***** Machine Function before Domain Reassignment *****
+# CHECK-NEXT: # Machine code for function fun2:
+# CHECK: VLGVF {{.*}}, 0
+# CHECK: TMLMux {{.*}}, 65535,
+# CHECK-NEXT: BRC 15, 1
+# CHECK: ***** Machine Function after Domain Reassignment *****
+# CHECK-NEXT: # Machine code for function fun2:
+# CHECK: VZERO
+# CHECK-NEXT: VLEIH {{.*}}, -1, 1
+# CHECK-NEXT: VTM
+# CHECK-NEXT: BRC 13, 1
+# CHECK: VSTEF {{.*}}noreg, 0
+---
+name: fun2
+alignment: 16
+tracksRegLiveness: true
+registers:
+  - { id: 1, class: vr128bit }
+  - { id: 2, class: addr64bit }
+  - { id: 4, class: grx32bit }
+  - { id: 5, class: gr64bit }
+liveins:
+  - { reg: '$r2d', virtual-reg: '%2' }
+  - { reg: '$v24', virtual-reg: '%1' }
+frameInfo:
+  maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $r2d, $v24
+
+    %2:addr64bit = COPY $r2d
+    %1:vr128bit = COPY $v24
+
+  bb.1:
+    successors: %bb.2(0x7e000000), %bb.3(0x02000000)
+
+    %5:gr64bit = VLGVF %1, $noreg, 0
+    %4:grx32bit = COPY %5.subreg_l32
+    TMLMux %4, 65535, implicit-def $cc
+    BRC 15, 1, %bb.2, implicit $cc
+    J %bb.3
+
+  bb.2:
+    STMux %4, %2, 0, $noreg :: (store 4 into %ir.Dst)
+
+  bb.3:
+    Return
+
+...
+
+
+# Test that at least one of the upper 12 bits is set.
+# CHECK: ***** Machine Function before Domain Reassignment *****
+# CHECK-NEXT: # Machine code for function fun3:
+# CHECK: VLGVF {{.*}}, 0
+# CHECK: TMLMux {{.*}}, 65520,
+# CHECK-NEXT: BRC 15, 7
+# CHECK: ***** Machine Function after Domain Reassignment *****
+# CHECK-NEXT: # Machine code for function fun3:
+# CHECK: VZERO
+# CHECK-NEXT: VLEIH {{.*}}, -16, 1
+# CHECK-NEXT: VTM
+# CHECK-NEXT: BRC 13, 5
+# CHECK: VSTEF {{.*}}noreg, 0
+---
+name: fun3
+alignment: 16
+tracksRegLiveness: true
+registers:
+  - { id: 1, class: vr128bit }
+  - { id: 2, class: addr64bit }
+  - { id: 4, class: grx32bit }
+  - { id: 5, class: gr64bit }
+liveins:
+  - { reg: '$r2d', virtual-reg: '%2' }
+  - { reg: '$v24', virtual-reg: '%1' }
+frameInfo:
+  maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $r2d, $v24
+
+    %2:addr64bit = COPY $r2d
+    %1:vr128bit = COPY $v24
+
+  bb.1:
+    successors: %bb.2(0x7e000000), %bb.3(0x02000000)
+
+    %5:gr64bit = VLGVF %1, $noreg, 0
+    %4:grx32bit = COPY %5.subreg_l32
+    TMLMux %4, 65520, implicit-def $cc
+    BRC 15, 7, %bb.2, implicit $cc
+    J %bb.3
+
+  bb.2:
+    STMux %4, %2, 0, $noreg :: (store 4 into %ir.Dst)
+
+  bb.3:
+    Return
+
+...
+
+
+# Test that at least one of the lower 4 bits is zero.
+# CHECK: ***** Machine Function before Domain Reassignment *****
+# CHECK-NEXT: # Machine code for function fun4:
+# CHECK: VLGVF {{.*}}, 0
+# CHECK: TMLMux {{.*}}, 15,
+# CHECK-NEXT: BRC 15, 14
+# CHECK: ***** Machine Function after Domain Reassignment *****
+# CHECK-NEXT: # Machine code for function fun4:
+# CHECK: VZERO
+# CHECK-NEXT: VLEIH {{.*}}, 15, 1
+# CHECK-NEXT: VTM
+# CHECK-NEXT: BRC 13, 12
+# CHECK: VSTEF {{.*}}noreg, 0
+---
+name: fun4
+alignment: 16
+tracksRegLiveness: true
+registers:
+  - { id: 1, class: vr128bit }
+  - { id: 2, class: addr64bit }
+  - { id: 4, class: grx32bit }
+  - { id: 5, class: gr64bit }
+liveins:
+  - { reg: '$r2d', virtual-reg: '%2' }
+  - { reg: '$v24', virtual-reg: '%1' }
+frameInfo:
+  maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $r2d, $v24
+
+    %2:addr64bit = COPY $r2d
+    %1:vr128bit = COPY $v24
+
+  bb.1:
+    successors: %bb.2(0x7e000000), %bb.3(0x02000000)
+
+    %5:gr64bit = VLGVF %1, $noreg, 0
+    %4:grx32bit = COPY %5.subreg_l32
+    TMLMux %4, 15, implicit-def $cc
+    BRC 15, 14, %bb.2, implicit $cc
+    J %bb.3
+
+  bb.2:
+    STMux %4, %2, 0, $noreg :: (store 4 into %ir.Dst)
+
+  bb.3:
+    Return
+
+...
+
+
+# Test a function with live-out CC value.
+# CHECK: ***** Machine Function before Domain Reassignment *****
+# CHECK-NEXT: # Machine code for function fun5:
+# CHECK: CC users : TMLMux
+# CHECK-NEXT: Invalidated Closure
+# CHECK: ***** Machine Function after Domain Reassignment *****
+# CHECK-NEXT: # Machine code for function fun5:
+---
+name: fun5
+alignment: 16
+tracksRegLiveness: true
+registers:
+  - { id: 1, class: vr128bit }
+  - { id: 2, class: addr64bit }
+  - { id: 4, class: grx32bit }
+  - { id: 5, class: gr64bit }
+liveins:
+  - { reg: '$r2d', virtual-reg: '%2' }
+  - { reg: '$v24', virtual-reg: '%1' }
+frameInfo:
+  maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $r2d, $v24
+
+    %2:addr64bit = COPY $r2d
+    %1:vr128bit = COPY $v24
+
+  bb.1:
+    successors: %bb.2(0x7e000000), %bb.3(0x02000000)
+
+    %5:gr64bit = VLGVF %1, $noreg, 0
+    %4:grx32bit = COPY %5.subreg_l32
+    TMLMux %4, 1, implicit-def $cc
+    BRC 15, 8, %bb.2, implicit $cc
+    J %bb.3
+
+  bb.2:
+    liveins: $cc
+
+    STMux %4, %2, 0, $noreg :: (store 4 into %ir.Dst)
+    BRC 15, 8, %bb.2, implicit $cc
+    J %bb.3
+
+  bb.3:
+    Return
+
+...
+
+
+# Test TMLMux with a check including leftmost bit 0.
+# CHECK: ***** Machine Function before Domain Reassignment *****
+# CHECK-NEXT: # Machine code for function fun6:
+# CHECK: CC user mask: TMLMux
+# CHECK-NEXT: Invalidated Closure
+# CHECK: ***** Machine Function after Domain Reassignment *****
+# CHECK-NEXT: # Machine code for function fun6:
+# CHECK: VLGVF
+# CHECK: TMLMux
+---
+name: fun6
+alignment: 16
+tracksRegLiveness: true
+registers:
+  - { id: 1, class: vr128bit }
+  - { id: 2, class: addr64bit }
+  - { id: 4, class: grx32bit }
+  - { id: 5, class: gr64bit }
+liveins:
+  - { reg: '$r2d', virtual-reg: '%2' }
+  - { reg: '$v24', virtual-reg: '%1' }
+frameInfo:
+  maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+  bb.0:
+    liveins: $r2d, $v24
+
+    %2:addr64bit = COPY $r2d
+    %1:vr128bit = COPY $v24
+
+  bb.1:
+    successors: %bb.2(0x7e000000), %bb.3(0x02000000)
+
+    %5:gr64bit = VLGVF %1, $noreg, 0
+    %4:grx32bit = COPY %5.subreg_l32
+    TMLMux %4, 15, implicit-def $cc
+    BRC 15, 4, %bb.2, implicit $cc
+    J %bb.3
+
+  bb.2:
+    STMux %4, %2, 0, $noreg :: (store 4 into %ir.Dst)
+
+  bb.3:
+    Return
+
+...
+
+
+# Test TMLMux with a check including leftmost bit 1.
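+# (After TM, CC1 and CC2 encode the state of the leftmost tested bit; VTM has
+# no equivalent condition, so a closure whose branch depends on CC1 or CC2
+# alone is expected to be rejected.)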
+# CHECK: ***** Machine Function before Domain Reassignment ***** +# CHECK-NEXT: # Machine code for function fun7: +# CHECK: CC user mask: TMLMux +# CHECK-NEXT: Invalidated Closure +# CHECK: ***** Machine Function after Domain Reassignment ***** +# CHECK-NEXT: # Machine code for function fun7: +# CHECK: VLGVF +# CHECK: TMLMux +--- +name: fun7 +alignment: 16 +tracksRegLiveness: true +registers: + - { id: 1, class: vr128bit } + - { id: 2, class: addr64bit } + - { id: 4, class: grx32bit } + - { id: 5, class: gr64bit } +liveins: + - { reg: '$r2d', virtual-reg: '%2' } + - { reg: '$v24', virtual-reg: '%1' } +frameInfo: + maxAlignment: 1 +machineFunctionInfo: {} +body: | + bb.0: + liveins: $r2d, $v24 + + %2:addr64bit = COPY $r2d + %1:vr128bit = COPY $v24 + + bb.1: + successors: %bb.2(0x7e000000), %bb.3(0x02000000) + + %5:gr64bit = VLGVF %1, $noreg, 0 + %4:grx32bit = COPY %5.subreg_l32 + TMLMux %4, 15, implicit-def $cc + BRC 15, 2, %bb.2, implicit $cc + J %bb.3 + + bb.2: + STMux %4, %2, 0, $noreg :: (store 4 into %ir.Dst) + + bb.3: + Return + +... + + +# Test a VLGVH with all 32 bits used. +# CHECK: ***** Machine Function before Domain Reassignment ***** +# CHECK-NEXT: # Machine code for function fun8: +# CHECK: context {{.*}} VLGVH +# CHECK-NEXT: Invalidated Closure +# CHECK: ***** Machine Function after Domain Reassignment ***** +# CHECK-NEXT: # Machine code for function fun8: +# CHECK: VLGVH +# CHECK: TMLMux +--- +name: fun8 +alignment: 16 +tracksRegLiveness: true +registers: + - { id: 1, class: vr128bit } + - { id: 2, class: addr64bit } + - { id: 4, class: grx32bit } + - { id: 5, class: gr64bit } +liveins: + - { reg: '$r2d', virtual-reg: '%2' } + - { reg: '$v24', virtual-reg: '%1' } +frameInfo: + maxAlignment: 1 +machineFunctionInfo: {} +body: | + bb.0: + liveins: $r2d, $v24 + + %2:addr64bit = COPY $r2d + %1:vr128bit = COPY $v24 + + bb.1: + successors: %bb.2(0x7e000000), %bb.3(0x02000000) + + %5:gr64bit = VLGVH %1, $noreg, 0 + %4:grx32bit = COPY %5.subreg_l32 + TMLMux %4, 15, implicit-def $cc + BRC 15, 8, %bb.2, implicit $cc + J %bb.3 + + bb.2: + STMux %4, %2, 0, $noreg :: (store 4 into %ir.Dst) + + bb.3: + Return + +... + +# Test a VLGVB with all 32 bits used. +# CHECK: ***** Machine Function before Domain Reassignment ***** +# CHECK-NEXT: # Machine code for function fun9: +# CHECK: context {{.*}} VLGVB +# CHECK-NEXT: Invalidated Closure +# CHECK: ***** Machine Function after Domain Reassignment ***** +# CHECK-NEXT: # Machine code for function fun9: +# CHECK: VLGVB +# CHECK: TMLMux +--- +name: fun9 +alignment: 16 +tracksRegLiveness: true +registers: + - { id: 1, class: vr128bit } + - { id: 2, class: addr64bit } + - { id: 4, class: grx32bit } + - { id: 5, class: gr64bit } +liveins: + - { reg: '$r2d', virtual-reg: '%2' } + - { reg: '$v24', virtual-reg: '%1' } +frameInfo: + maxAlignment: 1 +machineFunctionInfo: {} +body: | + bb.0: + liveins: $r2d, $v24 + + %2:addr64bit = COPY $r2d + %1:vr128bit = COPY $v24 + + bb.1: + successors: %bb.2(0x7e000000), %bb.3(0x02000000) + + %5:gr64bit = VLGVB %1, $noreg, 7 + %4:grx32bit = COPY %5.subreg_l32 + TMLMux %4, 15, implicit-def $cc + BRC 15, 8, %bb.2, implicit $cc + J %bb.3 + + bb.2: + STMux %4, %2, 0, $noreg :: (store 4 into %ir.Dst) + + bb.3: + Return + +... + +# Test a VLGVB with a TML that tests all of 8 bits. 
+# CHECK: ***** Machine Function before Domain Reassignment ***** +# CHECK-NEXT: # Machine code for function fun10: +# CHECK: VLGVB {{.*}}, 0 +# CHECK: TMLMux {{.*}}, 255, +# CHECK-NEXT: BRC 15, 8 +# CHECK: ***** Machine Function after Domain Reassignment ***** +# CHECK-NEXT: # Machine code for function fun10: +# CHECK: VZERO +# CHECK-NEXT: VLEIB {{.*}}, -1, 0 +# CHECK-NEXT: VTM +# CHECK-NEXT: BRC 13, 8 +--- +name: fun10 +alignment: 16 +tracksRegLiveness: true +registers: + - { id: 1, class: vr128bit } + - { id: 2, class: addr64bit } + - { id: 4, class: grx32bit } + - { id: 5, class: gr64bit } +liveins: + - { reg: '$r2d', virtual-reg: '%2' } + - { reg: '$v24', virtual-reg: '%1' } +frameInfo: + maxAlignment: 1 +machineFunctionInfo: {} +body: | + bb.0: + liveins: $r2d, $v24 + + %2:addr64bit = COPY $r2d + %1:vr128bit = COPY $v24 + + bb.1: + successors: %bb.2(0x7e000000), %bb.1(0x02000000) + + %5:gr64bit = VLGVB %1, $noreg, 0 + %4:grx32bit = COPY %5.subreg_l32 + TMLMux %4, 255, implicit-def $cc + BRC 15, 8, %bb.2, implicit $cc + J %bb.1 + + bb.2: + Return + +... + +# Test a VLGVB with a TML that tests more than 8 bits. +# CHECK: ***** Machine Function before Domain Reassignment ***** +# CHECK-NEXT: # Machine code for function fun11: +# CHECK: context {{.*}} VLGVB +# CHECK-NEXT: Invalidated Closure +# CHECK: ***** Machine Function after Domain Reassignment ***** +# CHECK-NEXT: # Machine code for function fun11: +# CHECK: VLGVB +# CHECK: TMLMux +--- +name: fun11 +alignment: 16 +tracksRegLiveness: true +registers: + - { id: 1, class: vr128bit } + - { id: 2, class: addr64bit } + - { id: 4, class: grx32bit } + - { id: 5, class: gr64bit } +liveins: + - { reg: '$r2d', virtual-reg: '%2' } + - { reg: '$v24', virtual-reg: '%1' } +frameInfo: + maxAlignment: 1 +machineFunctionInfo: {} +body: | + bb.0: + liveins: $r2d, $v24 + + %2:addr64bit = COPY $r2d + %1:vr128bit = COPY $v24 + + bb.1: + successors: %bb.2(0x7e000000), %bb.1(0x02000000) + + %5:gr64bit = VLGVB %1, $noreg, 0 + %4:grx32bit = COPY %5.subreg_l32 + TMLMux %4, 511, implicit-def $cc + BRC 15, 7, %bb.2, implicit $cc + J %bb.1 + + bb.2: + Return + +... + +# Test a VLGVH with a TML that tests all of 16 bits. +# CHECK: ***** Machine Function before Domain Reassignment ***** +# CHECK-NEXT: # Machine code for function fun12: +# CHECK: VLGVH {{.*}}, 4 +# CHECK: TMLMux {{.*}}, 65535, +# CHECK-NEXT: BRC 15, 8 +# CHECK: ***** Machine Function after Domain Reassignment ***** +# CHECK-NEXT: # Machine code for function fun12: +# CHECK: VZERO +# CHECK-NEXT: VLEIH {{.*}}, -1, 4 +# CHECK-NEXT: VTM +# CHECK-NEXT: BRC 13, 8 +--- +name: fun12 +alignment: 16 +tracksRegLiveness: true +registers: + - { id: 1, class: vr128bit } + - { id: 2, class: addr64bit } + - { id: 4, class: grx32bit } + - { id: 5, class: gr64bit } +liveins: + - { reg: '$r2d', virtual-reg: '%2' } + - { reg: '$v24', virtual-reg: '%1' } +frameInfo: + maxAlignment: 1 +machineFunctionInfo: {} +body: | + bb.0: + liveins: $r2d, $v24 + + %2:addr64bit = COPY $r2d + %1:vr128bit = COPY $v24 + + bb.1: + successors: %bb.2(0x7e000000), %bb.1(0x02000000) + + %5:gr64bit = VLGVH %1, $noreg, 4 + %4:grx32bit = COPY %5.subreg_l32 + TMLMux %4, 65535, implicit-def $cc + BRC 15, 8, %bb.2, implicit $cc + J %bb.1 + + bb.2: + Return + +... 
Index: llvm/test/CodeGen/SystemZ/domain-reassignment-11.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/SystemZ/domain-reassignment-11.ll
@@ -0,0 +1,169 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 -debug-only=systemz-domain-reassignment \
+; RUN:   -verify-machineinstrs -domreass-inserts 2>&1 | FileCheck %s
+; REQUIRES: asserts
+;
+; Test domain reassignments involving insertion of scalar results into closures.
+
+; Insert one scalar value into the closure.
+define void @fun0(i64* %Src0, i64* %Src1, i64* %Src2, i64* %Dst) {
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun0: IsSSA, TracksLiveness
+; CHECK: [[REG0:%[0-9]+:gr64bit]] = MSGRKC
+; CHECK: [[REG1:%[0-9]+:vr128bit]] = VLVGG {{.*}}, [[REG0]], $noreg, 0
+; CHECK: [[REG2:%[0-9]+:vr128bit]] = VLEG {{.*}}noreg, 0
+; CHECK-NEXT: [[REG3:%[0-9]+:vr128bit]] = VAG killed [[REG1]], [[REG2]]
+; CHECK-NEXT: VSTEG killed [[REG3]]{{.*}}noreg, 0
+  %LHS = load i64, i64* %Src0
+  %RHS = load i64, i64* %Src1
+  %Prd = mul i64 %LHS, %RHS
+  %Prd1 = mul i64 %Prd, %RHS
+  %RHS2 = load i64, i64* %Src2
+  %Sum = add i64 %Prd1, %RHS2
+  store i64 %Sum, i64* %Dst
+  ret void
+}
+
+; Insert one scalar value into the closure, which uses it twice.
+define void @fun1(i64* %Src0, i64* %Src1, i64* %Src2, i64* %Src3, i64* %Dst) {
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun1: IsSSA, TracksLiveness
+; CHECK: [[REG0:%[0-9]+:gr64bit]] = MSGRKC
+; CHECK: [[REG1:%[0-9]+:vr128bit]] = VLVGG {{.*}}, [[REG0]], $noreg, 0
+; CHECK: [[REG2:%[0-9]+:vr128bit]] = VLEG {{.*}}noreg, 0
+; CHECK-NEXT: [[REG3:%[0-9]+:vr128bit]] = VAG [[REG1]], [[REG2]]
+; CHECK: [[REG4:%[0-9]+:vr128bit]] = VLEG {{.*}}noreg, 0
+; CHECK-NEXT: [[REG5:%[0-9]+:vr128bit]] = VAG killed [[REG1]], [[REG4]]
+; CHECK-NEXT: [[REG6:%[0-9]+:vr128bit]] = VAG killed [[REG3]], killed [[REG5]]
+; CHECK-NEXT: VSTEG killed [[REG6]]{{.*}}noreg, 0
+  %LHS = load i64, i64* %Src0
+  %RHS = load i64, i64* %Src1
+  %Prd = mul i64 %LHS, %RHS
+  %Prd1 = mul i64 %Prd, %RHS
+  %RHS2 = load i64, i64* %Src2
+  %Sum = add i64 %Prd1, %RHS2
+  %RHS3 = load i64, i64* %Src3
+  %Sum2 = add i64 %Prd1, %RHS3
+  %Sum3 = add i64 %Sum, %Sum2
+  store i64 %Sum3, i64* %Dst
+  ret void
+}
+
+; A scalar value used in the closure but also outside of it should not be
+; inserted.
+define void @fun2(i64* %Src0, i64* %Src1, i64* %Src2, i64* %Src3, i64* %Dst, i64* %Dst2) {
+; CHECK: ***** Machine Function before Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun2: IsSSA, TracksLiveness
+; CHECK-NOT: Reassigning closure
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun2: IsSSA, TracksLiveness
+  %LHS = load i64, i64* %Src0
+  %RHS = load i64, i64* %Src1
+  %Prd = mul i64 %LHS, %RHS
+  %Prd1 = mul i64 %Prd, %RHS
+  %RHS2 = load i64, i64* %Src2
+  %Sum = add i64 %Prd1, %RHS2
+  store i64 %Sum, i64* %Dst
+  %Div = load i64, i64* %Src3
+  %D = sdiv i64 %Prd1, %Div
+  store i64 %D, i64* %Dst2
+  ret void
+}
+
+; A scalar value is inserted into the closure, but a scalar instruction uses a
+; value defined in the closure.
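+; (Reassigning would require extracting the closure-defined value back to a
+; GPR for the scalar user, which the pass is expected to avoid.)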
+define void @fun3(i64* %Src0, i64* %Src1, i64* %Src2, i64* %Dst) { +; CHECK: ***** Machine Function before Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun3: IsSSA, TracksLiveness +; CHECK-NOT: Reassigning closure +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun3: IsSSA, TracksLiveness + %LHS = load i64, i64* %Src0 + %RHS = load i64, i64* %Src1 + %Prd = mul i64 %LHS, %RHS + %RHS2 = load i64, i64* %Src2 + %Prd1 = mul i64 %Prd, %RHS2 + %Sum = add i64 %Prd1, %RHS2 + store i64 %Sum, i64* %Dst + ret void +} + +; A scalar value inserted into vector element G1. +define void @fun4(i64* %Src0, i64* %Src1, i64* %Src2, i64* %Src3, i64* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun4: IsSSA, TracksLiveness +; CHECK: VLEG {{.*}}noreg, 0 +; CHECK: [[REG0:%[0-9]+:vr128bit]] = VUPHF +; CHECK: [[REG1:%[0-9]+:vr128bit]] = VLEG {{.*}}noreg, 1 +; CHECK: [[REG2:%[0-9]+:vr128bit]] = VAG killed [[REG0]], [[REG1]] +; CHECK: [[REG3:%[0-9]+:gr64bit]] = MSGRKC +; CHECK: [[REG4:%[0-9]+:vr128bit]] = VLVGG {{.*}} [[REG3]], $noreg, 1 +; CHECK: [[REG5:%[0-9]+:vr128bit]] = VAG killed [[REG2]], killed [[REG4]] +; CHECK: VSTEG killed [[REG5]]{{.*}}noreg, 1 + %L0 = load volatile i64, i64* %Src0 + %T0 = trunc i64 %L0 to i32 + %S0 = sext i32 %T0 to i64 ; G1 + + %L1 = load i64, i64* %Src1 + %R1 = add i64 %S0, %L1 + + %LHS = load i64, i64* %Src2 + %RHS = load i64, i64* %Src3 + %Prd = mul i64 %LHS, %RHS + %Prd1 = mul i64 %Prd, %RHS ; MSGRKC + + %Res = add i64 %R1, %Prd1 + store i64 %Res, i64* %Dst + ret void +} + +; A scalar value inserted into vector element F1. +define void @fun5(i64* %Src0, float* %Src1, i32* %Dst) { +; CHECK: ***** Machine Function after Domain Reassignment ***** +; CHECK-NEXT: # Machine code for function fun5: IsSSA, TracksLiveness +; CHECK: [[REG0:%[0-9]+:vr128bit]] = VLEG {{.*}}noreg, 0 +; CHECK: [[REG1:%[0-9]+:vr128bit]] = COPY killed [[REG0]] +; CHECK: [[REG2:%[0-9]+:gr32bit]] = nofpexcept CFEBR +; CHECK: [[REG3:%[0-9]+:vr128bit]] = VLVGF {{.*}} [[REG2]], $noreg, 1 +; CHECK: [[REG4:%[0-9]+:vr128bit]] = VAF killed [[REG1]], killed [[REG3]] +; CHECK: VSTEF killed [[REG4]]{{.*}}noreg, 1 + %L0 = load volatile i64, i64* %Src0 + %T = trunc i64 %L0 to i32 ; F1 + + %L1 = load float, float* %Src1 + %I1 = fptosi float %L1 to i32 + + %Res = add i32 %T, %I1 + store i32 %Res, i32* %Dst + ret void +} + +; A scalar value needs insertion but the two users require different lanes. 
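+; (%Prd1 would have to be inserted into element 0 for one user and element 1
+; for the other, so the closure is expected to be invalidated with
+; "Invalidated Closure (lanes)".)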
+define void @fun6(i64* %Src0, i64* %Src1, i64* %Src2, i64* %Src3,
+                  double* %Src4, i64* %Dst1, i64* %Dst2) {
+; CHECK: ***** Machine Function before Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun6: IsSSA, TracksLiveness
+; CHECK: Invalidated Closure (lanes)
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun6: IsSSA, TracksLiveness
+  %L0 = load volatile i64, i64* %Src0
+  %T0 = trunc i64 %L0 to i32
+  %S0 = sext i32 %T0 to i64
+
+  %L1 = load i64, i64* %Src1
+  %R1 = add i64 %S0, %L1 ; G1
+
+  %LHS = load i64, i64* %Src2
+  %RHS = load i64, i64* %Src3
+  %Prd = mul i64 %LHS, %RHS
+  %Prd1 = mul i64 %Prd, %RHS ; MSGRKC
+
+  %L4 = load double, double* %Src4
+  %I4 = fptosi double %L4 to i64 ; G0
+
+  %A1 = add i64 %R1, %Prd1 ; Conflict
+  store i64 %A1, i64* %Dst1
+
+  %A2 = add i64 %I4, %Prd1 ; Conflict
+  store i64 %A2, i64* %Dst2
+
+  ret void
+}
Index: llvm/test/CodeGen/SystemZ/domain-reassignment-12.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/SystemZ/domain-reassignment-12.ll
@@ -0,0 +1,19 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 -debug-only=systemz-domain-reassignment \
+; RUN:   -verify-machineinstrs 2>&1 | FileCheck %s
+; REQUIRES: asserts
+;
+
+declare i32* @foo()
+
+; Don't reassign a register around a call.
+define void @fun0(i64* %Src, i64* %Dst) {
+; CHECK: ***** Machine Function before Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun0: IsSSA, TracksLiveness
+; CHECK-NOT: Reassigning closure
+; CHECK: ***** Machine Function after Domain Reassignment *****
+; CHECK-NEXT: # Machine code for function fun0: IsSSA, TracksLiveness
+  %L = load i64, i64* %Src
+  %call = call i32* @foo()
+  store i64 %L, i64* %Dst
+  ret void
+}
Index: llvm/test/CodeGen/SystemZ/domain-reassignment-13.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/SystemZ/domain-reassignment-13.ll
@@ -0,0 +1,65 @@
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 -verify-machineinstrs 2>&1 \
+; RUN:   | FileCheck %s
+;
+; Test that inserted loads of constants are hoisted out of loops.
+
+define void @fun0(<4 x i8> %B15) {
+; CHECK-LABEL: fun0
+; CHECK: vzero
+; CHECK: vleif
+; CHECK: =>This Inner Loop Header
+BB:
+  br label %CF34
+
+CF34:
+  %Tr24 = trunc <4 x i8> %B15 to <4 x i1>
+  %E28 = extractelement <4 x i1> %Tr24, i32 3
+  br i1 %E28, label %CF34, label %CF36
+
+CF36:
+  ret void
+}
+
+define void @fun1(i64* %Src0, i64* %Src1, i64* %Dst, i1 %Cond) {
+; CHECK-LABEL: fun1
+; CHECK: vleig
+; CHECK: =>This Inner Loop Header
+entry:
+  br label %loop
+
+loop:
+  %LHS = load i64, i64* %Src0
+  %RHS = load i64, i64* %Src1
+  %Sum = sub i64 %LHS, %RHS
+  %Res = add i64 %Sum, -16
+  store i64 %Res, i64* %Dst
+  br i1 %Cond, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+; TODO: Only one VLEIF -16 should be needed in this test case.
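+; (Each of the two reassigned chains apparently materializes its own copy of
+; the constant at present.)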
+define void @fun2(i32* %Src0, i32* %Src1, i32* %Src2, i32* %Dst, i1 %Cond) {
+; CHECK-LABEL: fun2
+entry:
+  %LHS = load i32, i32* %Src0
+  %RHS = load i32, i32* %Src1
+  %Sum = sub i32 %LHS, %RHS
+  %A = add i32 %Sum, -16
+
+  %L2 = load i32, i32* %Src2
+  %S2 = sub i32 %LHS, %L2
+  %A2 = add i32 %S2, -16
+
+  %Res = mul i32 %A, %A2
+  store i32 %Res, i32* %Dst
+  ret void
+}
+
+; TODO: An instruction that has a converter which rejects it should still be
+; inserted with -domreass-inserts.
+define void @fun3(i64 %arg, i64* %Dst) {
+  store i64 %arg, i64* %Dst
+  ret void
+}
Index: llvm/test/CodeGen/SystemZ/knownbits.ll
===================================================================
--- llvm/test/CodeGen/SystemZ/knownbits.ll
+++ llvm/test/CodeGen/SystemZ/knownbits.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=s390x-linux-gnu -mcpu=z13 < %s | FileCheck %s
+; RUN: llc -mtriple=s390x-linux-gnu -mcpu=z13 -disable-domreass < %s | FileCheck %s
 
 ; Test that DAGCombiner gets helped by computeKnownBitsForTargetNode().
 
Index: llvm/test/CodeGen/SystemZ/stack-clash-protection.ll
===================================================================
--- llvm/test/CodeGen/SystemZ/stack-clash-protection.ll
+++ llvm/test/CodeGen/SystemZ/stack-clash-protection.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 -O3 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 -O3 -disable-domreass | FileCheck %s
 ;
 ; Test stack clash protection probing for static allocas.
 
Index: llvm/test/CodeGen/SystemZ/subregliveness-04.ll
===================================================================
--- llvm/test/CodeGen/SystemZ/subregliveness-04.ll
+++ llvm/test/CodeGen/SystemZ/subregliveness-04.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=s390x-linux-gnu -mcpu=z13 -disable-early-taildup -disable-cgp -systemz-subreg-liveness < %s | FileCheck %s
+; RUN: llc -mtriple=s390x-linux-gnu -mcpu=z13 -disable-early-taildup -disable-cgp \
+; RUN:   -systemz-subreg-liveness -disable-domreass < %s | FileCheck %s
 
 ; Check for successful compilation.
 ; CHECK: lhi %r0, -5
Index: llvm/test/CodeGen/SystemZ/tls-08.ll
===================================================================
--- llvm/test/CodeGen/SystemZ/tls-08.ll
+++ llvm/test/CodeGen/SystemZ/tls-08.ll
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mcpu=z196 -mtriple=s390x-linux-gnu -O0 \
-; RUN:   -stop-before=regallocfast 2>&1 | FileCheck %s
+; RUN:   -stop-before=regallocfast -disable-domreass 2>&1 | FileCheck %s
 ; RUN: llc < %s -mcpu=z196 -mtriple=s390x-linux-gnu -O3 \
-; RUN:   -stop-before=livevars 2>&1 | FileCheck %s
+; RUN:   -stop-before=livevars -disable-domreass 2>&1 | FileCheck %s
 ;
 ; Test that copies to/from access registers are handled before regalloc with
 ; GR32 regs.