Index: include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h =================================================================== --- include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h +++ include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h @@ -151,8 +151,16 @@ return false; unsigned NumDefs = MI.getNumOperands() - 1; - MachineInstr *MergeI = getOpcodeDef(TargetOpcode::G_MERGE_VALUES, - MI.getOperand(NumDefs).getReg(), MRI); + + unsigned MergingOpcode; + if (MRI.getType(MI.getOperand(NumDefs).getReg()).isVector()) + MergingOpcode = TargetOpcode::G_CONCAT_VECTORS; + else + MergingOpcode = TargetOpcode::G_MERGE_VALUES; + + MachineInstr *MergeI = + getOpcodeDef(MergingOpcode, MI.getOperand(NumDefs).getReg(), MRI); + if (!MergeI) return false; Index: include/llvm/CodeGen/GlobalISel/LegalizerInfo.h =================================================================== --- include/llvm/CodeGen/GlobalISel/LegalizerInfo.h +++ include/llvm/CodeGen/GlobalISel/LegalizerInfo.h @@ -651,6 +651,20 @@ return minScalar(TypeIdx, MinTy).maxScalar(TypeIdx, MaxTy); } + /// Widen the scalar to match the size of another. + LegalizeRuleSet &minScalarSameAs(unsigned TypeIdx, unsigned LargeTypeIdx) { + typeIdx(TypeIdx); + return widenScalarIf( + [=](const LegalityQuery &Query) { + return Query.Types[LargeTypeIdx].getScalarSizeInBits() > + Query.Types[TypeIdx].getSizeInBits(); + }, + [=](const LegalityQuery &Query) { + return std::make_pair(TypeIdx, + Query.Types[LargeTypeIdx].getElementType()); + }); + } + /// Add more elements to the vector to reach the next power of two. /// No effect if the type is not a vector or the element count is a power of /// two. Index: lib/CodeGen/GlobalISel/IRTranslator.cpp =================================================================== --- lib/CodeGen/GlobalISel/IRTranslator.cpp +++ lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -1541,22 +1541,22 @@ // Return the scalar if it is a <1 x Ty> vector. 
if (CAZ->getNumElements() == 1) return translate(*CAZ->getElementValue(0u), Reg); - std::vector<unsigned> Ops; + SmallVector<unsigned, 4> Ops; for (unsigned i = 0; i < CAZ->getNumElements(); ++i) { Constant &Elt = *CAZ->getElementValue(i); Ops.push_back(getOrCreateVReg(Elt)); } - EntryBuilder.buildMerge(Reg, Ops); + EntryBuilder.buildBuildVector(Reg, Ops); } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) { // Return the scalar if it is a <1 x Ty> vector. if (CV->getNumElements() == 1) return translate(*CV->getElementAsConstant(0), Reg); - std::vector<unsigned> Ops; + SmallVector<unsigned, 4> Ops; for (unsigned i = 0; i < CV->getNumElements(); ++i) { Constant &Elt = *CV->getElementAsConstant(i); Ops.push_back(getOrCreateVReg(Elt)); } - EntryBuilder.buildMerge(Reg, Ops); + EntryBuilder.buildBuildVector(Reg, Ops); } else if (auto CE = dyn_cast<ConstantExpr>(&C)) { switch(CE->getOpcode()) { #define HANDLE_INST(NUM, OPCODE, CLASS) \ @@ -1572,7 +1572,7 @@ for (unsigned i = 0; i < CV->getNumOperands(); ++i) { Ops.push_back(getOrCreateVReg(*CV->getOperand(i))); } - EntryBuilder.buildMerge(Reg, Ops); + EntryBuilder.buildBuildVector(Reg, Ops); } else if (auto *BA = dyn_cast<BlockAddress>(&C)) { EntryBuilder.buildBlockAddress(Reg, BA); } else Index: lib/CodeGen/GlobalISel/Legalizer.cpp =================================================================== --- lib/CodeGen/GlobalISel/Legalizer.cpp +++ lib/CodeGen/GlobalISel/Legalizer.cpp @@ -64,6 +64,7 @@ case TargetOpcode::G_SEXT: case TargetOpcode::G_MERGE_VALUES: case TargetOpcode::G_UNMERGE_VALUES: + case TargetOpcode::G_CONCAT_VECTORS: return true; } } Index: lib/CodeGen/GlobalISel/MachineIRBuilder.cpp =================================================================== --- lib/CodeGen/GlobalISel/MachineIRBuilder.cpp +++ lib/CodeGen/GlobalISel/MachineIRBuilder.cpp @@ -494,6 +494,10 @@ if (Ops.size() == 1) return buildCast(Res, Ops[0]); + // If we're trying to merge vectors, we should use G_CONCAT_VECTORS instead. 
+ if (getMRI()->getType(Res).isVector()) + return buildConcatVectors(Res, Ops); + MachineInstrBuilder MIB = buildInstr(TargetOpcode::G_MERGE_VALUES); MIB.addDef(Res); for (unsigned i = 0; i < Ops.size(); ++i) Index: lib/CodeGen/MachineVerifier.cpp =================================================================== --- lib/CodeGen/MachineVerifier.cpp +++ lib/CodeGen/MachineVerifier.cpp @@ -1055,6 +1055,32 @@ } break; } + case TargetOpcode::G_MERGE_VALUES: { + // G_MERGE_VALUES should only be used to merge scalars into a larger scalar, + // e.g. s2N = MERGE sN, sN + // Merging multiple scalars into a vector is not allowed, should use + // G_BUILD_VECTOR for that. + LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); + LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); + if (DstTy.isVector() || SrcTy.isVector()) + report("G_MERGE_VALUES cannot operate on vectors", MI); + break; + } + case TargetOpcode::G_UNMERGE_VALUES: { + LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); + LLT SrcTy = MRI->getType(MI->getOperand(MI->getNumOperands()-1).getReg()); + // For now G_UNMERGE can split vectors. + for (unsigned i = 0; i < MI->getNumOperands()-1; ++i) { + if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) + report("G_UNMERGE_VALUES destination types do not match", MI); + } + if (SrcTy.getSizeInBits() != + (DstTy.getSizeInBits() * (MI->getNumOperands() - 1))) { + report("G_UNMERGE_VALUES source operand does not cover dest operands", + MI); + } + break; + } case TargetOpcode::G_BUILD_VECTOR: { // Source types must be scalars, dest type a vector. Total size of scalars // must match the dest vector size. 
Index: lib/Target/AArch64/AArch64InstructionSelector.cpp =================================================================== --- lib/Target/AArch64/AArch64InstructionSelector.cpp +++ lib/Target/AArch64/AArch64InstructionSelector.cpp @@ -65,6 +65,15 @@ bool selectCompareBranch(MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const; + // Helper to generate an equivalent of scalar_to_vector into a new register, + // returned via 'Dst'. + bool emitScalarToVector(unsigned &Dst, const LLT DstTy, + const TargetRegisterClass *DstRC, unsigned Scalar, + MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + MachineRegisterInfo &MRI) const; + bool selectBuildVector(MachineInstr &I, MachineRegisterInfo &MRI) const; + ComplexRendererFns selectArithImmed(MachineOperand &Root) const; ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root, @@ -1528,11 +1537,130 @@ return constrainSelectedInstRegOperands(*MovMI, TII, TRI, RBI); } } + case TargetOpcode::G_BUILD_VECTOR: + return selectBuildVector(I, MRI); } return false; } +bool AArch64InstructionSelector::emitScalarToVector( + unsigned &Dst, const LLT DstTy, const TargetRegisterClass *DstRC, + unsigned Scalar, MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, MachineRegisterInfo &MRI) const { + Dst = MRI.createVirtualRegister(DstRC); + + unsigned UndefVec = MRI.createVirtualRegister(DstRC); + MachineInstr &UndefMI = *BuildMI(MBB, MBBI, MBBI->getDebugLoc(), + TII.get(TargetOpcode::IMPLICIT_DEF)) + .addDef(UndefVec); + + auto BuildFn = [&](unsigned SubregIndex) { + MachineInstr &InsMI = *BuildMI(MBB, MBBI, MBBI->getDebugLoc(), + TII.get(TargetOpcode::INSERT_SUBREG)) + .addDef(Dst) + .addUse(UndefVec) + .addUse(Scalar) + .addImm(SubregIndex); + constrainSelectedInstRegOperands(UndefMI, TII, TRI, RBI); + return constrainSelectedInstRegOperands(InsMI, TII, TRI, RBI); + }; + + switch (DstTy.getElementType().getSizeInBits()) { + case 32: + return BuildFn(AArch64::ssub); + case 64: + return 
BuildFn(AArch64::dsub); + default: + return false; + } +} + +bool AArch64InstructionSelector::selectBuildVector( + MachineInstr &I, MachineRegisterInfo &MRI) const { + assert(I.getOpcode() == TargetOpcode::G_BUILD_VECTOR); + // Until we port more of the optimized selections, for now just use a vector + // insert sequence. + const LLT DstTy = MRI.getType(I.getOperand(0).getReg()); + const LLT EltTy = MRI.getType(I.getOperand(1).getReg()); + unsigned EltSize = EltTy.getSizeInBits(); + if (EltSize < 32 || EltSize > 64) + return false; // Don't support all element types yet. + const RegisterBank &RB = *RBI.getRegBank(I.getOperand(1).getReg(), MRI, TRI); + unsigned Opc; + unsigned SubregIdx; + if (RB.getID() == AArch64::GPRRegBankID) { + if (EltSize == 32) { + Opc = AArch64::INSvi32gpr; + SubregIdx = AArch64::ssub; + } else { + Opc = AArch64::INSvi64gpr; + SubregIdx = AArch64::dsub; + } + } else { + if (EltSize == 32) { + Opc = AArch64::INSvi32lane; + SubregIdx = AArch64::ssub; + } else { + Opc = AArch64::INSvi64lane; + SubregIdx = AArch64::dsub; + } + } + + if (EltSize * DstTy.getNumElements() != 128) + return false; // Don't handle unpacked vectors yet. + + unsigned DstVec = 0; + const TargetRegisterClass *DstRC = getRegClassForTypeOnBank( + DstTy, RBI.getRegBank(AArch64::FPRRegBankID), RBI); + emitScalarToVector(DstVec, DstTy, DstRC, I.getOperand(1).getReg(), + *I.getParent(), I.getIterator(), MRI); + for (unsigned i = 2, e = DstTy.getSizeInBits() / EltSize + 1; i < e; ++i) { + unsigned InsDef; + // For the last insert re-use the dst reg of the G_BUILD_VECTOR. 
+ if (i + 1 < e) + InsDef = MRI.createVirtualRegister(DstRC); + else + InsDef = I.getOperand(0).getReg(); + unsigned LaneIdx = i - 1; + if (RB.getID() == AArch64::FPRRegBankID) { + unsigned ImpDef = MRI.createVirtualRegister(DstRC); + MachineInstr &ImpDefMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(), + TII.get(TargetOpcode::IMPLICIT_DEF)) + .addDef(ImpDef); + unsigned InsSubDef = MRI.createVirtualRegister(DstRC); + MachineInstr &InsSubMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(), + TII.get(TargetOpcode::INSERT_SUBREG)) + .addDef(InsSubDef) + .addUse(ImpDef) + .addUse(I.getOperand(i).getReg()) + .addImm(SubregIdx); + MachineInstr &InsEltMI = + *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc)) + .addDef(InsDef) + .addUse(DstVec) + .addImm(LaneIdx) + .addUse(InsSubDef) + .addImm(0); + constrainSelectedInstRegOperands(ImpDefMI, TII, TRI, RBI); + constrainSelectedInstRegOperands(InsSubMI, TII, TRI, RBI); + constrainSelectedInstRegOperands(InsEltMI, TII, TRI, RBI); + DstVec = InsDef; + } else { + MachineInstr &InsMI = + *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc)) + .addDef(InsDef) + .addUse(DstVec) + .addImm(LaneIdx) + .addUse(I.getOperand(i).getReg()); + constrainSelectedInstRegOperands(InsMI, TII, TRI, RBI); + DstVec = InsDef; + } + } + I.eraseFromParent(); + return true; +} + /// SelectArithImmed - Select an immediate value that can be represented as /// a 12-bit value shifted left by either 0 or 12. If so, return true with /// Val set to the 12-bit value and Shift set to the shifter operand. 
Index: lib/Target/AArch64/AArch64LegalizerInfo.cpp =================================================================== --- lib/Target/AArch64/AArch64LegalizerInfo.cpp +++ lib/Target/AArch64/AArch64LegalizerInfo.cpp @@ -48,7 +48,7 @@ const LLT v2s64 = LLT::vector(2, 64); getActionDefinitionsBuilder(G_IMPLICIT_DEF) - .legalFor({p0, s1, s8, s16, s32, s64}) + .legalFor({p0, s1, s8, s16, s32, s64, v2s64}) .clampScalar(0, s1, s64) .widenScalarToNextPow2(0, 8); @@ -396,6 +396,18 @@ return VecTy == v4s32 || VecTy == v2s64; }); + getActionDefinitionsBuilder(G_BUILD_VECTOR) + .legalFor({{v4s32, s32}, {v2s64, s64}}) + .clampNumElements(0, v4s32, v4s32) + .clampNumElements(0, v2s64, v2s64) + + // Deal with larger scalar types, which will be implicitly truncated. + .legalIf([=](const LegalityQuery &Query) { + return Query.Types[0].getScalarSizeInBits() < + Query.Types[1].getSizeInBits(); + }) + .minScalarSameAs(1, 0); + computeTables(); verify(*ST.getInstrInfo()); } Index: lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp +++ lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp @@ -182,6 +182,14 @@ (Ty1.getSizeInBits() % 32 == 0); }); + getActionDefinitionsBuilder(G_BUILD_VECTOR) + .legalIf([=](const LegalityQuery &Query) { + const LLT &VecTy = Query.Types[0]; + const LLT &ScalarTy = Query.Types[1]; + return VecTy.getSizeInBits() % 32 == 0 && + ScalarTy.getSizeInBits() % 32 == 0 && + VecTy.getSizeInBits() <= 512; + }); // Merge/Unmerge for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) { unsigned BigTyIdx = Op == G_MERGE_VALUES ? 
0 : 1; Index: lib/Target/X86/X86InstructionSelector.cpp =================================================================== --- lib/Target/X86/X86InstructionSelector.cpp +++ lib/Target/X86/X86InstructionSelector.cpp @@ -373,6 +373,7 @@ case TargetOpcode::G_UNMERGE_VALUES: return selectUnmergeValues(I, MRI, MF, CoverageInfo); case TargetOpcode::G_MERGE_VALUES: + case TargetOpcode::G_CONCAT_VECTORS: return selectMergeValues(I, MRI, MF, CoverageInfo); case TargetOpcode::G_EXTRACT: return selectExtract(I, MRI, MF); @@ -1349,7 +1350,8 @@ bool X86InstructionSelector::selectMergeValues( MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF, CodeGenCoverage &CoverageInfo) const { - assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES) && + assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES || + I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) && "unexpected instruction"); // Split to inserts. Index: lib/Target/X86/X86LegalizerInfo.cpp =================================================================== --- lib/Target/X86/X86LegalizerInfo.cpp +++ lib/Target/X86/X86LegalizerInfo.cpp @@ -271,7 +271,7 @@ // Merge/Unmerge for (const auto &Ty : {v4s32, v2s64}) { - setAction({G_MERGE_VALUES, Ty}, Legal); + setAction({G_CONCAT_VECTORS, Ty}, Legal); setAction({G_UNMERGE_VALUES, 1, Ty}, Legal); } setAction({G_MERGE_VALUES, 1, s64}, Legal); @@ -316,11 +316,11 @@ // Merge/Unmerge for (const auto &Ty : {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) { - setAction({G_MERGE_VALUES, Ty}, Legal); + setAction({G_CONCAT_VECTORS, Ty}, Legal); setAction({G_UNMERGE_VALUES, 1, Ty}, Legal); } for (const auto &Ty : {v16s8, v8s16, v4s32, v2s64}) { - setAction({G_MERGE_VALUES, 1, Ty}, Legal); + setAction({G_CONCAT_VECTORS, 1, Ty}, Legal); setAction({G_UNMERGE_VALUES, Ty}, Legal); } } @@ -367,12 +367,12 @@ // Merge/Unmerge for (const auto &Ty : {v32s8, v64s8, v16s16, v32s16, v8s32, v16s32, v4s64, v8s64}) { - setAction({G_MERGE_VALUES, Ty}, Legal); + setAction({G_CONCAT_VECTORS, 
Ty}, Legal); setAction({G_UNMERGE_VALUES, 1, Ty}, Legal); } for (const auto &Ty : {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) { - setAction({G_MERGE_VALUES, 1, Ty}, Legal); + setAction({G_CONCAT_VECTORS, 1, Ty}, Legal); setAction({G_UNMERGE_VALUES, Ty}, Legal); } } @@ -400,11 +400,11 @@ // Merge/Unmerge for (const auto &Ty : {v64s8, v32s16, v16s32, v8s64}) { - setAction({G_MERGE_VALUES, Ty}, Legal); + setAction({G_CONCAT_VECTORS, Ty}, Legal); setAction({G_UNMERGE_VALUES, 1, Ty}, Legal); } for (const auto &Ty : {v32s8, v16s16, v8s32, v4s64}) { - setAction({G_MERGE_VALUES, 1, Ty}, Legal); + setAction({G_CONCAT_VECTORS, 1, Ty}, Legal); setAction({G_UNMERGE_VALUES, Ty}, Legal); } } Index: test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll =================================================================== --- test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll +++ test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll @@ -1561,7 +1561,7 @@ define <2 x i32> @test_constantaggzerovector_v2i32() { ; CHECK-LABEL: name: test_constantaggzerovector_v2i32 ; CHECK: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 -; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[ZERO]](s32), [[ZERO]](s32) +; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ZERO]](s32), [[ZERO]](s32) ; CHECK: $d0 = COPY [[VEC]](<2 x s32>) ret <2 x i32> zeroinitializer } @@ -1569,7 +1569,7 @@ define <2 x float> @test_constantaggzerovector_v2f32() { ; CHECK-LABEL: name: test_constantaggzerovector_v2f32 ; CHECK: [[ZERO:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00 -; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[ZERO]](s32), [[ZERO]](s32) +; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ZERO]](s32), [[ZERO]](s32) ; CHECK: $d0 = COPY [[VEC]](<2 x s32>) ret <2 x float> zeroinitializer } @@ -1577,7 +1577,7 @@ define i32 @test_constantaggzerovector_v3i32() { ; CHECK-LABEL: name: test_constantaggzerovector_v3i32 ; CHECK: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 -; CHECK: 
[[VEC:%[0-9]+]]:_(<3 x s32>) = G_MERGE_VALUES [[ZERO]](s32), [[ZERO]](s32), [[ZERO]](s32) +; CHECK: [[VEC:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ZERO]](s32), [[ZERO]](s32), [[ZERO]](s32) ; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<3 x s32>) %elt = extractelement <3 x i32> zeroinitializer, i32 1 ret i32 %elt @@ -1587,7 +1587,7 @@ ; CHECK-LABEL: name: test_constantdatavector_v2i32 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 -; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32) +; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C2]](s32) ; CHECK: $d0 = COPY [[VEC]](<2 x s32>) ret <2 x i32> } @@ -1597,7 +1597,7 @@ ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 -; CHECK: [[VEC:%[0-9]+]]:_(<3 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32), [[C3]](s32) +; CHECK: [[VEC:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C2]](s32), [[C3]](s32) ; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<3 x s32>) %elt = extractelement <3 x i32> , i32 1 ret i32 %elt @@ -1609,7 +1609,7 @@ ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 -; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32), [[C3]](s32), [[C4]](s32) +; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C2]](s32), [[C3]](s32), [[C4]](s32) ; CHECK: $q0 = COPY [[VEC]](<4 x s32>) ret <4 x i32> } @@ -1618,7 +1618,7 @@ ; CHECK-LABEL: name: test_constantdatavector_v2f64 ; CHECK: [[FC1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00 ; CHECK: [[FC2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00 -; CHECK: [[VEC:%[0-9]+]]:_(<2 x s64>) = G_MERGE_VALUES [[FC1]](s64), [[FC2]](s64) +; CHECK: [[VEC:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FC1]](s64), [[FC2]](s64) ; CHECK: $q0 = COPY 
[[VEC]](<2 x s64>) ret <2 x double> } @@ -1662,7 +1662,7 @@ ; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 -; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32) +; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C0]](s32), [[C0]](s32) ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](s32), [[UNDEF]], [[MASK]](<2 x s32>) ; CHECK: $d0 = COPY [[VEC]](<2 x s32>) %vec = insertelement <1 x i32> undef, i32 %arg, i32 0 @@ -1688,7 +1688,7 @@ ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 -; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C0]](s32) +; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C0]](s32) ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], [[MASK]](<2 x s32>) ; CHECK: $d0 = COPY [[VEC]](<2 x s32>) %res = shufflevector <2 x i32> %arg, <2 x i32> undef, <2 x i32> @@ -1701,7 +1701,7 @@ ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 -; CHECK-DAG: [[MASK:%[0-9]+]]:_(<3 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C0]](s32), [[C1]](s32) +; CHECK-DAG: [[MASK:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C0]](s32), [[C1]](s32) ; CHECK: [[VEC:%[0-9]+]]:_(<3 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], [[MASK]](<3 x s32>) ; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<3 x s32>) %vec = shufflevector <2 x i32> %arg, <2 x i32> undef, <3 x i32> @@ -1717,7 +1717,7 @@ ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 -; CHECK: [[MASK:%[0-9]+]]:_(<4 x s32>) = G_MERGE_VALUES 
[[C0]](s32), [[C1]](s32), [[C2]](s32), [[C3]](s32) +; CHECK: [[MASK:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C0]](s32), [[C1]](s32), [[C2]](s32), [[C3]](s32) ; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[ARG1]](<2 x s32>), [[ARG2]], [[MASK]](<4 x s32>) ; CHECK: $q0 = COPY [[VEC]](<4 x s32>) %res = shufflevector <2 x i32> %arg1, <2 x i32> %arg2, <4 x i32> @@ -1730,7 +1730,7 @@ ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK-DAG: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 -; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C3]](s32) +; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C3]](s32) ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<4 x s32>), [[UNDEF]], [[MASK]](<2 x s32>) ; CHECK: $d0 = COPY [[VEC]](<2 x s32>) %res = shufflevector <4 x i32> %arg, <4 x i32> undef, <2 x i32> @@ -1758,7 +1758,7 @@ ; CHECK: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 14 ; CHECK: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 ; CHECK: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 15 -; CHECK: [[MASK:%[0-9]+]]:_(<16 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C8]](s32), [[C1]](s32), [[C9]](s32), [[C2]](s32), [[C10]](s32), [[C3]](s32), [[C11]](s32), [[C4]](s32), [[C12]](s32), [[C5]](s32), [[C13]](s32), [[C6]](s32), [[C14]](s32), [[C7]](s32), [[C15]](s32) +; CHECK: [[MASK:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[C0]](s32), [[C8]](s32), [[C1]](s32), [[C9]](s32), [[C2]](s32), [[C10]](s32), [[C3]](s32), [[C11]](s32), [[C4]](s32), [[C12]](s32), [[C5]](s32), [[C13]](s32), [[C6]](s32), [[C14]](s32), [[C7]](s32), [[C15]](s32) ; CHECK: [[VEC:%[0-9]+]]:_(<16 x s8>) = G_SHUFFLE_VECTOR [[ARG1]](<8 x s8>), [[ARG2]], [[MASK]](<16 x s32>) ; CHECK: $q0 = COPY [[VEC]](<16 x s8>) %res = shufflevector <8 x i8> %arg1, <8 x i8> %arg2, <16 x i32> @@ -1768,7 +1768,7 @@ ; CHECK-LABEL: test_constant_vector ; CHECK: [[UNDEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF ; CHECK: 
[[F:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00 -; CHECK: [[M:%[0-9]+]]:_(<4 x s16>) = G_MERGE_VALUES [[UNDEF]](s16), [[UNDEF]](s16), [[UNDEF]](s16), [[F]](s16) +; CHECK: [[M:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[UNDEF]](s16), [[UNDEF]](s16), [[UNDEF]](s16), [[F]](s16) ; CHECK: $d0 = COPY [[M]](<4 x s16>) define <4 x half> @test_constant_vector() { ret <4 x half> Index: test/CodeGen/AArch64/GlobalISel/legalize-add.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-add.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-add.mir @@ -140,8 +140,8 @@ %1:_(<2 x s64>) = COPY $q1 %2:_(<2 x s64>) = COPY $q2 %3:_(<2 x s64>) = COPY $q3 - %4:_(<4 x s64>) = G_MERGE_VALUES %0, %1 - %5:_(<4 x s64>) = G_MERGE_VALUES %2, %3 + %4:_(<4 x s64>) = G_CONCAT_VECTORS %0, %1 + %5:_(<4 x s64>) = G_CONCAT_VECTORS %2, %3 %6:_(<4 x s64>) = G_ADD %4, %5 %7:_(<2 x s64>), %8:_(<2 x s64>) = G_UNMERGE_VALUES %6 $q0 = COPY %7 @@ -179,8 +179,8 @@ %1(<2 x s64>) = COPY $q1 %2(<2 x s64>) = COPY $q2 %3(<2 x s64>) = COPY $q3 - %4(<6 x s64>) = G_MERGE_VALUES %0, %1, %2 - %5(<6 x s64>) = G_MERGE_VALUES %1, %2, %3 + %4(<6 x s64>) = G_CONCAT_VECTORS %0, %1, %2 + %5(<6 x s64>) = G_CONCAT_VECTORS %1, %2, %3 %6(<6 x s64>) = G_ADD %4, %5 %7(<2 x s64>), %8(<2 x s64>), %9(<2 x s64>) = G_UNMERGE_VALUES %6 $q0 = COPY %7 Index: test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir =================================================================== --- /dev/null +++ test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir @@ -0,0 +1,41 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=aarch64-linux-gnu -O0 -run-pass=legalizer %s -o - | FileCheck %s + +--- +name: legal_v4s32 +body: | + bb.0: + liveins: $w0, $w1, $w2, $w3 + ; CHECK-LABEL: name: legal_v4s32 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 + ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2 + ; 
CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $w3 + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32) + ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<4 x s32>) + ; CHECK: RET_ReallyLR + %0:_(s32) = COPY $w0 + %1:_(s32) = COPY $w1 + %2:_(s32) = COPY $w2 + %3:_(s32) = COPY $w3 + %4:_(<4 x s32>) = G_BUILD_VECTOR %0(s32), %1(s32), %2(s32), %3(s32) + $q0 = COPY %4(<4 x s32>) + RET_ReallyLR +... +--- +name: legal_v2s64 +body: | + bb.0: + liveins: $x0, $x1 + ; CHECK-LABEL: name: legal_v2s64 + ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1 + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[COPY]](s64), [[COPY1]](s64) + ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<2 x s64>) + ; CHECK: RET_ReallyLR + %0:_(s64) = COPY $x0 + %1:_(s64) = COPY $x1 + %2:_(<2 x s64>) = G_BUILD_VECTOR %0(s64), %1(s64) + $q0 = COPY %2(<2 x s64>) + RET_ReallyLR +... Index: test/CodeGen/AArch64/GlobalISel/legalize-extract-vector-elt.mir =================================================================== --- /dev/null +++ test/CodeGen/AArch64/GlobalISel/legalize-extract-vector-elt.mir @@ -0,0 +1,21 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=aarch64-linux-gnu -O0 -run-pass=legalizer %s -o - | FileCheck %s + +--- +name: test_eve_1 +body: | + bb.0: + liveins: $q0 + ; CHECK-LABEL: name: test_eve_1 + ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0 + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[C]](s32) + ; CHECK: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s64>), [[SEXT]](s64) + ; CHECK: $x0 = COPY [[EVEC]](s64) + ; CHECK: RET_ReallyLR + %0:_(<2 x s64>) = COPY $q0 + %1:_(s32) = G_CONSTANT i32 1 + %2:_(s64) = G_EXTRACT_VECTOR_ELT %0(<2 x s64>), %1(s32) + $x0 = COPY %2(s64) + RET_ReallyLR +... 
Index: test/CodeGen/AArch64/GlobalISel/legalize-nonpowerof2eltsvec.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-nonpowerof2eltsvec.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-nonpowerof2eltsvec.mir @@ -17,17 +17,29 @@ - { id: 3, class: _ } - { id: 4, class: _ } - { id: 5, class: _ } + - { id: 6, class: _ } + - { id: 7, class: _ } body: | bb.0: liveins: $w0 ; CHECK-LABEL: name: test_legalize_merge_v3s64 ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0 - ; CHECK: [[MV:%[0-9]+]]:_(<3 x s64>) = G_MERGE_VALUES [[COPY]](s64), [[COPY]](s64), [[COPY]](s64) - ; CHECK: $x0 = COPY [[COPY]](s64) - ; CHECK: $noreg = PATCHABLE_RET [[MV]](<3 x s64>) + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[COPY]](s64), [[COPY]](s64), [[COPY]](s64) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 + ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 + ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 + ; CHECK: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s64>), [[C]](s64) + ; CHECK: [[EVEC1:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s64>), [[C1]](s64) + ; CHECK: $x0 = COPY [[EVEC1]](s64) + ; CHECK: $noreg = PATCHABLE_RET [[BUILD_VECTOR]](<3 x s64>) %0(s64) = COPY $x0 - %1(<3 x s64>) = G_MERGE_VALUES %0(s64), %0(s64), %0(s64) - %2(s64), %3(s64), %4(s64) = G_UNMERGE_VALUES %1(<3 x s64>) - $x0 = COPY %3(s64) + %1(<3 x s64>) = G_BUILD_VECTOR %0(s64), %0(s64), %0(s64) + %2(s64) = G_CONSTANT i64 0 + %3(s64) = G_CONSTANT i64 1 + %4(s64) = G_CONSTANT i64 2 + %5(s64) = G_EXTRACT_VECTOR_ELT %1(<3 x s64>), %2(s64) + %6(s64) = G_EXTRACT_VECTOR_ELT %1(<3 x s64>), %3(s64) + %7(s64) = G_EXTRACT_VECTOR_ELT %1(<3 x s64>), %4(s64) + $x0 = COPY %6(s64) $noreg = PATCHABLE_RET %1(<3 x s64>) ... 
Index: test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir +++ test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir @@ -70,7 +70,7 @@ # DEBUG: .. type index coverage check SKIPPED: user-defined predicate detected # # DEBUG-NEXT: G_BUILD_VECTOR (opcode 52): 2 type indices -# DEBUG: .. type index coverage check SKIPPED: no rules defined +# DEBUG: .. type index coverage check SKIPPED: user-defined predicate detected # # DEBUG-NEXT: G_CONCAT_VECTORS (opcode 53): 2 type indices # DEBUG: .. type index coverage check SKIPPED: no rules defined Index: test/CodeGen/AArch64/GlobalISel/select-build-vector.mir =================================================================== --- /dev/null +++ test/CodeGen/AArch64/GlobalISel/select-build-vector.mir @@ -0,0 +1,301 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s +--- | + target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" + target triple = "aarch64" + + define <4 x float> @test_f32(float %a, float %b, float %c, float %d) { + ret <4 x float> undef + } + + define <2 x double> @test_f64(double %a, double %b) { + ret <2 x double> undef + } + + define <4 x i32> @test_i32(i32 %a, i32 %b, i32 %c, i32 %d) { + ret <4 x i32> undef + } + + define <2 x i64> @test_i64(i64 %a, i64 %b) { + ret <2 x i64> undef + } + +... 
+--- +name: test_f32 +alignment: 2 +exposesReturnsTwice: false +legalized: true +regBankSelected: true +selected: false +failedISel: false +tracksRegLiveness: true +registers: + - { id: 0, class: fpr, preferred-register: '' } + - { id: 1, class: fpr, preferred-register: '' } + - { id: 2, class: fpr, preferred-register: '' } + - { id: 3, class: fpr, preferred-register: '' } + - { id: 4, class: fpr, preferred-register: '' } + - { id: 5, class: _, preferred-register: '' } + - { id: 6, class: _, preferred-register: '' } + - { id: 7, class: _, preferred-register: '' } + - { id: 8, class: _, preferred-register: '' } + - { id: 9, class: _, preferred-register: '' } + - { id: 10, class: _, preferred-register: '' } + - { id: 11, class: _, preferred-register: '' } + - { id: 12, class: _, preferred-register: '' } + - { id: 13, class: gpr, preferred-register: '' } + - { id: 14, class: gpr, preferred-register: '' } + - { id: 15, class: gpr, preferred-register: '' } + - { id: 16, class: gpr, preferred-register: '' } +liveins: +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + stackProtector: '' + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false + localFrameSize: 0 + savePoint: '' + restorePoint: '' +fixedStack: +stack: +constants: +body: | + bb.0 (%ir-block.0): + liveins: $s0, $s1, $s2, $s3 + + ; CHECK-LABEL: name: test_f32 + ; CHECK: liveins: $s0, $s1, $s2, $s3 + ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1 + ; CHECK: [[COPY2:%[0-9]+]]:fpr32 = COPY $s2 + ; CHECK: [[COPY3:%[0-9]+]]:fpr32 = COPY $s3 + ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF + ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.ssub + ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF + ; CHECK: 
[[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY1]], %subreg.ssub + ; CHECK: [[INSvi32lane:%[0-9]+]]:fpr128 = INSvi32lane [[INSERT_SUBREG]], 1, [[INSERT_SUBREG1]], 0 + ; CHECK: [[DEF2:%[0-9]+]]:fpr128 = IMPLICIT_DEF + ; CHECK: [[INSERT_SUBREG2:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF2]], [[COPY2]], %subreg.ssub + ; CHECK: [[INSvi32lane1:%[0-9]+]]:fpr128 = INSvi32lane [[INSvi32lane]], 2, [[INSERT_SUBREG2]], 0 + ; CHECK: [[DEF3:%[0-9]+]]:fpr128 = IMPLICIT_DEF + ; CHECK: [[INSERT_SUBREG3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF3]], [[COPY3]], %subreg.ssub + ; CHECK: [[INSvi32lane2:%[0-9]+]]:fpr128 = INSvi32lane [[INSvi32lane1]], 3, [[INSERT_SUBREG3]], 0 + ; CHECK: $q0 = COPY [[INSvi32lane2]] + ; CHECK: RET_ReallyLR implicit $q0 + %0:fpr(s32) = COPY $s0 + %1:fpr(s32) = COPY $s1 + %2:fpr(s32) = COPY $s2 + %3:fpr(s32) = COPY $s3 + %4:fpr(<4 x s32>) = G_BUILD_VECTOR %0(s32), %1(s32), %2(s32), %3(s32) + $q0 = COPY %4(<4 x s32>) + RET_ReallyLR implicit $q0 + +... +--- +name: test_f64 +alignment: 2 +exposesReturnsTwice: false +legalized: true +regBankSelected: true +selected: false +failedISel: false +tracksRegLiveness: true +registers: + - { id: 0, class: fpr, preferred-register: '' } + - { id: 1, class: fpr, preferred-register: '' } + - { id: 2, class: fpr, preferred-register: '' } + - { id: 3, class: fpr, preferred-register: '' } + - { id: 4, class: fpr, preferred-register: '' } + - { id: 5, class: _, preferred-register: '' } + - { id: 6, class: _, preferred-register: '' } + - { id: 7, class: _, preferred-register: '' } + - { id: 8, class: _, preferred-register: '' } + - { id: 9, class: gpr, preferred-register: '' } + - { id: 10, class: gpr, preferred-register: '' } +liveins: +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + stackProtector: '' + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + 
hasVAStart: false + hasMustTailInVarArgFunc: false + localFrameSize: 0 + savePoint: '' + restorePoint: '' +fixedStack: +stack: +constants: +body: | + bb.0 (%ir-block.0): + liveins: $d0, $d1, $d2, $d3 + + ; CHECK-LABEL: name: test_f64 + ; CHECK: liveins: $d0, $d1, $d2, $d3 + ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0 + ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1 + ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF + ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub + ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF + ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY1]], %subreg.dsub + ; CHECK: [[INSvi64lane:%[0-9]+]]:fpr128 = INSvi64lane [[INSERT_SUBREG]], 1, [[INSERT_SUBREG1]], 0 + ; CHECK: $q0 = COPY [[INSvi64lane]] + ; CHECK: RET_ReallyLR implicit $q0 + %0:fpr(s64) = COPY $d0 + %1:fpr(s64) = COPY $d1 + %4:fpr(<2 x s64>) = G_BUILD_VECTOR %0(s64), %1(s64) + $q0 = COPY %4(<2 x s64>) + RET_ReallyLR implicit $q0 + +... +--- +name: test_i32 +alignment: 2 +exposesReturnsTwice: false +legalized: true +regBankSelected: true +selected: false +failedISel: false +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } + - { id: 3, class: gpr, preferred-register: '' } + - { id: 4, class: fpr, preferred-register: '' } + - { id: 5, class: _, preferred-register: '' } + - { id: 6, class: _, preferred-register: '' } + - { id: 7, class: _, preferred-register: '' } + - { id: 8, class: _, preferred-register: '' } + - { id: 9, class: _, preferred-register: '' } + - { id: 10, class: _, preferred-register: '' } + - { id: 11, class: _, preferred-register: '' } + - { id: 12, class: _, preferred-register: '' } +liveins: +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: 
false + hasCalls: false + stackProtector: '' + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false + localFrameSize: 0 + savePoint: '' + restorePoint: '' +fixedStack: +stack: +constants: +body: | + bb.0 (%ir-block.0): + liveins: $w0, $w1, $w2, $w3 + + ; CHECK-LABEL: name: test_i32 + ; CHECK: liveins: $w0, $w1, $w2, $w3 + ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 + ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $w2 + ; CHECK: [[COPY3:%[0-9]+]]:gpr32 = COPY $w3 + ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF + ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.ssub + ; CHECK: [[INSvi32gpr:%[0-9]+]]:fpr128 = INSvi32gpr [[INSERT_SUBREG]], 1, [[COPY1]] + ; CHECK: [[INSvi32gpr1:%[0-9]+]]:fpr128 = INSvi32gpr [[INSvi32gpr]], 2, [[COPY2]] + ; CHECK: [[INSvi32gpr2:%[0-9]+]]:fpr128 = INSvi32gpr [[INSvi32gpr1]], 3, [[COPY3]] + ; CHECK: $q0 = COPY [[INSvi32gpr2]] + ; CHECK: RET_ReallyLR implicit $q0 + %0:gpr(s32) = COPY $w0 + %1:gpr(s32) = COPY $w1 + %2:gpr(s32) = COPY $w2 + %3:gpr(s32) = COPY $w3 + %4:fpr(<4 x s32>) = G_BUILD_VECTOR %0(s32), %1(s32), %2(s32), %3(s32) + $q0 = COPY %4(<4 x s32>) + RET_ReallyLR implicit $q0 + +... 
+--- +name: test_i64 +alignment: 2 +exposesReturnsTwice: false +legalized: true +regBankSelected: true +selected: false +failedISel: false +tracksRegLiveness: true +registers: + - { id: 0, class: gpr, preferred-register: '' } + - { id: 1, class: gpr, preferred-register: '' } + - { id: 2, class: gpr, preferred-register: '' } + - { id: 3, class: gpr, preferred-register: '' } + - { id: 4, class: fpr, preferred-register: '' } + - { id: 5, class: _, preferred-register: '' } + - { id: 6, class: _, preferred-register: '' } + - { id: 7, class: _, preferred-register: '' } + - { id: 8, class: _, preferred-register: '' } +liveins: +frameInfo: + isFrameAddressTaken: false + isReturnAddressTaken: false + hasStackMap: false + hasPatchPoint: false + stackSize: 0 + offsetAdjustment: 0 + maxAlignment: 0 + adjustsStack: false + hasCalls: false + stackProtector: '' + maxCallFrameSize: 0 + hasOpaqueSPAdjustment: false + hasVAStart: false + hasMustTailInVarArgFunc: false + localFrameSize: 0 + savePoint: '' + restorePoint: '' +fixedStack: +stack: +constants: +body: | + bb.0 (%ir-block.0): + liveins: $x0, $x1, $x2, $x3 + + ; CHECK-LABEL: name: test_i64 + ; CHECK: liveins: $x0, $x1, $x2, $x3 + ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 + ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF + ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub + ; CHECK: [[INSvi64gpr:%[0-9]+]]:fpr128 = INSvi64gpr [[INSERT_SUBREG]], 1, [[COPY1]] + ; CHECK: $q0 = COPY [[INSvi64gpr]] + ; CHECK: RET_ReallyLR implicit $q0 + %0:gpr(s64) = COPY $x0 + %1:gpr(s64) = COPY $x1 + %4:fpr(<2 x s64>) = G_BUILD_VECTOR %0(s64), %1(s64) + $q0 = COPY %4(<2 x s64>) + RET_ReallyLR implicit $q0 + +... 
Index: test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir @@ -58,12 +58,12 @@ liveins: $vgpr0 ; CHECK-LABEL: name: extract_vector_elt_0_v5i32 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 - ; CHECK: [[MV:%[0-9]+]]:_(<5 x s32>) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32) + ; CHECK: [[MV:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32) ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[MV]](<5 x s32>), [[C]](s32) ; CHECK: $vgpr0 = COPY [[EVEC]](s32) %0:_(s32) = COPY $vgpr0 - %1:_(<5 x s32>) = G_MERGE_VALUES %0, %0, %0, %0, %0 + %1:_(<5 x s32>) = G_BUILD_VECTOR %0, %0, %0, %0, %0 %2:_(s32) = G_CONSTANT i32 0 %3:_(s32) = G_EXTRACT_VECTOR_ELT %1, %2 $vgpr0 = COPY %3 @@ -77,12 +77,12 @@ liveins: $vgpr0 ; CHECK-LABEL: name: extract_vector_elt_0_v6i32 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 - ; CHECK: [[MV:%[0-9]+]]:_(<6 x s32>) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32) + ; CHECK: [[MV:%[0-9]+]]:_(<6 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32) ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[MV]](<6 x s32>), [[C]](s32) ; CHECK: $vgpr0 = COPY [[EVEC]](s32) %0:_(s32) = COPY $vgpr0 - %1:_(<6 x s32>) = G_MERGE_VALUES %0, %0, %0, %0, %0, %0 + %1:_(<6 x s32>) = G_BUILD_VECTOR %0, %0, %0, %0, %0, %0 %2:_(s32) = G_CONSTANT i32 0 %3:_(s32) = G_EXTRACT_VECTOR_ELT %1, %2 $vgpr0 = COPY %3 @@ -96,12 +96,12 @@ liveins: $vgpr0 ; CHECK-LABEL: name: extract_vector_elt_0_v7i32 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY 
$vgpr0 - ; CHECK: [[MV:%[0-9]+]]:_(<7 x s32>) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32) + ; CHECK: [[MV:%[0-9]+]]:_(<7 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32) ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[MV]](<7 x s32>), [[C]](s32) ; CHECK: $vgpr0 = COPY [[EVEC]](s32) %0:_(s32) = COPY $vgpr0 - %1:_(<7 x s32>) = G_MERGE_VALUES %0, %0, %0, %0, %0, %0, %0 + %1:_(<7 x s32>) = G_BUILD_VECTOR %0, %0, %0, %0, %0, %0, %0 %2:_(s32) = G_CONSTANT i32 0 %3:_(s32) = G_EXTRACT_VECTOR_ELT %1, %2 $vgpr0 = COPY %3 @@ -115,12 +115,12 @@ liveins: $vgpr0 ; CHECK-LABEL: name: extract_vector_elt_0_v8i32 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 - ; CHECK: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32) + ; CHECK: [[MV:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32) ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[MV]](<8 x s32>), [[C]](s32) ; CHECK: $vgpr0 = COPY [[EVEC]](s32) %0:_(s32) = COPY $vgpr0 - %1:_(<8 x s32>) = G_MERGE_VALUES %0, %0, %0, %0, %0, %0, %0, %0 + %1:_(<8 x s32>) = G_BUILD_VECTOR %0, %0, %0, %0, %0, %0, %0, %0 %2:_(s32) = G_CONSTANT i32 0 %3:_(s32) = G_EXTRACT_VECTOR_ELT %1, %2 $vgpr0 = COPY %3 @@ -134,12 +134,12 @@ liveins: $vgpr0 ; CHECK-LABEL: name: extract_vector_elt_0_v16i32 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 - ; CHECK: [[MV:%[0-9]+]]:_(<16 x s32>) = G_MERGE_VALUES [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), 
[[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32) + ; CHECK: [[MV:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32) ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[MV]](<16 x s32>), [[C]](s32) ; CHECK: $vgpr0 = COPY [[EVEC]](s32) %0:_(s32) = COPY $vgpr0 - %1:_(<16 x s32>) = G_MERGE_VALUES %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0 + %1:_(<16 x s32>) = G_BUILD_VECTOR %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0, %0 %2:_(s32) = G_CONSTANT i32 0 %3:_(s32) = G_EXTRACT_VECTOR_ELT %1, %2 $vgpr0 = COPY %3 Index: test/CodeGen/AMDGPU/GlobalISel/legalize-merge-values-build-vector.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-merge-values-build-vector.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-merge-values-build-vector.mir @@ -23,11 +23,11 @@ ; CHECK-LABEL: name: test_merge_s32_s32_v2s32 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 - ; CHECK: [[MV:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C]](s32), [[C1]](s32) + ; CHECK: [[MV:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C1]](s32) ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](<2 x s32>) %0:_(s32) = G_CONSTANT i32 0 %1:_(s32) = G_CONSTANT i32 1 - %2:_(<2 x s32>) = G_MERGE_VALUES %0:_(s32), %1:_(s32) + %2:_(<2 x s32>) = G_BUILD_VECTOR %0:_(s32), %1:_(s32) $vgpr0_vgpr1 = COPY %2(<2 x s32>) ... 
@@ -39,12 +39,12 @@ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 - ; CHECK: [[MV:%[0-9]+]]:_(<3 x s32>) = G_MERGE_VALUES [[C]](s32), [[C1]](s32), [[C2]](s32) + ; CHECK: [[MV:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C1]](s32), [[C2]](s32) ; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[MV]](<3 x s32>) %0:_(s32) = G_CONSTANT i32 0 %1:_(s32) = G_CONSTANT i32 1 %2:_(s32) = G_CONSTANT i32 2 - %3:_(<3 x s32>) = G_MERGE_VALUES %0:_(s32), %1:_(s32), %2:_(s32) + %3:_(<3 x s32>) = G_BUILD_VECTOR %0:_(s32), %1:_(s32), %2:_(s32) $vgpr0_vgpr1_vgpr2 = COPY %3(<3 x s32>) ... @@ -55,11 +55,11 @@ ; CHECK-LABEL: name: test_merge_s64_s64_s128 ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 - ; CHECK: [[MV:%[0-9]+]]:_(<2 x s64>) = G_MERGE_VALUES [[C]](s64), [[C1]](s64) + ; CHECK: [[MV:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C1]](s64) ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](<2 x s64>) %0:_(s64) = G_CONSTANT i64 0 %1:_(s64) = G_CONSTANT i64 1 - %2:_(<2 x s64>) = G_MERGE_VALUES %0(s64), %1(s64) + %2:_(<2 x s64>) = G_BUILD_VECTOR %0(s64), %1(s64) $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %2(<2 x s64>) ... 
@@ -72,13 +72,13 @@ ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2 ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 3 - ; CHECK: [[MV:%[0-9]+]]:_(<4 x s64>) = G_MERGE_VALUES [[C]](s64), [[C1]](s64), [[C2]](s64), [[C3]](s64) + ; CHECK: [[MV:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C1]](s64), [[C2]](s64), [[C3]](s64) ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV]](<4 x s64>) %0:_(s64) = G_CONSTANT i64 0 %1:_(s64) = G_CONSTANT i64 1 %2:_(s64) = G_CONSTANT i64 2 %3:_(s64) = G_CONSTANT i64 3 - %4:_(<4 x s64>) = G_MERGE_VALUES %0(s64), %1(s64), %2(s64), %3(s64) + %4:_(<4 x s64>) = G_BUILD_VECTOR %0(s64), %1(s64), %2(s64), %3(s64) $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %4(<4 x s64>) ... @@ -109,6 +109,6 @@ # %16:_(s32) = G_CONSTANT i32 16 -# %17:_(<17 x s32>) = G_MERGE_VALUES %0:_(s32), %1:_(s32), %2:_(s32), %3:_(s32), %4:_(s32), %5:_(s32), %6:_(s32), %7:_(s32), %8:_(s32), %9:_(s32), %10:_(s32), %11:_(s32), %12:_(s32), %13:_(s32), %14:_(s32), %15:_(s32), %16:_(s32) +# %17:_(<17 x s32>) = G_BUILD_VECTOR %0:_(s32), %1:_(s32), %2:_(s32), %3:_(s32), %4:_(s32), %5:_(s32), %6:_(s32), %7:_(s32), %8:_(s32), %9:_(s32), %10:_(s32), %11:_(s32), %12:_(s32), %13:_(s32), %14:_(s32), %15:_(s32), %16:_(s32) # S_ENDPGM implicit %17(<17 x s32>) # ... Index: test/CodeGen/AMDGPU/GlobalISel/legalize-unmerge-values.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-unmerge-values.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-unmerge-values.mir @@ -16,18 +16,3 @@ $vgpr2 = COPY %2(s32) ... 
---- -name: test_unmerge_v2s32_s32 -body: | - bb.0: - liveins: $vgpr0_vgpr1 - ; CHECK-LABEL: name: test_unmerge_v2s32_s32 - ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1 - ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>) - ; CHECK: $vgpr0 = COPY [[UV]](s32) - ; CHECK: $vgpr2 = COPY [[UV1]](s32) - %0:_(<2 x s32>) = COPY $vgpr0_vgpr1 - %1:_(s32), %2:_(s32) = G_UNMERGE_VALUES %0:_(<2 x s32>) - $vgpr0 = COPY %1(s32) - $vgpr2 = COPY %2(s32) -... Index: test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll =================================================================== --- test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll +++ test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll @@ -440,7 +440,7 @@ ; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $r0 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 -; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32) +; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C0]](s32), [[C0]](s32) ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](s32), [[UNDEF]], [[MASK]](<2 x s32>) ; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<2 x s32>) %vec = insertelement <1 x i32> undef, i32 %arg, i32 0 @@ -456,7 +456,7 @@ ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 -; CHECK-DAG: [[MASK:%[0-9]+]]:_(<3 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C0]](s32), [[C1]](s32) +; CHECK-DAG: [[MASK:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C0]](s32), [[C1]](s32) ; CHECK-DAG: [[V1:%[0-9]+]]:_(<2 x s32>) = G_INSERT_VECTOR_ELT [[UNDEF]], [[ARG1]](s32), [[C0]](s32) ; CHECK-DAG: [[V2:%[0-9]+]]:_(<2 x s32>) = G_INSERT_VECTOR_ELT [[V1]], [[ARG2]](s32), [[C1]](s32) ; CHECK: [[VEC:%[0-9]+]]:_(<3 x s32>) = G_SHUFFLE_VECTOR [[V2]](<2 x s32>), [[UNDEF]], [[MASK]](<3 x s32>) @@ -476,7 +476,7 @@ ; 
CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 -; CHECK-DAG: [[MASK:%[0-9]+]]:_(<4 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32), [[C0]](s32), [[C0]](s32) +; CHECK-DAG: [[MASK:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C0]](s32), [[C0]](s32), [[C0]](s32), [[C0]](s32) ; CHECK-DAG: [[V1:%[0-9]+]]:_(<2 x s32>) = G_INSERT_VECTOR_ELT [[UNDEF]], [[ARG1]](s32), [[C0]](s32) ; CHECK-DAG: [[V2:%[0-9]+]]:_(<2 x s32>) = G_INSERT_VECTOR_ELT [[V1]], [[ARG2]](s32), [[C1]](s32) ; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[V2]](<2 x s32>), [[UNDEF]], [[MASK]](<4 x s32>) @@ -499,7 +499,7 @@ ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK-DAG: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 ; CHECK-DAG: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 -; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C3]](s32) +; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C3]](s32) ; CHECK-DAG: [[V1:%[0-9]+]]:_(<4 x s32>) = G_INSERT_VECTOR_ELT [[UNDEF]], [[ARG1]](s32), [[C0]](s32) ; CHECK-DAG: [[V2:%[0-9]+]]:_(<4 x s32>) = G_INSERT_VECTOR_ELT [[V1]], [[ARG2]](s32), [[C1]](s32) ; CHECK-DAG: [[V3:%[0-9]+]]:_(<4 x s32>) = G_INSERT_VECTOR_ELT [[V2]], [[ARG3]](s32), [[C2]](s32) @@ -521,7 +521,7 @@ ; CHECK-LABEL: name: test_constantstruct_v2s32 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 -; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32) +; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C2]](s32) ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; CHECK: G_EXTRACT_VECTOR_ELT [[VEC]](<2 x s32>), [[C3]] %vec = extractvalue %struct.v2s32 {<2 x i32>}, 0 @@ -535,7 +535,7 @@ ; CHECK-LABEL: name: test_constantstruct_v2s32_s32_s32 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 ; CHECK: [[C2:%[0-9]+]]:_(s32) = 
G_CONSTANT i32 2 -; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32) +; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C2]](s32) ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 ; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 Index: test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll =================================================================== --- test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll +++ test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll @@ -285,7 +285,7 @@ ; X32: liveins: $xmm0, $xmm1 ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0 ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1 - ; X32: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>) + ; X32: [[MV:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>) ; X32: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV]](<8 x s32>) ; X32: $xmm0 = COPY [[UV]](<4 x s32>) ; X32: $xmm1 = COPY [[UV1]](<4 x s32>) @@ -295,7 +295,7 @@ ; X64: liveins: $xmm0, $xmm1 ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0 ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1 - ; X64: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>) + ; X64: [[MV:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>) ; X64: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV]](<8 x s32>) ; X64: $xmm0 = COPY [[UV]](<4 x s32>) ; X64: $xmm1 = COPY [[UV1]](<4 x s32>) @@ -494,8 +494,8 @@ ; X32: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $xmm2 ; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0 ; X32: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 16 from %fixed-stack.0, align 0) - ; X32: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>) - ; X32: 
[[MV1:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY2]](<4 x s32>), [[LOAD]](<4 x s32>) + ; X32: [[MV:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>) + ; X32: [[MV1:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY2]](<4 x s32>), [[LOAD]](<4 x s32>) ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp ; X32: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV1]](<8 x s32>) ; X32: $xmm0 = COPY [[UV]](<4 x s32>) @@ -503,7 +503,7 @@ ; X32: CALLpcrel32 @split_return_callee, csr_32, implicit $esp, implicit $ssp, implicit $xmm0, implicit $xmm1, implicit-def $xmm0, implicit-def $xmm1 ; X32: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0 ; X32: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1 - ; X32: [[MV2:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY3]](<4 x s32>), [[COPY4]](<4 x s32>) + ; X32: [[MV2:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY3]](<4 x s32>), [[COPY4]](<4 x s32>) ; X32: ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp ; X32: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[MV]], [[MV2]] ; X32: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[ADD]](<8 x s32>) @@ -517,8 +517,8 @@ ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1 ; X64: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $xmm2 ; X64: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $xmm3 - ; X64: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>) - ; X64: [[MV1:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY2]](<4 x s32>), [[COPY3]](<4 x s32>) + ; X64: [[MV:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>) + ; X64: [[MV1:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY2]](<4 x s32>), [[COPY3]](<4 x s32>) ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp ; X64: 
[[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV1]](<8 x s32>) ; X64: $xmm0 = COPY [[UV]](<4 x s32>) @@ -526,7 +526,7 @@ ; X64: CALL64pcrel32 @split_return_callee, csr_64, implicit $rsp, implicit $ssp, implicit $xmm0, implicit $xmm1, implicit-def $xmm0, implicit-def $xmm1 ; X64: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0 ; X64: [[COPY5:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1 - ; X64: [[MV2:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY4]](<4 x s32>), [[COPY5]](<4 x s32>) + ; X64: [[MV2:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY4]](<4 x s32>), [[COPY5]](<4 x s32>) ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp ; X64: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[MV]], [[MV2]] ; X64: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[ADD]](<8 x s32>) Index: test/CodeGen/X86/GlobalISel/legalize-add-v256.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-add-v256.mir +++ test/CodeGen/X86/GlobalISel/legalize-add-v256.mir @@ -49,8 +49,8 @@ ; SSE2: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]] ; AVX1: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]] ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]] - ; SSE2: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>) - ; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>) + ; SSE2: [[MV:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>) + ; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>) ; SSE2: $ymm0 = COPY [[MV]](<32 x s8>) ; AVX1: $ymm0 = COPY [[MV]](<32 x s8>) ; AVX2: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[DEF]], [[DEF1]] @@ -83,12 +83,12 @@ ; SSE2: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>) ; SSE2: 
[[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]] ; SSE2: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]] - ; SSE2: [[MV:%[0-9]+]]:_(<16 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>) + ; SSE2: [[MV:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>) ; AVX1: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>) ; AVX1: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>) ; AVX1: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]] ; AVX1: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]] - ; AVX1: [[MV:%[0-9]+]]:_(<16 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>) + ; AVX1: [[MV:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>) ; SSE2: $ymm0 = COPY [[MV]](<16 x s16>) ; AVX1: $ymm0 = COPY [[MV]](<16 x s16>) ; AVX2: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[DEF]], [[DEF1]] @@ -121,13 +121,13 @@ ; SSE2: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>) ; SSE2: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]] ; SSE2: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]] - ; SSE2: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>) + ; SSE2: [[MV:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>) ; SSE2: $ymm0 = COPY [[MV]](<8 x s32>) ; AVX1: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>) ; AVX1: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>) ; AVX1: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]] ; AVX1: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]] - ; AVX1: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>) + ; AVX1: [[MV:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>) ; 
AVX1: $ymm0 = COPY [[MV]](<8 x s32>) ; AVX2: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[DEF]], [[DEF1]] ; AVX2: $ymm0 = COPY [[ADD]](<8 x s32>) @@ -159,12 +159,12 @@ ; SSE2: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>) ; SSE2: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]] ; SSE2: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]] - ; SSE2: [[MV:%[0-9]+]]:_(<4 x s64>) = G_MERGE_VALUES [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>) + ; SSE2: [[MV:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>) ; AVX1: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>) ; AVX1: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>) ; AVX1: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]] ; AVX1: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]] - ; AVX1: [[MV:%[0-9]+]]:_(<4 x s64>) = G_MERGE_VALUES [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>) + ; AVX1: [[MV:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>) ; SSE2: $ymm0 = COPY [[MV]](<4 x s64>) ; AVX1: $ymm0 = COPY [[MV]](<4 x s64>) ; AVX2: [[ADD:%[0-9]+]]:_(<4 x s64>) = G_ADD [[DEF]], [[DEF1]] Index: test/CodeGen/X86/GlobalISel/legalize-add-v512.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-add-v512.mir +++ test/CodeGen/X86/GlobalISel/legalize-add-v512.mir @@ -51,13 +51,13 @@ ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV5]] ; AVX1: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]] ; AVX1: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]] - ; AVX1: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>), [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>) + ; AVX1: [[MV:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>), [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>) ; AVX1: $zmm0 = COPY 
[[MV]](<64 x s8>) ; AVX512F: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF]](<64 x s8>) ; AVX512F: [[UV2:%[0-9]+]]:_(<32 x s8>), [[UV3:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF1]](<64 x s8>) ; AVX512F: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV]], [[UV2]] ; AVX512F: [[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV1]], [[UV3]] - ; AVX512F: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<32 x s8>), [[ADD1]](<32 x s8>) + ; AVX512F: [[MV:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[ADD]](<32 x s8>), [[ADD1]](<32 x s8>) ; AVX512F: $zmm0 = COPY [[MV]](<64 x s8>) ; AVX512BW: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[DEF]], [[DEF1]] ; AVX512BW: $zmm0 = COPY [[ADD]](<64 x s8>) @@ -91,13 +91,13 @@ ; AVX1: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV5]] ; AVX1: [[ADD2:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV2]], [[UV6]] ; AVX1: [[ADD3:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV3]], [[UV7]] - ; AVX1: [[MV:%[0-9]+]]:_(<32 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>), [[ADD2]](<8 x s16>), [[ADD3]](<8 x s16>) + ; AVX1: [[MV:%[0-9]+]]:_(<32 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>), [[ADD2]](<8 x s16>), [[ADD3]](<8 x s16>) ; AVX1: $zmm0 = COPY [[MV]](<32 x s16>) ; AVX512F: [[UV:%[0-9]+]]:_(<16 x s16>), [[UV1:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF]](<32 x s16>) ; AVX512F: [[UV2:%[0-9]+]]:_(<16 x s16>), [[UV3:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF1]](<32 x s16>) ; AVX512F: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV]], [[UV2]] ; AVX512F: [[ADD1:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV1]], [[UV3]] - ; AVX512F: [[MV:%[0-9]+]]:_(<32 x s16>) = G_MERGE_VALUES [[ADD]](<16 x s16>), [[ADD1]](<16 x s16>) + ; AVX512F: [[MV:%[0-9]+]]:_(<32 x s16>) = G_CONCAT_VECTORS [[ADD]](<16 x s16>), [[ADD1]](<16 x s16>) ; AVX512F: $zmm0 = COPY [[MV]](<32 x s16>) ; AVX512BW: [[ADD:%[0-9]+]]:_(<32 x s16>) = G_ADD [[DEF]], [[DEF1]] ; AVX512BW: $zmm0 = COPY [[ADD]](<32 x s16>) @@ -131,7 +131,7 @@ ; AVX1: 
[[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV5]] ; AVX1: [[ADD2:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV2]], [[UV6]] ; AVX1: [[ADD3:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV3]], [[UV7]] - ; AVX1: [[MV:%[0-9]+]]:_(<16 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>), [[ADD2]](<4 x s32>), [[ADD3]](<4 x s32>) + ; AVX1: [[MV:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>), [[ADD2]](<4 x s32>), [[ADD3]](<4 x s32>) ; AVX1: $zmm0 = COPY [[MV]](<16 x s32>) ; AVX512F: [[ADD:%[0-9]+]]:_(<16 x s32>) = G_ADD [[DEF]], [[DEF1]] ; AVX512F: $zmm0 = COPY [[ADD]](<16 x s32>) @@ -167,7 +167,7 @@ ; AVX1: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV5]] ; AVX1: [[ADD2:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV2]], [[UV6]] ; AVX1: [[ADD3:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV3]], [[UV7]] - ; AVX1: [[MV:%[0-9]+]]:_(<8 x s64>) = G_MERGE_VALUES [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>), [[ADD2]](<2 x s64>), [[ADD3]](<2 x s64>) + ; AVX1: [[MV:%[0-9]+]]:_(<8 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>), [[ADD2]](<2 x s64>), [[ADD3]](<2 x s64>) ; AVX1: $zmm0 = COPY [[MV]](<8 x s64>) ; AVX512F: [[ADD:%[0-9]+]]:_(<8 x s64>) = G_ADD [[DEF]], [[DEF1]] ; AVX512F: $zmm0 = COPY [[ADD]](<8 x s64>) @@ -215,16 +215,16 @@ ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV5]] ; AVX1: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]] ; AVX1: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]] - ; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>) - ; AVX1: [[MV1:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>) + ; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>) + ; AVX1: [[MV1:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>) ; AVX1: $ymm0 = COPY [[MV]](<32 x s8>) ; AVX1: $ymm1 = COPY [[MV1]](<32 x s8>) ; AVX512F: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY]], [[COPY2]] ; AVX512F: 
[[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY1]], [[COPY3]] ; AVX512F: $ymm0 = COPY [[ADD]](<32 x s8>) ; AVX512F: $ymm1 = COPY [[ADD1]](<32 x s8>) - ; AVX512BW: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[COPY]](<32 x s8>), [[COPY1]](<32 x s8>) - ; AVX512BW: [[MV1:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[COPY2]](<32 x s8>), [[COPY3]](<32 x s8>) + ; AVX512BW: [[MV:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[COPY]](<32 x s8>), [[COPY1]](<32 x s8>) + ; AVX512BW: [[MV1:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[COPY2]](<32 x s8>), [[COPY3]](<32 x s8>) ; AVX512BW: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[MV]], [[MV1]] ; AVX512BW: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[ADD]](<64 x s8>) ; AVX512BW: $ymm0 = COPY [[UV]](<32 x s8>) @@ -234,8 +234,8 @@ %3(<32 x s8>) = COPY $ymm1 %4(<32 x s8>) = COPY $ymm2 %5(<32 x s8>) = COPY $ymm3 - %0(<64 x s8>) = G_MERGE_VALUES %2(<32 x s8>), %3(<32 x s8>) - %1(<64 x s8>) = G_MERGE_VALUES %4(<32 x s8>), %5(<32 x s8>) + %0(<64 x s8>) = G_CONCAT_VECTORS %2(<32 x s8>), %3(<32 x s8>) + %1(<64 x s8>) = G_CONCAT_VECTORS %4(<32 x s8>), %5(<32 x s8>) %6(<64 x s8>) = G_ADD %0, %1 %7(<32 x s8>), %8(<32 x s8>) = G_UNMERGE_VALUES %6(<64 x s8>) $ymm0 = COPY %7(<32 x s8>) Index: test/CodeGen/X86/GlobalISel/select-merge-vec256.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-merge-vec256.mir +++ test/CodeGen/X86/GlobalISel/select-merge-vec256.mir @@ -33,7 +33,7 @@ ; AVX512VL: $ymm0 = COPY [[VINSERTF32x4Z256rr]] ; AVX512VL: RET 0, implicit $ymm0 %0(<4 x s32>) = IMPLICIT_DEF - %1(<8 x s32>) = G_MERGE_VALUES %0(<4 x s32>), %0(<4 x s32>) + %1(<8 x s32>) = G_CONCAT_VECTORS %0(<4 x s32>), %0(<4 x s32>) $ymm0 = COPY %1(<8 x s32>) RET 0, implicit $ymm0 Index: test/CodeGen/X86/GlobalISel/select-merge-vec512.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-merge-vec512.mir +++ 
test/CodeGen/X86/GlobalISel/select-merge-vec512.mir @@ -30,7 +30,7 @@ ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr2]] ; ALL: RET 0, implicit $zmm0 %0(<4 x s32>) = IMPLICIT_DEF - %1(<16 x s32>) = G_MERGE_VALUES %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>) + %1(<16 x s32>) = G_CONCAT_VECTORS %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>) $zmm0 = COPY %1(<16 x s32>) RET 0, implicit $zmm0 @@ -53,7 +53,7 @@ ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]] ; ALL: RET 0, implicit $zmm0 %0(<8 x s32>) = IMPLICIT_DEF - %1(<16 x s32>) = G_MERGE_VALUES %0(<8 x s32>), %0(<8 x s32>) + %1(<16 x s32>) = G_CONCAT_VECTORS %0(<8 x s32>), %0(<8 x s32>) $zmm0 = COPY %1(<16 x s32>) RET 0, implicit $zmm0