Index: include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
===================================================================
--- include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -28,6 +28,18 @@
   MachineRegisterInfo &MRI;
   const LegalizerInfo &LI;
 
+  static bool isArtifactCast(unsigned Opc) {
+    switch (Opc) {
+    case TargetOpcode::G_TRUNC:
+    case TargetOpcode::G_SEXT:
+    case TargetOpcode::G_ZEXT:
+    case TargetOpcode::G_ANYEXT:
+      return true;
+    default:
+      return false;
+    }
+  }
+
 public:
   LegalizationArtifactCombiner(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                                const LegalizerInfo &LI)
@@ -208,21 +220,33 @@
       return false;
 
     unsigned NumDefs = MI.getNumOperands() - 1;
+    MachineInstr *SrcDef =
+        getDefIgnoringCopies(MI.getOperand(NumDefs).getReg(), MRI);
+    if (!SrcDef)
+      return false;
 
    LLT OpTy = MRI.getType(MI.getOperand(NumDefs).getReg());
    LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
+    MachineInstr *MergeI = SrcDef;
+    unsigned ConvertOp = 0;
+
+    // Handle intermediate conversions
+    unsigned SrcOp = SrcDef->getOpcode();
+    if (isArtifactCast(SrcOp)) {
+      ConvertOp = SrcOp;
+      MergeI = getDefIgnoringCopies(SrcDef->getOperand(1).getReg(), MRI);
+    }
 
+    // FIXME: Handle scalarizing concat_vectors (scalar result type with vector
+    // source)
     unsigned MergingOpcode = getMergeOpcode(OpTy, DestTy);
-    MachineInstr *MergeI =
-        getOpcodeDef(MergingOpcode, MI.getOperand(NumDefs).getReg(), MRI);
-
-    if (!MergeI)
+    if (!MergeI || MergeI->getOpcode() != MergingOpcode)
      return false;
 
     const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;
 
     if (NumMergeRegs < NumDefs) {
-      if (NumDefs % NumMergeRegs != 0)
+      if (ConvertOp != 0 || NumDefs % NumMergeRegs != 0)
         return false;
 
       Builder.setInstr(MI);
@@ -244,7 +268,7 @@
       }
 
     } else if (NumMergeRegs > NumDefs) {
-      if (NumMergeRegs % NumDefs != 0)
+      if (ConvertOp != 0 || NumMergeRegs % NumDefs != 0)
         return false;
 
       Builder.setInstr(MI);
@@ -266,10 +290,22 @@
       }
 
     } else {
+      LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());
+      if (ConvertOp) {
+        Builder.setInstr(MI);
+
+        for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
+          Register MergeSrc = MergeI->getOperand(Idx + 1).getReg();
+          Builder.buildInstr(ConvertOp, {MI.getOperand(Idx).getReg()},
+                             {MergeSrc});
+        }
+
+        markInstAndDefDead(MI, *MergeI, DeadInsts);
+        return true;
+      }
       // FIXME: is a COPY appropriate if the types mismatch? We know both
       // registers are allocatable by now.
-      if (MRI.getType(MI.getOperand(0).getReg()) !=
-          MRI.getType(MergeI->getOperand(1).getReg()))
+      if (DestTy != MergeSrcTy)
         return false;
 
       for (unsigned Idx = 0; Idx < NumDefs; ++Idx)
@@ -417,8 +453,10 @@
       MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
       if (MRI.hasOneUse(PrevRegSrc)) {
         if (TmpDef != &DefMI) {
-          assert(TmpDef->getOpcode() == TargetOpcode::COPY &&
-                 "Expecting copy here");
+          assert((TmpDef->getOpcode() == TargetOpcode::COPY ||
+                  isArtifactCast(TmpDef->getOpcode())) &&
+                 "Expecting copy or artifact cast here");
+
+          DeadInsts.push_back(TmpDef);
         }
       } else
Index: include/llvm/CodeGen/GlobalISel/Utils.h
===================================================================
--- include/llvm/CodeGen/GlobalISel/Utils.h
+++ include/llvm/CodeGen/GlobalISel/Utils.h
@@ -15,6 +15,7 @@
 #define LLVM_CODEGEN_GLOBALISEL_UTILS_H
 
 #include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/Register.h"
 
 namespace llvm {
 
@@ -130,9 +131,14 @@
 
 /// See if Reg is defined by a single def instruction that is
 /// Opcode. Also try to do trivial folding if it's a COPY with
 /// same types. Returns null otherwise.
-MachineInstr *getOpcodeDef(unsigned Opcode, unsigned Reg,
+MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
                            const MachineRegisterInfo &MRI);
 
+/// Find the def instruction for \p Reg, folding away any trivial copies.
+/// Returns null if \p Reg is not a generic virtual register.
+MachineInstr *getDefIgnoringCopies(Register Reg,
+                                   const MachineRegisterInfo &MRI);
+
 /// Returns an APFloat from Val converted to the appropriate size.
 APFloat getAPFloatFromSize(double Val, unsigned Size);
Index: lib/CodeGen/GlobalISel/Utils.cpp
===================================================================
--- lib/CodeGen/GlobalISel/Utils.cpp
+++ lib/CodeGen/GlobalISel/Utils.cpp
@@ -281,8 +281,8 @@
   return MI->getOperand(1).getFPImm();
 }
 
-llvm::MachineInstr *llvm::getOpcodeDef(unsigned Opcode, unsigned Reg,
-                                       const MachineRegisterInfo &MRI) {
+llvm::MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
+                                               const MachineRegisterInfo &MRI) {
   auto *DefMI = MRI.getVRegDef(Reg);
   auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
   if (!DstTy.isValid())
@@ -294,7 +294,13 @@
       break;
     DefMI = MRI.getVRegDef(SrcReg);
   }
-  return DefMI->getOpcode() == Opcode ? DefMI : nullptr;
+  return DefMI;
+}
+
+llvm::MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
+                                       const MachineRegisterInfo &MRI) {
+  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
+  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
 }
 
 APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
Index: test/CodeGen/AMDGPU/GlobalISel/artifact-combiner-unmerge-values.mir
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/GlobalISel/artifact-combiner-unmerge-values.mir
@@ -0,0 +1,495 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=legalizer -global-isel-abort=0 -o - %s | FileCheck %s
+
+---
+name: test_unmerge_values_s1_trunc_v2s1_of_build_vector_v2s32
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s1_trunc_v2s1_of_build_vector_v2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[UV2]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[UV3]]
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
+    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ANYEXT1]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+    ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32)
+    ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(<2 x s32>)
+    %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %1(<2 x s32>)
+    %6:_(s1) = G_ICMP intpred(ne), %2(s32), %4
+    %7:_(s1) = G_ICMP intpred(ne), %3(s32), %5
+    %8:_(s32) = G_ANYEXT %6(s1)
+    %9:_(s32) = G_ANYEXT %7(s1)
+    %10:_(<2 x s32>) = G_BUILD_VECTOR %8, %9
+    %11:_(<2 x s1>) = G_TRUNC %10(<2 x s32>)
+    %12:_(s1), %13:_(s1) = G_UNMERGE_VALUES %11
+    %14:_(s32) = G_SEXT %12
+    %15:_(s32) = G_SEXT %13
+    %16:_(<2 x s32>) = G_BUILD_VECTOR %14, %15
+    $vgpr0_vgpr1 = COPY %16
+
+...
+
+# Requires looking through extra copies between the build_vector,
+# trunc and unmerge.
+---
+name: test_unmerge_values_s1_trunc_v2s1_of_build_vector_v2s32_extra_copies
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s1_trunc_v2s1_of_build_vector_v2s32_extra_copies
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[UV2]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[UV3]]
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
+    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
+    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ANYEXT1]](s32)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+    ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32)
+    ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(<2 x s32>)
+    %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %1(<2 x s32>)
+    %6:_(s1) = G_ICMP intpred(ne), %2(s32), %4
+    %7:_(s1) = G_ICMP intpred(ne), %3(s32), %5
+    %8:_(s32) = G_ANYEXT %6(s1)
+    %9:_(s32) = G_ANYEXT %7(s1)
+    %10:_(<2 x s32>) = G_BUILD_VECTOR %8, %9
+    %11:_(<2 x s32>) = COPY %10
+    %12:_(<2 x s1>) = G_TRUNC %11(<2 x s32>)
+    %13:_(<2 x s1>) = COPY %12
+    %14:_(s1), %15:_(s1) = G_UNMERGE_VALUES %13
+    %16:_(s32) = G_SEXT %14
+    %17:_(s32) = G_SEXT %15
+    %18:_(<2 x s32>) = G_BUILD_VECTOR %16, %17
+    $vgpr0_vgpr1 = COPY %18
+
+...
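+# The tests below feed the G_UNMERGE_VALUES from an intermediate extension
+# (sext, zext or anyext) of a G_BUILD_VECTOR with the same number of sources;
+# each unmerge result is expected to be rewritten as an extension of the
+# corresponding build_vector source.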
+
+---
+name: test_unmerge_values_s32_sext_v2s32_of_build_vector_v2s16
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s32_sext_v2s32_of_build_vector_v2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[UV2]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[UV3]]
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP]](s1)
+    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP1]](s1)
+    ; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ANYEXT]](s16)
+    ; CHECK: [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[ANYEXT1]](s16)
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT]](s32), [[SEXT1]](s32)
+    ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(<2 x s32>)
+    %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %1(<2 x s32>)
+    %6:_(s1) = G_ICMP intpred(ne), %2(s32), %4
+    %7:_(s1) = G_ICMP intpred(ne), %3(s32), %5
+    %8:_(s16) = G_ANYEXT %6
+    %9:_(s16) = G_ANYEXT %7
+    %10:_(<2 x s16>) = G_BUILD_VECTOR %8, %9
+    %11:_(<2 x s32>) = G_SEXT %10
+    %12:_(s32), %13:_(s32) = G_UNMERGE_VALUES %11
+    %14:_(<2 x s32>) = G_BUILD_VECTOR %12, %13
+    $vgpr0_vgpr1 = COPY %14
+
+...
+
+---
+name: test_unmerge_values_s32_zext_v2s32_of_build_vector_v2s16
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s32_zext_v2s32_of_build_vector_v2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[UV2]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[UV3]]
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP]](s1)
+    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP1]](s1)
+    ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ANYEXT]](s16)
+    ; CHECK: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ANYEXT1]](s16)
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ZEXT]](s32), [[ZEXT1]](s32)
+    ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(<2 x s32>)
+    %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %1(<2 x s32>)
+    %6:_(s1) = G_ICMP intpred(ne), %2(s32), %4
+    %7:_(s1) = G_ICMP intpred(ne), %3(s32), %5
+    %8:_(s16) = G_ANYEXT %6(s1)
+    %9:_(s16) = G_ANYEXT %7(s1)
+    %10:_(<2 x s16>) = G_BUILD_VECTOR %8, %9
+    %11:_(<2 x s32>) = G_ZEXT %10
+    %12:_(s32), %13:_(s32) = G_UNMERGE_VALUES %11
+    %14:_(<2 x s32>) = G_BUILD_VECTOR %12(s32), %13(s32)
+    $vgpr0_vgpr1 = COPY %14(<2 x s32>)
+
+...
+
+---
+name: test_unmerge_values_s32_anyext_v2s32_of_build_vector_v2s16
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s32_anyext_v2s32_of_build_vector_v2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[UV2]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[UV3]]
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
+    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32)
+    ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(<2 x s32>)
+    %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %1(<2 x s32>)
+    %6:_(s1) = G_ICMP intpred(ne), %2(s32), %4
+    %7:_(s1) = G_ICMP intpred(ne), %3(s32), %5
+    %8:_(s16) = G_ANYEXT %6(s1)
+    %9:_(s16) = G_ANYEXT %7(s1)
+    %10:_(<2 x s16>) = G_BUILD_VECTOR %8, %9
+    %11:_(<2 x s32>) = G_ANYEXT %10
+    %12:_(s32), %13:_(s32) = G_UNMERGE_VALUES %11
+    %14:_(<2 x s32>) = G_BUILD_VECTOR %12, %13
+    $vgpr0_vgpr1 = COPY %14
+
+...
+
+---
+name: test_unmerge_values_v2s16_zext_v4s32_of_build_vector_v4s16
+
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_v2s16_zext_v4s32_of_build_vector_v4s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[UV2]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[UV3]]
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP]](s1)
+    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP1]](s1)
+    ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ANYEXT]](s16)
+    ; CHECK: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ANYEXT1]](s16)
+    ; CHECK: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[ANYEXT]](s16)
+    ; CHECK: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[ANYEXT1]](s16)
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[ZEXT]](s32), [[ZEXT1]](s32), [[ZEXT2]](s32), [[ZEXT3]](s32)
+    ; CHECK: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>), [[UV6:%[0-9]+]]:_(<2 x s16>), [[UV7:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s32>)
+    ; CHECK: S_ENDPGM 0, implicit [[UV4]](<2 x s16>), implicit [[UV5]](<2 x s16>), implicit [[UV6]](<2 x s16>), implicit [[UV7]](<2 x s16>)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(<2 x s32>)
+    %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %1(<2 x s32>)
+    %6:_(s1) = G_ICMP intpred(ne), %2(s32), %4
+    %7:_(s1) = G_ICMP intpred(ne), %3(s32), %5
+    %8:_(s16) = G_ANYEXT %6
+    %9:_(s16) = G_ANYEXT %7
+    %10:_(<4 x s16>) = G_BUILD_VECTOR %8, %9, %8, %9
+    %11:_(<4 x s32>) = G_ZEXT %10
+    %12:_(<2 x s16>), %13:_(<2 x s16>), %14:_(<2 x s16>), %15:_(<2 x s16>) = G_UNMERGE_VALUES %11
+    S_ENDPGM 0, implicit %12, implicit %13, implicit %14, implicit %15
+
+...
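+# For the G_CONCAT_VECTORS sources below, the number of merge operands differs
+# from the number of unmerge results, so an intermediate cast is not expected
+# to be folded through; the concat, cast and unmerge are left for the rest of
+# the legalizer to handle.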
+
+---
+name: test_unmerge_values_s1_trunc_v4s1_of_concat_vectors_v4s32_v2s32
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s1_trunc_v4s1_of_concat_vectors_v4s32_v2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>)
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(<4 x s1>) = G_TRUNC [[CONCAT_VECTORS]](<4 x s32>)
+    ; CHECK: [[UV:%[0-9]+]]:_(s1), [[UV1:%[0-9]+]]:_(s1), [[UV2:%[0-9]+]]:_(s1), [[UV3:%[0-9]+]]:_(s1) = G_UNMERGE_VALUES [[TRUNC]](<4 x s1>)
+    ; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[UV]](s1)
+    ; CHECK: [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[UV1]](s1)
+    ; CHECK: [[SEXT2:%[0-9]+]]:_(s32) = G_SEXT [[UV2]](s1)
+    ; CHECK: [[SEXT3:%[0-9]+]]:_(s32) = G_SEXT [[UV3]](s1)
+    ; CHECK: $vgpr0 = COPY [[SEXT]](s32)
+    ; CHECK: $vgpr1 = COPY [[SEXT1]](s32)
+    ; CHECK: $vgpr2 = COPY [[SEXT2]](s32)
+    ; CHECK: $vgpr3 = COPY [[SEXT3]](s32)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    %2:_(<4 x s32>) = G_CONCAT_VECTORS %0, %1
+    %3:_(<4 x s1>) = G_TRUNC %2
+    %4:_(s1), %5:_(s1), %6:_(s1), %7:_(s1) = G_UNMERGE_VALUES %3
+    %8:_(s32) = G_SEXT %4
+    %9:_(s32) = G_SEXT %5
+    %10:_(s32) = G_SEXT %6
+    %11:_(s32) = G_SEXT %7
+    $vgpr0 = COPY %8
+    $vgpr1 = COPY %9
+    $vgpr2 = COPY %10
+    $vgpr3 = COPY %11
+...
+
+---
+name: test_unmerge_values_s16_of_concat_vectors_v2s16_v2s16
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s16_of_concat_vectors_v2s16_v2s16
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY]](<2 x s16>), [[COPY1]](<2 x s16>)
+    ; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s16>)
+    ; CHECK: S_ENDPGM 0, implicit [[UV]](s16), implicit [[UV1]](s16), implicit [[UV2]](s16), implicit [[UV3]](s16)
+    %0:_(<2 x s16>) = COPY $vgpr0
+    %1:_(<2 x s16>) = COPY $vgpr1
+    %2:_(<4 x s16>) = G_CONCAT_VECTORS %0, %1
+    %3:_(s16), %4:_(s16), %5:_(s16), %6:_(s16) = G_UNMERGE_VALUES %2
+    S_ENDPGM 0, implicit %3, implicit %4, implicit %5, implicit %6
+...
+
+---
+name: test_unmerge_values_s32_of_concat_vectors_v2s32_v2s32
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s32_of_concat_vectors_v2s32_v2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr1_vgpr2
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>)
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s32>)
+    ; CHECK: S_ENDPGM 0, implicit [[UV]](s32), implicit [[UV1]](s32), implicit [[UV2]](s32), implicit [[UV3]](s32)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr1_vgpr2
+    %2:_(<4 x s32>) = G_CONCAT_VECTORS %0, %1
+    %3:_(s32), %4:_(s32), %5:_(s32), %6:_(s32) = G_UNMERGE_VALUES %2
+    S_ENDPGM 0, implicit %3, implicit %4, implicit %5, implicit %6
+...
+
+---
+name: test_unmerge_values_s32_of_concat_vectors_v2s64_v2s64
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s32_of_concat_vectors_v2s64_v2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[COPY]](<2 x s64>), [[COPY1]](<2 x s64>)
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s64>)
+    ; CHECK: S_ENDPGM 0, implicit [[UV]](s32), implicit [[UV1]](s32), implicit [[UV2]](s32), implicit [[UV3]](s32), implicit [[UV4]](s32), implicit [[UV5]](s32), implicit [[UV6]](s32), implicit [[UV7]](s32)
+    %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    %1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+    %2:_(<4 x s64>) = G_CONCAT_VECTORS %0, %1
+    %3:_(s32), %4:_(s32), %5:_(s32), %6:_(s32), %7:_(s32), %8:_(s32), %9:_(s32), %10:_(s32) = G_UNMERGE_VALUES %2
+    S_ENDPGM 0, implicit %3, implicit %4, implicit %5, implicit %6, implicit %7, implicit %8, implicit %9, implicit %10
+...
+
+---
+name: test_unmerge_values_s32_of_trunc_concat_vectors_v2s64_v2s64
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s32_of_trunc_concat_vectors_v2s64_v2s64
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[COPY]](<2 x s64>), [[COPY1]](<2 x s64>)
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(<4 x s32>) = G_TRUNC [[CONCAT_VECTORS]](<4 x s64>)
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[TRUNC]](<4 x s32>)
+    ; CHECK: S_ENDPGM 0, implicit [[UV]](s32), implicit [[UV1]](s32), implicit [[UV2]](s32), implicit [[UV3]](s32)
+    %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+    %1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+    %2:_(<4 x s64>) = G_CONCAT_VECTORS %0, %1
+    %3:_(<4 x s32>) = G_TRUNC %2
+    %4:_(s32), %5:_(s32), %6:_(s32), %7:_(s32) = G_UNMERGE_VALUES %3
+    S_ENDPGM 0, implicit %4, implicit %5, implicit %6, implicit %7
+...
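+# In the extended concat_vectors cases below the checks show the extension
+# being scalarized: the concat is unmerged, each element is extended to s64,
+# and the result is rebuilt into a build_vector before the final unmerge.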
+
+---
+name: test_unmerge_values_s64_of_sext_concat_vectors_v2s32_v2s32
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s64_of_sext_concat_vectors_v2s32_v2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>)
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s32>)
+    ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[UV]](s32)
+    ; CHECK: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[UV1]](s32)
+    ; CHECK: [[SEXT2:%[0-9]+]]:_(s64) = G_SEXT [[UV2]](s32)
+    ; CHECK: [[SEXT3:%[0-9]+]]:_(s64) = G_SEXT [[UV3]](s32)
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[SEXT]](s64), [[SEXT1]](s64), [[SEXT2]](s64), [[SEXT3]](s64)
+    ; CHECK: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64), [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s64>)
+    ; CHECK: S_ENDPGM 0, implicit [[UV4]](s64), implicit [[UV5]](s64), implicit [[UV6]](s64), implicit [[UV7]](s64)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    %2:_(<4 x s32>) = G_CONCAT_VECTORS %0, %1
+    %3:_(<4 x s64>) = G_SEXT %2
+    %4:_(s64), %5:_(s64), %6:_(s64), %7:_(s64) = G_UNMERGE_VALUES %3
+    S_ENDPGM 0, implicit %4, implicit %5, implicit %6, implicit %7
+...
+
+---
+name: test_unmerge_values_s64_of_zext_concat_vectors_v2s32_v2s32
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s64_of_zext_concat_vectors_v2s32_v2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>)
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s32>)
+    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[UV]](s32)
+    ; CHECK: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[UV1]](s32)
+    ; CHECK: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[UV2]](s32)
+    ; CHECK: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[UV3]](s32)
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[ZEXT]](s64), [[ZEXT1]](s64), [[ZEXT2]](s64), [[ZEXT3]](s64)
+    ; CHECK: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64), [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s64>)
+    ; CHECK: S_ENDPGM 0, implicit [[UV4]](s64), implicit [[UV5]](s64), implicit [[UV6]](s64), implicit [[UV7]](s64)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    %2:_(<4 x s32>) = G_CONCAT_VECTORS %0, %1
+    %3:_(<4 x s64>) = G_ZEXT %2
+    %4:_(s64), %5:_(s64), %6:_(s64), %7:_(s64) = G_UNMERGE_VALUES %3
+    S_ENDPGM 0, implicit %4, implicit %5, implicit %6, implicit %7
+...
+
+---
+name: test_unmerge_values_s64_of_anyext_concat_vectors_v2s32_v2s32
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s64_of_anyext_concat_vectors_v2s32_v2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>)
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s32>)
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV]](s32)
+    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[UV1]](s32)
+    ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[UV2]](s32)
+    ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[UV3]](s32)
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[ANYEXT]](s64), [[ANYEXT1]](s64), [[ANYEXT2]](s64), [[ANYEXT3]](s64)
+    ; CHECK: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64), [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s64>)
+    ; CHECK: S_ENDPGM 0, implicit [[UV4]](s64), implicit [[UV5]](s64), implicit [[UV6]](s64), implicit [[UV7]](s64)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    %2:_(<4 x s32>) = G_CONCAT_VECTORS %0, %1
+    %3:_(<4 x s64>) = G_ANYEXT %2
+    %4:_(s64), %5:_(s64), %6:_(s64), %7:_(s64) = G_UNMERGE_VALUES %3
+    S_ENDPGM 0, implicit %4, implicit %5, implicit %6, implicit %7
+...
+
+---
+name: test_unmerge_values_s8_of_trunc_v4s16_concat_vectors_v2s32_v2s32
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s8_of_trunc_v4s16_concat_vectors_v2s32_v2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>)
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[CONCAT_VECTORS]](<4 x s32>)
+    ; CHECK: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[TRUNC]](<4 x s16>)
+    ; CHECK: S_ENDPGM 0, implicit [[UV]](s8), implicit [[UV1]](s8), implicit [[UV2]](s8), implicit [[UV3]](s8), implicit [[UV4]](s8), implicit [[UV5]](s8), implicit [[UV6]](s8), implicit [[UV7]](s8)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    %2:_(<4 x s32>) = G_CONCAT_VECTORS %0, %1
+    %3:_(<4 x s16>) = G_TRUNC %2
+    %4:_(s8), %5:_(s8), %6:_(s8), %7:_(s8), %8:_(s8), %9:_(s8), %10:_(s8), %11:_(s8) = G_UNMERGE_VALUES %3
+    S_ENDPGM 0, implicit %4, implicit %5, implicit %6, implicit %7, implicit %8, implicit %9, implicit %10, implicit %11
+...
+
+---
+name: test_unmerge_values_s16_of_anyext_v4s64_concat_vectors_v2s32_v2s32
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s16_of_anyext_v4s64_concat_vectors_v2s32_v2s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>)
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s32>)
+    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV]](s32)
+    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[UV1]](s32)
+    ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[UV2]](s32)
+    ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[UV3]](s32)
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[ANYEXT]](s64), [[ANYEXT1]](s64), [[ANYEXT2]](s64), [[ANYEXT3]](s64)
+    ; CHECK: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16), [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16), [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16), [[UV12:%[0-9]+]]:_(s16), [[UV13:%[0-9]+]]:_(s16), [[UV14:%[0-9]+]]:_(s16), [[UV15:%[0-9]+]]:_(s16), [[UV16:%[0-9]+]]:_(s16), [[UV17:%[0-9]+]]:_(s16), [[UV18:%[0-9]+]]:_(s16), [[UV19:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s64>)
+    ; CHECK: S_ENDPGM 0, implicit [[UV4]](s16), implicit [[UV5]](s16), implicit [[UV6]](s16), implicit [[UV7]](s16)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    %2:_(<4 x s32>) = G_CONCAT_VECTORS %0, %1
+    %3:_(<4 x s64>) = G_ANYEXT %2
+    %4:_(s16), %5:_(s16), %6:_(s16), %7:_(s16), %8:_(s16), %9:_(s16), %10:_(s16), %11:_(s16), %12:_(s16), %13:_(s16), %14:_(s16), %15:_(s16), %16:_(s16), %17:_(s16), %18:_(s16), %19:_(s16) = G_UNMERGE_VALUES %3
+    S_ENDPGM 0, implicit %4, implicit %5, implicit %6, implicit %7
+...
+
+# FIXME: Handle this
+---
+name: test_unmerge_values_s32_of_concat_vectors_v4s32_v4s32
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s32_of_concat_vectors_v4s32_v4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5
+    ; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr6_vgpr7
+    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>), [[COPY2]](<2 x s32>), [[COPY3]](<2 x s32>)
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(<8 x s16>) = G_TRUNC [[CONCAT_VECTORS]](<8 x s32>)
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[TRUNC]](<8 x s16>)
+    ; CHECK: S_ENDPGM 0, implicit [[UV]](s32), implicit [[UV1]](s32), implicit [[UV2]](s32), implicit [[UV3]](s32)
+    %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
+    %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
+    %2:_(<2 x s32>) = COPY $vgpr4_vgpr5
+    %3:_(<2 x s32>) = COPY $vgpr6_vgpr7
+    %4:_(<8 x s32>) = G_CONCAT_VECTORS %0, %1, %2, %3
+    %5:_(<8 x s16>) = G_TRUNC %4
+    %6:_(s32), %7:_(s32), %8:_(s32), %9:_(s32) = G_UNMERGE_VALUES %5
+    S_ENDPGM 0, implicit %6, implicit %7, implicit %8, implicit %9
+...
+
+---
+name: test_unmerge_values_s64_of_build_vector_v4s32
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: test_unmerge_values_s64_of_build_vector_v4s32
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32)
+    ; CHECK: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[BUILD_VECTOR]](<4 x s32>)
+    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[TRUNC]](<4 x s16>)
+    ; CHECK: S_ENDPGM 0, implicit [[UV]](s32), implicit [[UV1]](s32)
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = COPY $vgpr3
+    %4:_(<4 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3
+    %5:_(<4 x s16>) = G_TRUNC %4
+    %6:_(s32), %7:_(s32) = G_UNMERGE_VALUES %5
+    S_ENDPGM 0, implicit %6, implicit %7
+...