Index: lib/CodeGen/GlobalISel/LegalizerHelper.cpp
===================================================================
--- lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -1358,18 +1358,64 @@
 LegalizerHelper::LegalizeResult
 LegalizerHelper::fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx,
                                           LLT NarrowTy) {
-  unsigned Opc = MI.getOpcode();
-  unsigned NarrowSize = NarrowTy.getSizeInBits();
-  unsigned DstReg = MI.getOperand(0).getReg();
-  unsigned Flags = MI.getFlags();
-  unsigned Size = MRI.getType(DstReg).getSizeInBits();
-  int NumParts = Size / NarrowSize;
-  // FIXME: Don't know how to handle the situation where the small vectors
-  // aren't all the same size yet.
-  if (Size % NarrowSize != 0)
-    return UnableToLegalize;
+  const unsigned Opc = MI.getOpcode();
+  const unsigned NarrowSize = NarrowTy.getSizeInBits();
+  const unsigned DstReg = MI.getOperand(0).getReg();
+  const unsigned Flags = MI.getFlags();
+  const LLT DstTy = MRI.getType(DstReg);
+  const unsigned Size = DstTy.getSizeInBits();
+  const int NumParts = Size / NarrowSize;
+  const LLT EltTy = DstTy.getElementType();
+  const unsigned EltSize = EltTy.getSizeInBits();
+  const unsigned NumOps = MI.getNumOperands() - 1;
+
+  if (NarrowSize * NumParts != Size) {
+    // Only handle the case where the leftover is one element.
+    if (NarrowSize * NumParts + EltSize != Size)
+      return UnableToLegalize;
+
+    // Accumulate the legalized pieces into an undef value of the wide type.
+    unsigned AccumDstReg = MRI.createGenericVirtualRegister(DstTy);
+    MIRBuilder.buildUndef(AccumDstReg);
+
+    // Do the operation on the pieces which evenly divide into NarrowTy.
+    for (unsigned Offset = 0; Offset < Size; Offset += NarrowSize) {
+      if (Offset + NarrowSize < Size) {
+        SmallVector<SrcOp, 4> SrcOps;
+        for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
+          unsigned PartOpReg = MRI.createGenericVirtualRegister(NarrowTy);
+          MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(), Offset);
+          SrcOps.push_back(PartOpReg);
+        }
+
+        unsigned PartDstReg = MRI.createGenericVirtualRegister(NarrowTy);
+        MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags);
+
+        unsigned PartInsertReg = MRI.createGenericVirtualRegister(DstTy);
+        MIRBuilder.buildInsert(PartInsertReg, AccumDstReg, PartDstReg, Offset);
+        AccumDstReg = PartInsertReg;
+      }
+    }
+
+    // Do the operation on the single leftover element.
+    const unsigned ExtraOffset = NarrowSize * NumParts;
+    assert(ExtraOffset + EltSize == Size);
+
+    SmallVector<SrcOp, 4> SrcOps;
+    for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
+      unsigned PartOpReg = MRI.createGenericVirtualRegister(EltTy);
+      MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(), ExtraOffset);
+      SrcOps.push_back(PartOpReg);
+    }
+
+    unsigned PartDstReg = MRI.createGenericVirtualRegister(EltTy);
+    MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags);
+    MIRBuilder.buildInsert(DstReg, AccumDstReg, PartDstReg, ExtraOffset);
+    MI.eraseFromParent();
+
+    return Legalized;
+  }
 
-  unsigned NumOps = MI.getNumOperands() - 1;
   SmallVector<unsigned, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
 
   extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src0Regs);
Index: lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -158,11 +158,25 @@
 
   setAction({G_FRAME_INDEX, PrivatePtr}, Legal);
 
-  getActionDefinitionsBuilder(
+  auto &FPOpActions = getActionDefinitionsBuilder(
     { G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA})
-    .legalFor({S32, S64})
+    .legalFor({S32, S64});
+
+  if (ST.has16BitInsts()) {
+    FPOpActions
+      .legalFor({S16});
+  }
+
+  if (ST.hasVOP3PInsts()) {
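+    // With packed math, <2 x s16> ops are legal; clamp wider 16-bit vectors.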
+    FPOpActions
+      .legalFor({V2S16})
+      .clampMaxNumElements(0, S16, 2);
+  }
+
+  FPOpActions
     .scalarize(0)
-    .clampScalar(0, S32, S64);
+    .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);
 
   getActionDefinitionsBuilder(G_FPTRUNC)
     .legalFor({{S32, S64}, {S16, S32}});
@@ -365,8 +379,8 @@
     .legalIf([=](const LegalityQuery &Query) {
       const LLT &Ty0 = Query.Types[0];
       const LLT &Ty1 = Query.Types[1];
-      return (Ty0.getSizeInBits() % 32 == 0) &&
-             (Ty1.getSizeInBits() % 32 == 0);
+      return (Ty0.getSizeInBits() % 16 == 0) &&
+             (Ty1.getSizeInBits() % 16 == 0);
     });
 
   getActionDefinitionsBuilder(G_BUILD_VECTOR)
Index: test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir
===================================================================
--- test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir
+++ test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir
@@ -72,6 +72,31 @@
     $vgpr0_vgpr1 = COPY %2
 ...
 
+---
+name: test_and_v3i32
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
+
+    ; CHECK-LABEL: name: test_and_v3i32
+    ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
+    ; CHECK: [[DEF:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: [[EXTRACT:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[COPY]](<3 x s32>), 0
+    ; CHECK: [[EXTRACT1:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[COPY1]](<3 x s32>), 0
+    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[EXTRACT]], [[EXTRACT1]]
+    ; CHECK: [[INSERT:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[DEF]], [[AND]](<2 x s32>), 0
+    ; CHECK: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY]](<3 x s32>), 64
+    ; CHECK: [[EXTRACT3:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<3 x s32>), 64
+    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[EXTRACT2]], [[EXTRACT3]]
+    ; CHECK: [[INSERT1:%[0-9]+]]:_(<3 x s32>) = G_INSERT [[INSERT]], [[AND1]](s32), 64
+    ; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[INSERT1]](<3 x s32>)
+    %0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
+    %1:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
+    %2:_(<3 x s32>) = G_AND %0, %1
+    $vgpr0_vgpr1_vgpr2 = COPY %2
+...
+
 ---
 name: test_and_v4i32
 body: |
@@ -93,6 +118,39 @@
     $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %2
 ...
 
+---
+name: test_and_v5i32
+body: |
+  bb.0:
+
+    ; CHECK-LABEL: name: test_and_v5i32
+    ; CHECK: [[DEF:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: [[DEF1:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: [[DEF2:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: [[EXTRACT:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[DEF]](<5 x s32>), 0
+    ; CHECK: [[EXTRACT1:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[DEF1]](<5 x s32>), 0
+    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[EXTRACT]], [[EXTRACT1]]
+    ; CHECK: [[INSERT:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[DEF2]], [[AND]](<2 x s32>), 0
+    ; CHECK: [[EXTRACT2:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[DEF]](<5 x s32>), 64
+    ; CHECK: [[EXTRACT3:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[DEF1]](<5 x s32>), 64
+    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s32>) = G_AND [[EXTRACT2]], [[EXTRACT3]]
+    ; CHECK: [[INSERT1:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[INSERT]], [[AND1]](<2 x s32>), 64
+    ; CHECK: [[EXTRACT4:%[0-9]+]]:_(s32) = G_EXTRACT [[DEF]](<5 x s32>), 128
+    ; CHECK: [[EXTRACT5:%[0-9]+]]:_(s32) = G_EXTRACT [[DEF1]](<5 x s32>), 128
+    ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[EXTRACT4]], [[EXTRACT5]]
+    ; CHECK: [[INSERT2:%[0-9]+]]:_(<5 x s32>) = G_INSERT [[INSERT1]], [[AND2]](s32), 128
+    ; CHECK: [[DEF3:%[0-9]+]]:_(<8 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: [[INSERT3:%[0-9]+]]:_(<8 x s32>) = G_INSERT [[DEF3]], [[INSERT2]](<5 x s32>), 0
+    ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT3]](<8 x s32>)
+    %0:_(<5 x s32>) = G_IMPLICIT_DEF
+    %1:_(<5 x s32>) = G_IMPLICIT_DEF
+    %2:_(<5 x s32>) = G_AND %0, %1
+    %3:_(<8 x s32>) = G_IMPLICIT_DEF
+    %4:_(<8 x s32>) = G_INSERT %3, %2, 0
+    $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %4
+...
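+# <5 x s32> above splits into two <2 x s32> pieces plus one leftover s32 piece.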
+
 ---
 name: test_and_i8
 body: |
Index: test/CodeGen/AMDGPU/GlobalISel/legalize-fma.mir
===================================================================
--- test/CodeGen/AMDGPU/GlobalISel/legalize-fma.mir
+++ test/CodeGen/AMDGPU/GlobalISel/legalize-fma.mir
@@ -91,12 +91,8 @@
     ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
     ; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; VI: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
-    ; VI: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
-    ; VI: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
-    ; VI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FPEXT]], [[FPEXT1]], [[FPEXT2]]
-    ; VI: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA]](s32)
-    ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
+    ; VI: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
+    ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; VI: $vgpr0 = COPY [[ANYEXT]](s32)
     ; GFX9-LABEL: name: test_fma_s16
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -105,12 +101,8 @@
     ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
     ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
-    ; GFX9: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
-    ; GFX9: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
-    ; GFX9: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FPEXT]], [[FPEXT1]], [[FPEXT2]]
-    ; GFX9: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA]](s32)
-    ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
+    ; GFX9: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
+    ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $vgpr1
@@ -349,37 +341,16 @@
     ; VI: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
     ; VI: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
     ; VI: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY2]](<2 x s16>)
-    ; VI: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
-    ; VI: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
-    ; VI: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
-    ; VI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FPEXT]], [[FPEXT1]], [[FPEXT2]]
-    ; VI: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA]](s32)
-    ; VI: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
-    ; VI: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
-    ; VI: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
-    ; VI: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FPEXT3]], [[FPEXT4]], [[FPEXT5]]
-    ; VI: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA1]](s32)
-    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16)
+    ; VI: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[UV]], [[UV2]], [[UV4]]
+    ; VI: [[FMA1:%[0-9]+]]:_(s16) = G_FMA [[UV1]], [[UV3]], [[UV5]]
+    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FMA]](s16), [[FMA1]](s16)
     ; VI: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
     ; GFX9-LABEL: name: test_fma_v2s16
     ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
     ; GFX9: [[COPY2:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr2
-    ; GFX9: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
-    ; GFX9: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
-    ; GFX9: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY2]](<2 x s16>)
-    ; GFX9: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
-    ; GFX9: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
-    ; GFX9: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
-    ; GFX9: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FPEXT]], [[FPEXT1]], [[FPEXT2]]
-    ; GFX9: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA]](s32)
-    ; GFX9: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
-    ; GFX9: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
-    ; GFX9: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
-    ; GFX9: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FPEXT3]], [[FPEXT4]], [[FPEXT5]]
-    ; GFX9: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA1]](s32)
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16)
-    ; GFX9: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+    ; GFX9: [[FMA:%[0-9]+]]:_(<2 x s16>) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; GFX9: $vgpr0 = COPY [[FMA]](<2 x s16>)
     %0:_(<2 x s16>) = COPY $vgpr0
     %1:_(<2 x s16>) = COPY $vgpr1
     %2:_(<2 x s16>) = COPY $vgpr2
@@ -423,47 +394,27 @@
     ; VI: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF]](<3 x s16>)
     ; VI: [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF1]](<3 x s16>)
     ; VI: [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16), [[UV8:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF2]](<3 x s16>)
-    ; VI: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
-    ; VI: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
-    ; VI: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
-    ; VI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FPEXT]], [[FPEXT1]], [[FPEXT2]]
-    ; VI: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA]](s32)
-    ; VI: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
-    ; VI: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
-    ; VI: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
-    ; VI: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FPEXT3]], [[FPEXT4]], [[FPEXT5]]
-    ; VI: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA1]](s32)
-    ; VI: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
-    ; VI: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
-    ; VI: [[FPEXT8:%[0-9]+]]:_(s32) = G_FPEXT [[UV8]](s16)
-    ; VI: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FPEXT6]], [[FPEXT7]], [[FPEXT8]]
-    ; VI: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA2]](s32)
-    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16)
+    ; VI: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[UV]], [[UV3]], [[UV6]]
+    ; VI: [[FMA1:%[0-9]+]]:_(s16) = G_FMA [[UV1]], [[UV4]], [[UV7]]
+    ; VI: [[FMA2:%[0-9]+]]:_(s16) = G_FMA [[UV2]], [[UV5]], [[UV8]]
+    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[FMA]](s16), [[FMA1]](s16), [[FMA2]](s16)
     ; VI: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s16>)
     ; GFX9-LABEL: name: test_fma_v3s16
     ; GFX9: [[DEF:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
     ; GFX9: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
     ; GFX9: [[DEF2:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
-    ; GFX9: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF]](<3 x s16>)
-    ; GFX9: [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF1]](<3 x s16>)
-    ; GFX9: [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16), [[UV8:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF2]](<3 x s16>)
-    ; GFX9: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
-    ; GFX9: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
-    ; GFX9: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
-    ; GFX9: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FPEXT]], [[FPEXT1]], [[FPEXT2]]
-    ; GFX9: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA]](s32)
-    ; GFX9: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
-    ; GFX9: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
-    ; GFX9: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
-    ; GFX9: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FPEXT3]], [[FPEXT4]], [[FPEXT5]]
-    ; GFX9: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA1]](s32)
-    ; GFX9: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
-    ; GFX9: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
-    ; GFX9: [[FPEXT8:%[0-9]+]]:_(s32) = G_FPEXT [[UV8]](s16)
-    ; GFX9: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FPEXT6]], [[FPEXT7]], [[FPEXT8]]
-    ; GFX9: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA2]](s32)
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16)
-    ; GFX9: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s16>)
+    ; GFX9: [[DEF3:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
+    ; GFX9: [[EXTRACT:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[DEF]](<3 x s16>), 0
+    ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[DEF1]](<3 x s16>), 0
+    ; GFX9: [[EXTRACT2:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[DEF2]](<3 x s16>), 0
+    ; GFX9: [[FMA:%[0-9]+]]:_(<2 x s16>) = G_FMA [[EXTRACT]], [[EXTRACT1]], [[EXTRACT2]]
+    ; GFX9: [[INSERT:%[0-9]+]]:_(<3 x s16>) = G_INSERT [[DEF3]], [[FMA]](<2 x s16>), 0
+    ; GFX9: [[EXTRACT3:%[0-9]+]]:_(s16) = G_EXTRACT [[DEF]](<3 x s16>), 32
+    ; GFX9: [[EXTRACT4:%[0-9]+]]:_(s16) = G_EXTRACT [[DEF1]](<3 x s16>), 32
+    ; GFX9: [[EXTRACT5:%[0-9]+]]:_(s16) = G_EXTRACT [[DEF2]](<3 x s16>), 32
+    ; GFX9: [[FMA1:%[0-9]+]]:_(s16) = G_FMA [[EXTRACT3]], [[EXTRACT4]], [[EXTRACT5]]
+    ; GFX9: [[INSERT1:%[0-9]+]]:_(<3 x s16>) = G_INSERT [[INSERT]], [[FMA1]](s16), 32
+    ; GFX9: S_NOP 0, implicit [[INSERT1]](<3 x s16>)
     %0:_(<3 x s16>) = G_IMPLICIT_DEF
     %1:_(<3 x s16>) = G_IMPLICIT_DEF
     %2:_(<3 x s16>) = G_IMPLICIT_DEF
@@ -513,57 +464,23 @@
     ; VI: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
     ; VI: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
     ; VI: [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16), [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY2]](<4 x s16>)
-    ; VI: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
-    ; VI: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
-    ; VI: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV8]](s16)
-    ; VI: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FPEXT]], [[FPEXT1]], [[FPEXT2]]
-    ; VI: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA]](s32)
-    ; VI: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
-    ; VI: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
-    ; VI: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV9]](s16)
-    ; VI: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FPEXT3]], [[FPEXT4]], [[FPEXT5]]
-    ; VI: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA1]](s32)
-    ; VI: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
-    ; VI: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
-    ; VI: [[FPEXT8:%[0-9]+]]:_(s32) = G_FPEXT [[UV10]](s16)
-    ; VI: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FPEXT6]], [[FPEXT7]], [[FPEXT8]]
-    ; VI: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA2]](s32)
-    ; VI: [[FPEXT9:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
-    ; VI: [[FPEXT10:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
-    ; VI: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[UV11]](s16)
-    ; VI: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FPEXT9]], [[FPEXT10]], [[FPEXT11]]
-    ; VI: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA3]](s32)
-    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16), [[FPTRUNC3]](s16)
+    ; VI: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[UV]], [[UV4]], [[UV8]]
+    ; VI: [[FMA1:%[0-9]+]]:_(s16) = G_FMA [[UV1]], [[UV5]], [[UV9]]
+    ; VI: [[FMA2:%[0-9]+]]:_(s16) = G_FMA [[UV2]], [[UV6]], [[UV10]]
+    ; VI: [[FMA3:%[0-9]+]]:_(s16) = G_FMA [[UV3]], [[UV7]], [[UV11]]
+    ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FMA]](s16), [[FMA1]](s16), [[FMA2]](s16), [[FMA3]](s16)
     ; VI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
     ; GFX9-LABEL: name: test_fma_v4s16
     ; GFX9: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
     ; GFX9: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
     ; GFX9: [[COPY2:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr4_vgpr5
-    ; GFX9: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
-    ; GFX9: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
-    ; GFX9: [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16), [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY2]](<4 x s16>)
-    ; GFX9: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
-    ; GFX9: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
-    ; GFX9: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV8]](s16)
-    ; GFX9: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FPEXT]], [[FPEXT1]], [[FPEXT2]]
-    ; GFX9: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA]](s32)
-    ; GFX9: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
-    ; GFX9: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
-    ; GFX9: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV9]](s16)
-    ; GFX9: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FPEXT3]], [[FPEXT4]], [[FPEXT5]]
-    ; GFX9: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA1]](s32)
-    ; GFX9: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
-    ; GFX9: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
-    ; GFX9: [[FPEXT8:%[0-9]+]]:_(s32) = G_FPEXT [[UV10]](s16)
-    ; GFX9: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FPEXT6]], [[FPEXT7]], [[FPEXT8]]
-    ; GFX9: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA2]](s32)
-    ; GFX9: [[FPEXT9:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
-    ; GFX9: [[FPEXT10:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
-    ; GFX9: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[UV11]](s16)
-    ; GFX9: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FPEXT9]], [[FPEXT10]], [[FPEXT11]]
-    ; GFX9: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMA3]](s32)
-    ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16), [[FPTRUNC3]](s16)
-    ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+    ; GFX9: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+    ; GFX9: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+    ; GFX9: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY2]](<4 x s16>)
+    ; GFX9: [[FMA:%[0-9]+]]:_(<2 x s16>) = G_FMA [[UV]], [[UV2]], [[UV4]]
+    ; GFX9: [[FMA1:%[0-9]+]]:_(<2 x s16>) = G_FMA [[UV1]], [[UV3]], [[UV5]]
+    ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[FMA]](<2 x s16>), [[FMA1]](<2 x s16>)
+    ; GFX9: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
     %0:_(<4 x s16>) = COPY $vgpr0_vgpr1
     %1:_(<4 x s16>) = COPY $vgpr2_vgpr3
     %2:_(<4 x s16>) = COPY $vgpr4_vgpr5