Index: lib/CodeGen/GlobalISel/LegalizerHelper.cpp
===================================================================
--- lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -756,6 +756,46 @@
     widenScalarDst(MI, WideTy.getScalarType(), 0);
     return Legalized;
   }
+  case TargetOpcode::G_MERGE_VALUES: {
+    if (TypeIdx != 1)
+      return UnableToLegalize;
+
+    unsigned DstReg = MI.getOperand(0).getReg();
+    LLT DstTy = MRI.getType(DstReg);
+    if (!DstTy.isScalar())
+      return UnableToLegalize;
+
+    unsigned NumSrc = MI.getNumOperands() - 1;
+    unsigned EltSize = DstTy.getSizeInBits() / NumSrc;
+    LLT EltTy = LLT::scalar(EltSize);
+
+    unsigned ResultReg = MRI.createGenericVirtualRegister(DstTy);
+    unsigned Offset = 0;
+    for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I,
+         Offset += EltSize) {
+      assert(MRI.getType(MI.getOperand(I).getReg()) == EltTy);
+
+      unsigned ShiftAmt = MRI.createGenericVirtualRegister(DstTy);
+      unsigned Shl = MRI.createGenericVirtualRegister(DstTy);
+      unsigned ZextInput = MRI.createGenericVirtualRegister(DstTy);
+      MIRBuilder.buildZExt(ZextInput, MI.getOperand(I).getReg());
+
+      if (Offset != 0) {
+        unsigned NextResult = I + 1 == E ? DstReg :
+          MRI.createGenericVirtualRegister(DstTy);
+
+        MIRBuilder.buildConstant(ShiftAmt, Offset);
+        MIRBuilder.buildShl(Shl, ZextInput, ShiftAmt);
+        MIRBuilder.buildOr(NextResult, ResultReg, Shl);
+        ResultReg = NextResult;
+      } else {
+        ResultReg = ZextInput;
+      }
+    }
+
+    MI.eraseFromParent();
+    return Legalized;
+  }
   case TargetOpcode::G_UADDO:
   case TargetOpcode::G_USUBO: {
     if (TypeIdx == 1)
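A point worth calling out in the G_MERGE_VALUES case above: it never consults WideTy. When asked to widen the source type (TypeIdx 1), it rewrites the merge wholesale: each source is zero-extended to the destination type, shifted left by its accumulated bit offset, and ORed into the running result, while the first source (Offset == 0) is taken directly from its zero-extend with no shift or OR. As a hand-written sketch (register names here are illustrative, not actual legalizer output), a two-source merge such as

  %2:_(s16) = G_MERGE_VALUES %0(s8), %1(s8)

should expand to roughly:

  %zext0:_(s16) = G_ZEXT %0(s8)
  %zext1:_(s16) = G_ZEXT %1(s8)
  %cst8:_(s16) = G_CONSTANT i16 8
  %shl1:_(s16) = G_SHL %zext1, %cst8
  %2:_(s16) = G_OR %zext0, %shl1

One harmless wart, faithful to the code as written: in the Offset == 0 iteration the newly created ShiftAmt and Shl registers (and the initial ResultReg) are never defined or used, so the expansion leaves a few dead vregs behind.
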
Index: lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -34,6 +34,7 @@
   };

   const LLT S1 = LLT::scalar(1);
+  const LLT S8 = LLT::scalar(8);
   const LLT S16 = LLT::scalar(16);
   const LLT S32 = LLT::scalar(32);
   const LLT S64 = LLT::scalar(64);
@@ -195,11 +196,13 @@
     .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
                {S32, S1}, {S64, S1}, {S16, S1},
                // FIXME: Hack
-               {S128, S32}})
+               {S128, S32}, {S32, LLT::scalar(24)}})
     .scalarize(0);

   getActionDefinitionsBuilder(G_TRUNC)
     .legalFor({{S32, S64}, {S16, S32}, {S16, S64}})
+    // FIXME: Hack
+    .legalFor({{S8, S32}, {S1, S32}})
     .scalarize(0);

   getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
@@ -444,6 +447,13 @@
   };

   getActionDefinitionsBuilder(Op)
+    .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
+    // Clamp the little scalar to s8-s256 and make it a power of 2. It's not
+    // worth considering the multiples of 64 since 2*192 and 2*384 are not
+    // valid.
+    .clampScalar(LitTyIdx, S16, S256)
+    .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)
+
     // Break up vectors with weird elements into scalars
     .fewerElementsIf(
       [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
@@ -470,12 +480,6 @@
       }
       return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
     })
-    .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
-    // Clamp the little scalar to s8-s256 and make it a power of 2. It's not
-    // worth considering the multiples of 64 since 2*192 and 2*384 are not
-    // valid.
-    .clampScalar(LitTyIdx, S16, S256)
-    .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)
     .legalIf([=](const LegalityQuery &Query) {
       const LLT &BigTy = Query.Types[BigTyIdx];
       const LLT &LitTy = Query.Types[LitTyIdx];
Index: test/CodeGen/AMDGPU/GlobalISel/legalize-merge-values.mir
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/GlobalISel/legalize-merge-values.mir
@@ -0,0 +1,64 @@
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: test_merge_s16_s8_s8
+body: |
+  bb.0:
+    %0:_(s8) = G_CONSTANT i8 0
+    %1:_(s8) = G_CONSTANT i8 1
+    %2:_(s16) = G_MERGE_VALUES %0, %1
+    %3:_(s32) = G_ANYEXT %2
+    $vgpr0 = COPY %3
+...
+
+---
+name: test_merge_s24_s8_s8_s8
+body: |
+  bb.0:
+    %0:_(s8) = G_CONSTANT i8 0
+    %1:_(s8) = G_CONSTANT i8 1
+    %2:_(s8) = G_CONSTANT i8 2
+    %3:_(s24) = G_MERGE_VALUES %0, %1, %2
+    %4:_(s32) = G_ANYEXT %3
+    $vgpr0 = COPY %4
+...
+
+---
+name: test_merge_s32_s8_s8_s8_s8
+body: |
+  bb.0:
+    %0:_(s8) = G_CONSTANT i8 0
+    %1:_(s8) = G_CONSTANT i8 1
+    %2:_(s8) = G_CONSTANT i8 2
+    %3:_(s8) = G_CONSTANT i8 3
+    %4:_(s32) = G_MERGE_VALUES %0, %1, %2, %3
+    $vgpr0 = COPY %4
+...
+
+---
+name: test_merge_s64_s32_s32
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s64) = G_MERGE_VALUES %0, %1
+    $vgpr1_vgpr2 = COPY %2
+...
+
+---
+name: test_merge_s64_s16_s16_s16_s16
+body: |
+  bb.0:
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = COPY $vgpr1
+    %2:_(s32) = COPY $vgpr2
+    %3:_(s32) = COPY $vgpr3
+    %4:_(s16) = G_TRUNC %0
+    %5:_(s16) = G_TRUNC %1
+    %6:_(s16) = G_TRUNC %2
+    %7:_(s16) = G_TRUNC %3
+    %8:_(s64) = G_MERGE_VALUES %4, %5, %6, %7
+    $vgpr1_vgpr2 = COPY %8
...
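
Two notes on the new test. First, the RUN line pipes llc into FileCheck, but the file contains no CHECK directives yet, so FileCheck as invoked would fail with "error: no check strings found with prefix 'CHECK:'"; the check lines would normally be generated with utils/update_mir_test_checks.py once the expansion settles. Second, purely as a hand-written sketch of the expected shape for the first function, assuming the zext/shl/or expansion sketched above (the capture names are hypothetical, and the s8 constants will themselves be legalized, so the real output will contain additional instructions):

  # CHECK-LABEL: name: test_merge_s16_s8_s8
  # CHECK: [[ZEXT0:%[0-9]+]]:_(s16) = G_ZEXT
  # CHECK: [[ZEXT1:%[0-9]+]]:_(s16) = G_ZEXT
  # CHECK: [[C8:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
  # CHECK: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ZEXT1]], [[C8]]
  # CHECK: [[OR:%[0-9]+]]:_(s16) = G_OR [[ZEXT0]], [[SHL]]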