Index: llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2722,6 +2722,52 @@
   return true;
 }
 
+// Produce a vector of s16 elements from s32 pieces.
+static void truncToS16Vector(MachineIRBuilder &B, Register DstReg,
+                             ArrayRef<Register> UnmergeParts) {
+  const LLT S16 = LLT::scalar(16);
+
+  SmallVector<Register, 4> RemergeParts(UnmergeParts.size());
+  for (int I = 0, E = UnmergeParts.size(); I != E; ++I)
+    RemergeParts[I] = B.buildTrunc(S16, UnmergeParts[I]).getReg(0);
+
+  B.buildBuildVector(DstReg, RemergeParts);
+}
+
+/// Convert a set of s32 registers to a result vector with s16 elements.
+static void bitcastToS16Vector(MachineIRBuilder &B, Register DstReg,
+                               ArrayRef<Register> UnmergeParts) {
+  MachineRegisterInfo &MRI = *B.getMRI();
+  const LLT V2S16 = LLT::vector(2, 16);
+  LLT TargetTy = MRI.getType(DstReg);
+  int NumElts = UnmergeParts.size();
+
+  if (NumElts == 1) {
+    assert(TargetTy == V2S16);
+    B.buildBitcast(DstReg, UnmergeParts[0]);
+    return;
+  }
+
+  SmallVector<Register, 4> RemergeParts(NumElts);
+  for (int I = 0; I != NumElts; ++I)
+    RemergeParts[I] = B.buildBitcast(V2S16, UnmergeParts[I]).getReg(0);
+
+  if (TargetTy.getSizeInBits() == 32u * NumElts) {
+    B.buildConcatVectors(DstReg, RemergeParts);
+    return;
+  }
+
+  const LLT V3S16 = LLT::vector(3, 16);
+  const LLT V6S16 = LLT::vector(6, 16);
+
+  // Widen to v6s16 and unpack v3 parts.
+  assert(TargetTy == V3S16);
+
+  RemergeParts.push_back(B.buildUndef(V2S16).getReg(0));
+  auto Concat = B.buildConcatVectors(V6S16, RemergeParts);
+  B.buildUnmerge({DstReg, MRI.createGenericVirtualRegister(V3S16)}, Concat);
+}
+
 // FIXME: Just vector trunc should be sufficent, but legalization currently
 // broken.
 static void repackUnpackedD16Load(MachineIRBuilder &B, Register DstReg,
@@ -2781,6 +2827,7 @@
 
   Register DstReg = MI.getOperand(0).getReg();
   LLT Ty = MRI->getType(DstReg);
+  const LLT EltTy = Ty.getScalarType();
   const bool IsD16 = Ty.getScalarType() == S16;
   const unsigned NumElts = Ty.isVector() ? Ty.getNumElements() : 1;
 
@@ -2821,18 +2868,38 @@
   // Insert after the instruction.
   B.setInsertPt(*MI.getParent(), ++MI.getIterator());
 
-  // TODO: Should probably unmerge to s32 pieces and repack instead of using
-  // extracts.
-  if (RoundedTy == Ty) {
-    B.buildExtract(DstReg, TFEReg, 0);
-  } else {
-    // If we had to round the data type (i.e. this was a <3 x s16>), do the
-    // weird extract separately.
-    auto DataPart = B.buildExtract(RoundedTy, TFEReg, 0);
-    B.buildExtract(DstReg, DataPart, 0);
+  // Now figure out how to copy the new result register back into the old
+  // result.
+
+  SmallVector<Register, 5> UnmergeResults(TFETy.getNumElements(), Dst1Reg);
+  int NumDataElts = TFETy.getNumElements() - 1;
+
+  if (!Ty.isVector()) {
+    // Simplest case is a trivial unmerge (plus a truncate for d16).
+    UnmergeResults[0] = Ty == S32 ?
+      DstReg : MRI->createGenericVirtualRegister(S32);
+
+    B.buildUnmerge(UnmergeResults, TFEReg);
+    if (Ty != S32)
+      B.buildTrunc(DstReg, UnmergeResults[0]);
+    return true;
   }
 
-  B.buildExtract(Dst1Reg, TFEReg, RoundedTy.getSizeInBits());
+  // We have to repack into a new vector of some kind.
+  for (int I = 0; I != NumDataElts; ++I)
+    UnmergeResults[I] = MRI->createGenericVirtualRegister(S32);
+  B.buildUnmerge(UnmergeResults, TFEReg);
+
+  // Drop the final TFE element.
+  ArrayRef<Register> DataPart(UnmergeResults.data(), NumDataElts);
+
+  if (EltTy == S32)
+    B.buildBuildVector(DstReg, DataPart);
+  else if (ST.hasUnpackedD16VMem())
+    truncToS16Vector(B, DstReg, DataPart);
+  else
+    bitcastToS16Vector(B, DstReg, DataPart);
+
   return true;
 }
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-llvm.amdgcn.image.load.2d.d16.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-llvm.amdgcn.image.load.2d.d16.ll
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-llvm.amdgcn.image.load.2d.d16.ll
@@ -210,10 +210,9 @@
   ; UNPACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
   ; UNPACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; UNPACKED: [[INT:%[0-9]+]]:_(<2 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 1, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 8 from custom "TargetCustom8")
-  ; UNPACKED: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<2 x s32>), 0
-  ; UNPACKED: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<2 x s32>), 32
-  ; UNPACKED: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
-  ; UNPACKED: [[COPY10:%[0-9]+]]:_(s32) = COPY [[EXTRACT]](s32)
+  ; UNPACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<2 x s32>)
+  ; UNPACKED: G_STORE [[UV1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; UNPACKED: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
   ; UNPACKED: $vgpr0 = COPY [[COPY10]](s32)
   ; UNPACKED: SI_RETURN_TO_EPILOG implicit $vgpr0
   ; PACKED-LABEL: name: image_load_tfe_f16
@@ -232,11 +231,9 @@
   ; PACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
   ; PACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; PACKED: [[INT:%[0-9]+]]:_(<2 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 1, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 8 from custom "TargetCustom8")
-  ; PACKED: [[EXTRACT:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INT]](<2 x s32>), 0
-  ; PACKED: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[EXTRACT]](<2 x s16>)
-  ; PACKED: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<2 x s32>), 32
-  ; PACKED: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
-  ; PACKED: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; PACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<2 x s32>)
+  ; PACKED: G_STORE [[UV1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; PACKED: [[COPY10:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
   ; PACKED: $vgpr0 = COPY [[COPY10]](s32)
   ; PACKED: SI_RETURN_TO_EPILOG implicit $vgpr0
   %res = call { half, i32 } @llvm.amdgcn.image.load.2d.sl_f16i32s.i32(i32 1, i32 %s, i32 %t, <8 x i32> %rsrc, i32 1, i32 0)
@@ -263,11 +260,12 @@
   ; UNPACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
   ; UNPACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; UNPACKED: [[INT:%[0-9]+]]:_(<3 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 3, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 8 from custom "TargetCustom8")
-  ; UNPACKED: [[EXTRACT:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[INT]](<3 x s32>), 0
-  ; UNPACKED: [[EXTRACT1:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[EXTRACT]](<2 x s32>), 0
-  ; UNPACKED: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<3 x s32>), 64
-  ; UNPACKED: G_STORE [[EXTRACT2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
-  ; UNPACKED: $vgpr0 = COPY [[EXTRACT1]](<2 x s16>)
+  ; UNPACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<3 x s32>)
+  ; UNPACKED: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
+  ; UNPACKED: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
+  ; UNPACKED: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
+  ; UNPACKED: G_STORE [[UV2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; UNPACKED: $vgpr0 = COPY [[BUILD_VECTOR1]](<2 x s16>)
   ; UNPACKED: SI_RETURN_TO_EPILOG implicit $vgpr0
   ; PACKED-LABEL: name: image_load_tfe_v2f16
   ; PACKED: bb.1 (%ir-block.0):
@@ -285,10 +283,10 @@
   ; PACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
   ; PACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; PACKED: [[INT:%[0-9]+]]:_(<2 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 3, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 8 from custom "TargetCustom8")
-  ; PACKED: [[EXTRACT:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INT]](<2 x s32>), 0
-  ; PACKED: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<2 x s32>), 32
-  ; PACKED: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
-  ; PACKED: $vgpr0 = COPY [[EXTRACT]](<2 x s16>)
+  ; PACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<2 x s32>)
+  ; PACKED: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[UV]](s32)
+  ; PACKED: G_STORE [[UV1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; PACKED: $vgpr0 = COPY [[BITCAST]](<2 x s16>)
   ; PACKED: SI_RETURN_TO_EPILOG implicit $vgpr0
   %res = call { <2 x half>, i32 } @llvm.amdgcn.image.load.2d.sl_v2f16i32s.i32(i32 3, i32 %s, i32 %t, <8 x i32> %rsrc, i32 1, i32 0)
   %tex = extractvalue { <2 x half>, i32 } %res, 0
@@ -314,16 +312,22 @@
   ; UNPACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
   ; UNPACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; UNPACKED: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 7, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
-  ; UNPACKED: [[EXTRACT:%[0-9]+]]:_(<3 x s32>) = G_EXTRACT [[INT]](<4 x s32>), 0
-  ; UNPACKED: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[EXTRACT]](<3 x s32>), 0
-  ; UNPACKED: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<4 x s32>), 96
-  ; UNPACKED: G_STORE [[EXTRACT2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
-  ; UNPACKED: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
-  ; UNPACKED: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT1]](<3 x s16>), 0
-  ; UNPACKED: [[EXTRACT3:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 0
-  ; UNPACKED: [[EXTRACT4:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 32
-  ; UNPACKED: $vgpr0 = COPY [[EXTRACT3]](<2 x s16>)
-  ; UNPACKED: $vgpr1 = COPY [[EXTRACT4]](<2 x s16>)
+  ; UNPACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; UNPACKED: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
+  ; UNPACKED: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
+  ; UNPACKED: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV2]](s32)
+  ; UNPACKED: [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+  ; UNPACKED: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
+  ; UNPACKED: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF1]](s16)
+  ; UNPACKED: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR1]](<2 x s16>), [[BUILD_VECTOR2]](<2 x s16>)
+  ; UNPACKED: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+  ; UNPACKED: G_STORE [[UV3]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; UNPACKED: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+  ; UNPACKED: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[EXTRACT]](<3 x s16>), 0
+  ; UNPACKED: [[EXTRACT1:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 0
+  ; UNPACKED: [[EXTRACT2:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 32
+  ; UNPACKED: $vgpr0 = COPY [[EXTRACT1]](<2 x s16>)
+  ; UNPACKED: $vgpr1 = COPY [[EXTRACT2]](<2 x s16>)
   ; UNPACKED: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
   ; PACKED-LABEL: name: image_load_tfe_v3f16
   ; PACKED: bb.1 (%ir-block.0):
@@ -341,16 +345,19 @@
   ; PACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
   ; PACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; PACKED: [[INT:%[0-9]+]]:_(<3 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 7, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
-  ; PACKED: [[EXTRACT:%[0-9]+]]:_(<4 x s16>) = G_EXTRACT [[INT]](<3 x s32>), 0
-  ; PACKED: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[EXTRACT]](<4 x s16>), 0
-  ; PACKED: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<3 x s32>), 64
-  ; PACKED: G_STORE [[EXTRACT2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
-  ; PACKED: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
-  ; PACKED: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF1]], [[EXTRACT1]](<3 x s16>), 0
-  ; PACKED: [[EXTRACT3:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 0
-  ; PACKED: [[EXTRACT4:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 32
-  ; PACKED: $vgpr0 = COPY [[EXTRACT3]](<2 x s16>)
-  ; PACKED: $vgpr1 = COPY [[EXTRACT4]](<2 x s16>)
+  ; PACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<3 x s32>)
+  ; PACKED: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[UV]](s32)
+  ; PACKED: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[UV1]](s32)
+  ; PACKED: [[DEF1:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF
+  ; PACKED: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST]](<2 x s16>), [[BITCAST1]](<2 x s16>), [[DEF1]](<2 x s16>)
+  ; PACKED: [[UV3:%[0-9]+]]:_(<3 x s16>), [[UV4:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<6 x s16>)
+  ; PACKED: G_STORE [[UV2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; PACKED: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+  ; PACKED: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF2]], [[UV3]](<3 x s16>), 0
+  ; PACKED: [[EXTRACT:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 0
+  ; PACKED: [[EXTRACT1:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[INSERT]](<4 x s16>), 32
+  ; PACKED: $vgpr0 = COPY [[EXTRACT]](<2 x s16>)
+  ; PACKED: $vgpr1 = COPY [[EXTRACT1]](<2 x s16>)
   ; PACKED: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
   %res = call { <3 x half>, i32 } @llvm.amdgcn.image.load.2d.sl_v3f16i32s.i32(i32 7, i32 %s, i32 %t, <8 x i32> %rsrc, i32 1, i32 0)
   %tex = extractvalue { <3 x half>, i32 } %res, 0
@@ -376,13 +383,16 @@
   ; UNPACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
   ; UNPACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; UNPACKED: [[INT:%[0-9]+]]:_(<5 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 15, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
-  ; UNPACKED: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[INT]](<5 x s32>), 0
-  ; UNPACKED: [[EXTRACT1:%[0-9]+]]:_(<4 x s16>) = G_EXTRACT [[EXTRACT]](<4 x s32>), 0
-  ; UNPACKED: [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<5 x s32>), 128
-  ; UNPACKED: G_STORE [[EXTRACT2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
-  ; UNPACKED: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[EXTRACT1]](<4 x s16>)
-  ; UNPACKED: $vgpr0 = COPY [[UV]](<2 x s16>)
-  ; UNPACKED: $vgpr1 = COPY [[UV1]](<2 x s16>)
+  ; UNPACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<5 x s32>)
+  ; UNPACKED: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
+  ; UNPACKED: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
+  ; UNPACKED: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV2]](s32)
+  ; UNPACKED: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[UV3]](s32)
+  ; UNPACKED: G_STORE [[UV4]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; UNPACKED: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
+  ; UNPACKED: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[TRUNC3]](s16)
+  ; UNPACKED: $vgpr0 = COPY [[BUILD_VECTOR1]](<2 x s16>)
+  ; UNPACKED: $vgpr1 = COPY [[BUILD_VECTOR2]](<2 x s16>)
   ; UNPACKED: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
   ; PACKED-LABEL: name: image_load_tfe_v4f16
   ; PACKED: bb.1 (%ir-block.0):
@@ -400,12 +410,12 @@
   ; PACKED: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
   ; PACKED: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; PACKED: [[INT:%[0-9]+]]:_(<3 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 15, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
-  ; PACKED: [[EXTRACT:%[0-9]+]]:_(<4 x s16>) = G_EXTRACT [[INT]](<3 x s32>), 0
-  ; PACKED: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<3 x s32>), 64
-  ; PACKED: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
-  ; PACKED: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[EXTRACT]](<4 x s16>)
-  ; PACKED: $vgpr0 = COPY [[UV]](<2 x s16>)
-  ; PACKED: $vgpr1 = COPY [[UV1]](<2 x s16>)
+  ; PACKED: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<3 x s32>)
+  ; PACKED: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[UV]](s32)
+  ; PACKED: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[UV1]](s32)
+  ; PACKED: G_STORE [[UV2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; PACKED: $vgpr0 = COPY [[BITCAST]](<2 x s16>)
+  ; PACKED: $vgpr1 = COPY [[BITCAST1]](<2 x s16>)
   ; PACKED: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
   %res = call { <4 x half>, i32 } @llvm.amdgcn.image.load.2d.sl_v4f16i32s.i32(i32 15, i32 %s, i32 %t, <8 x i32> %rsrc, i32 1, i32 0)
   %tex = extractvalue { <4 x half>, i32 } %res, 0
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-llvm.amdgcn.image.load.2d.ll
===================================================================
--- llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-llvm.amdgcn.image.load.2d.ll
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-llvm.amdgcn.image.load.2d.ll
@@ -115,10 +115,9 @@
   ; GCN: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
   ; GCN: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; GCN: [[INT:%[0-9]+]]:_(<2 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 1, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 8 from custom "TargetCustom8")
-  ; GCN: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<2 x s32>), 0
-  ; GCN: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<2 x s32>), 32
-  ; GCN: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
-  ; GCN: $vgpr0 = COPY [[EXTRACT]](s32)
+  ; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<2 x s32>)
+  ; GCN: G_STORE [[UV1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; GCN: $vgpr0 = COPY [[UV]](s32)
   ; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0
   %res = call { float, i32 } @llvm.amdgcn.image.load.2d.sl_f32i32s.i32(i32 1, i32 %s, i32 %t, <8 x i32> %rsrc, i32 1, i32 0)
   %tex = extractvalue { float, i32 } %res, 0
@@ -144,10 +143,8 @@
   ; GCN: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
   ; GCN: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; GCN: [[INT:%[0-9]+]]:_(<3 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 3, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
-  ; GCN: [[EXTRACT:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[INT]](<3 x s32>), 0
-  ; GCN: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<3 x s32>), 64
-  ; GCN: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
-  ; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[EXTRACT]](<2 x s32>)
+  ; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<3 x s32>)
+  ; GCN: G_STORE [[UV2]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
   ; GCN: $vgpr0 = COPY [[UV]](s32)
   ; GCN: $vgpr1 = COPY [[UV1]](s32)
   ; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
@@ -175,10 +172,8 @@
   ; GCN: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
   ; GCN: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; GCN: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 7, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
-  ; GCN: [[EXTRACT:%[0-9]+]]:_(<3 x s32>) = G_EXTRACT [[INT]](<4 x s32>), 0
-  ; GCN: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<4 x s32>), 96
-  ; GCN: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
-  ; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[EXTRACT]](<3 x s32>)
+  ; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GCN: G_STORE [[UV3]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
   ; GCN: $vgpr0 = COPY [[UV]](s32)
   ; GCN: $vgpr1 = COPY [[UV1]](s32)
   ; GCN: $vgpr2 = COPY [[UV2]](s32)
@@ -207,10 +202,8 @@
   ; GCN: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
   ; GCN: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; GCN: [[INT:%[0-9]+]]:_(<5 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 15, [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 32 from custom "TargetCustom8")
-  ; GCN: [[EXTRACT:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT [[INT]](<5 x s32>), 0
-  ; GCN: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[INT]](<5 x s32>), 128
-  ; GCN: G_STORE [[EXTRACT1]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
-  ; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[EXTRACT]](<4 x s32>)
+  ; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<5 x s32>)
+  ; GCN: G_STORE [[UV4]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
   ; GCN: $vgpr0 = COPY [[UV]](s32)
   ; GCN: $vgpr1 = COPY [[UV1]](s32)
   ; GCN: $vgpr2 = COPY [[UV2]](s32)