Index: llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -3436,32 +3436,152 @@
   B.buildBuildVector(DstReg, RemergeParts);
 }
 
+/// Turn a set of s16 typed registers in \p A16AddrRegs into a dword sized
+/// vector with s16 typed elements.
+static void packImageA16AddressToDwords(MachineIRBuilder &B, MachineInstr &MI,
+                                        SmallVectorImpl<Register> &PackedAddrs,
+                                        int DimIdx, int NumVAddrs) {
+  const LLT S16 = LLT::scalar(16);
+  const LLT V2S16 = LLT::vector(2, 16);
+
+  SmallVector<Register, 8> A16AddrRegs;
+  A16AddrRegs.resize(NumVAddrs);
+
+  for (int I = 0; I != NumVAddrs; ++I) {
+    A16AddrRegs[I] = MI.getOperand(DimIdx + I).getReg();
+    assert(B.getMRI()->getType(A16AddrRegs[I]) == S16);
+  }
+
+  // Round to dword.
+  if (NumVAddrs % 2 != 0)
+    A16AddrRegs.push_back(B.buildUndef(S16).getReg(0));
+
+  PackedAddrs.resize(A16AddrRegs.size() / 2);
+  for (int I = 0, E = PackedAddrs.size(); I != E; ++I) {
+    PackedAddrs[I] = B.buildBuildVector(
+        V2S16, {A16AddrRegs[2 * I], A16AddrRegs[2 * I + 1]}).getReg(0);
+  }
+}
+
+/// Return the number of address operands in an image intrinsic.
+static int getImageNumVAddr(const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr,
+                            const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode) {
+  const AMDGPU::MIMGDimInfo *DimInfo
+    = AMDGPU::getMIMGDimInfo(ImageDimIntr->Dim);
+
+  int NumGradients = BaseOpcode->Gradients ? DimInfo->NumGradients : 0;
+  int NumCoords = BaseOpcode->Coordinates ? DimInfo->NumCoords : 0;
+  int NumLCM = BaseOpcode->LodOrClampOrMip ? 1 : 0;
+  return BaseOpcode->NumExtraArgs + NumGradients + NumCoords + NumLCM;
+}
+
+/// Return the index of the first address operand in an image intrinsic.
+static int getImageVAddrIdxBegin(const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode,
+                                 int NumDefs) {
+  if (BaseOpcode->Atomic)
+    return NumDefs + 1 + (BaseOpcode->AtomicX2 ? 2 : 1);
+
+  int DMaskIdx = NumDefs + 1 + (BaseOpcode->Store ? 1 : 0);
+  return DMaskIdx + 1;
+}
+
+/// Rewrite image intrinsics to use the register layouts expected by the
+/// subtarget.
+///
+/// Depending on the subtarget, loads/stores with 16-bit element data need to
+/// be rewritten to use the low half of 32-bit registers, or directly use a
+/// packed layout. 16-bit addresses should also sometimes be packed into
+/// 32-bit registers.
+///
+/// We don't want to directly select image instructions just yet, but we do
+/// want to expose all register repacking to the legalizer/combiners. We also
+/// don't want a selected instruction entering RegBankSelect. In order to
+/// avoid defining a multitude of intermediate image instructions, directly
+/// hack on the intrinsic's arguments. In cases like a16 addresses, this
+/// requires padding the now unnecessary arguments with $noreg.
 bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
     MachineInstr &MI, MachineIRBuilder &B, GISelChangeObserver &Observer,
     const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr) const {
-  bool IsTFE = MI.getNumExplicitDefs() == 2;
-
+  const int NumDefs = MI.getNumExplicitDefs();
+  bool IsTFE = NumDefs == 2;
   // We are only processing the operands of d16 image operations on subtargets
   // that use the unpacked register layout, or need to repack the TFE result.
-  // TODO: Need to handle a16 images too
   // TODO: Do we need to guard against already legalized intrinsics?
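+  // For illustration (a summary of the operand layouts produced below, not
+  // extra functionality): with a16, a 2D address (s, t) is packed into a
+  // single <2 x s16> operand; a 3D address (s, t, r) becomes two <2 x s16>
+  // operands {(s, t), (r, undef)} when NSA is used, or one <4 x s16>
+  // concat_vectors otherwise, and the vacated address operands are set to
+  // $noreg.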
-  if (!IsTFE && !ST.hasUnpackedD16VMem())
-    return true;
-
   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
       AMDGPU::getMIMGBaseOpcodeInfo(ImageDimIntr->BaseOpcode);
-  if (BaseOpcode->Atomic) // No d16 atomics, or TFE.
-    return true;
-
   B.setInstr(MI);
 
   MachineRegisterInfo *MRI = B.getMRI();
   const LLT S32 = LLT::scalar(32);
   const LLT S16 = LLT::scalar(16);
 
+  // Index of first address argument
+  const int AddrIdx = getImageVAddrIdxBegin(BaseOpcode, NumDefs);
+
+  // Check for 16-bit addresses and pack if true.
+  int DimIdx = AddrIdx + BaseOpcode->NumExtraArgs;
+  LLT AddrTy = MRI->getType(MI.getOperand(DimIdx).getReg());
+  const bool IsA16 = AddrTy == S16;
+
+  // TODO: Handle NSA vs. non-NSA for non-a16 case.
+
+  // Rewrite the addressing register layout before doing anything else.
+  if (IsA16) {
+    if (!ST.hasFeature(AMDGPU::FeatureR128A16))
+      return false;
+
+    const int NumVAddrs = getImageNumVAddr(ImageDimIntr, BaseOpcode);
+
+    // If the register allocator cannot place the address registers
+    // contiguously without introducing moves, then using the non-sequential
+    // address encoding is always preferable, since it saves VALU instructions
+    // and is usually a wash in terms of code size or even better.
+    //
+    // However, we currently have no way of hinting to the register allocator
+    // that MIMG addresses should be placed contiguously when it is possible
+    // to do so, so force non-NSA for the common 2-address case as a
+    // heuristic.
+    //
+    // SIShrinkInstructions will convert NSA encodings to non-NSA after
+    // register allocation when possible.
+    const bool UseNSA = NumVAddrs >= 3 &&
+                        ST.hasFeature(AMDGPU::FeatureNSAEncoding);
+
+    if (NumVAddrs > 1) {
+      SmallVector<Register, 8> PackedRegs;
+      packImageA16AddressToDwords(B, MI, PackedRegs, DimIdx, NumVAddrs);
+
+      if (!UseNSA && PackedRegs.size() > 1) {
+        LLT PackedAddrTy = LLT::vector(2 * PackedRegs.size(), 16);
+        auto Concat = B.buildConcatVectors(PackedAddrTy, PackedRegs);
+        PackedRegs[0] = Concat.getReg(0);
+        PackedRegs.resize(1);
+      }
+
+      // FIXME: We'll notify the observer multiple times if there are further
+      // modifications later.
+      Observer.changingInstr(MI);
+
+      const int NumPacked = PackedRegs.size();
+      for (int I = 0; I != NumVAddrs; ++I) {
+        assert(MI.getOperand(DimIdx + I).getReg() != AMDGPU::NoRegister);
+
+        if (I < NumPacked)
+          MI.getOperand(DimIdx + I).setReg(PackedRegs[I]);
+        else
+          MI.getOperand(DimIdx + I).setReg(AMDGPU::NoRegister);
+      }
+
+      Observer.changedInstr(MI);
+    }
+  }
+
+  if (BaseOpcode->Atomic) // No d16 atomics, or TFE.
+    return true;
+
   if (BaseOpcode->Store) { // No TFE for stores?
     Register VData = MI.getOperand(1).getReg();
     LLT Ty = MRI->getType(VData);
@@ -3470,9 +3590,13 @@
     B.setInstr(MI);
 
-    Observer.changingInstr(MI);
-    MI.getOperand(1).setReg(handleD16VData(B, *MRI, VData));
-    Observer.changedInstr(MI);
+    Register RepackedReg = handleD16VData(B, *MRI, VData);
+    if (RepackedReg != VData) {
+      Observer.changingInstr(MI);
+      MI.getOperand(1).setReg(RepackedReg);
+      Observer.changedInstr(MI);
+    }
+
     return true;
   }
 
@@ -3557,7 +3681,7 @@
   }
 
   // Must be an image load.
-  if (!Ty.isVector() || Ty.getElementType() != S16)
+  if (!ST.hasUnpackedD16VMem() || !Ty.isVector() || Ty.getElementType() != S16)
     return true;
 
   B.setInsertPt(*MI.getParent(), ++MI.getIterator());
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-llvm.amdgcn.image.atomic.dim.a16.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-llvm.amdgcn.image.atomic.dim.a16.ll
@@ -0,0 +1,1201 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -mattr=+r128-a16 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10NSA %s
+
+define amdgpu_ps float @atomic_swap_1d(<8 x i32> inreg %rsrc, i32 %data, i16 %s) {
+  ; GFX9-LABEL: name: atomic_swap_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.swap.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_swap_1d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.swap.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.swap.1d.i32.i16(i32 %data, i16 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_add_1d(<8 x i32> inreg %rsrc, i32 %data, i16 %s) {
+  ; GFX9-LABEL: name: atomic_add_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_add_1d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.add.1d.i32.i16(i32 %data, i16 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_sub_1d(<8 x i32> inreg %rsrc, i32 %data, i16 %s) {
+  ; GFX9-LABEL: name: atomic_sub_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.sub.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_sub_1d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.sub.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.sub.1d.i32.i16(i32 %data, i16 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_smin_1d(<8 x i32> inreg %rsrc, i32 %data, i16 %s) {
+  ; GFX9-LABEL: name: atomic_smin_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.smin.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_smin_1d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.smin.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.smin.1d.i32.i16(i32 %data, i16 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+
+define amdgpu_ps float @atomic_umin_1d(<8 x i32> inreg %rsrc, i32 %data, i16 %s) {
+  ; GFX9-LABEL: name: atomic_umin_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.umin.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_umin_1d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.umin.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.umin.1d.i32.i16(i32 %data, i16 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_smax_1d(<8 x i32> inreg %rsrc, i32 %data, i16 %s) {
+  ; GFX9-LABEL: name: atomic_smax_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.smax.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_smax_1d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.smax.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.smax.1d.i32.i16(i32 %data, i16 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_umax_1d(<8 x i32> inreg %rsrc, i32 %data, i16 %s) {
+  ; GFX9-LABEL: name: atomic_umax_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.umax.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_umax_1d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.umax.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.umax.1d.i32.i16(i32 %data, i16 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_and_1d(<8 x i32> inreg %rsrc, i32 %data, i16 %s) {
+  ; GFX9-LABEL: name: atomic_and_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.and.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_and_1d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.and.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.and.1d.i32.i16(i32 %data, i16 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_or_1d(<8 x i32> inreg %rsrc, i32 %data, i16 %s) {
+  ; GFX9-LABEL: name: atomic_or_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.or.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_or_1d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.or.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.or.1d.i32.i16(i32 %data, i16 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_xor_1d(<8 x i32> inreg %rsrc, i32 %data, i16 %s) {
+  ; GFX9-LABEL: name: atomic_xor_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.xor.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_xor_1d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.xor.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.xor.1d.i32.i16(i32 %data, i16 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_inc_1d(<8 x i32> inreg %rsrc, i32 %data, i16 %s) {
+  ; GFX9-LABEL: name: atomic_inc_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.inc.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_inc_1d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.inc.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.inc.1d.i32.i16(i32 %data, i16 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_dec_1d(<8 x i32> inreg %rsrc, i32 %data, i16 %s) {
+  ; GFX9-LABEL: name: atomic_dec_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.dec.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_dec_1d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.dec.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.dec.1d.i32.i16(i32 %data, i16 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_cmpswap_1d(<8 x i32> inreg %rsrc, i32 %cmp, i32 %swap, i16 %s) {
+  ; GFX9-LABEL: name: atomic_cmpswap_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY10]](s32)
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.cmpswap.1d), [[COPY8]](s32), [[COPY9]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_cmpswap_1d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY10]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.cmpswap.1d), [[COPY8]](s32), [[COPY9]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.cmpswap.1d.i32.i16(i32 %cmp, i32 %swap, i16 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_add_2d(<8 x i32> inreg %rsrc, i32 %data, i16 %s, i16 %t) {
+  ; GFX9-LABEL: name: atomic_add_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.2d), [[COPY8]](s32), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_add_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.2d), [[COPY8]](s32), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.add.2d.i32.i16(i32 %data, i16 %s, i16 %t, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_add_3d(<8 x i32> inreg %rsrc, i32 %data, i16 %s, i16 %t, i16 %r) {
+  ; GFX9-LABEL: name: atomic_add_3d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32)
+  ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32)
+  ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.3d), [[COPY8]](s32), [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_add_3d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32)
+  ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.3d), [[COPY8]](s32), [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.add.3d.i32.i16(i32 %data, i16 %s, i16 %t, i16 %r, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_add_cube(<8 x i32> inreg %rsrc, i32 %data, i16 %s, i16 %t, i16 %face) {
+  ; GFX9-LABEL: name: atomic_add_cube
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32)
+  ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32)
+  ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.cube), [[COPY8]](s32), [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_add_cube
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32)
+  ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.cube), [[COPY8]](s32), [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.add.cube.i32.i16(i32 %data, i16 %s, i16 %t, i16 %face, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_add_1darray(<8 x i32> inreg %rsrc, i32 %data, i16 %s, i16 %slice) {
+  ; GFX9-LABEL: name: atomic_add_1darray
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.1darray), [[COPY8]](s32), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX9: $vgpr0 = COPY [[INT]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0
+  ; GFX10NSA-LABEL: name: atomic_add_1darray
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY11]](s32), [[COPY12]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.1darray), [[COPY8]](s32), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8")
+  ; GFX10NSA: $vgpr0 = COPY [[INT]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0
+main_body:
+  %v = call i32 @llvm.amdgcn.image.atomic.add.1darray.i32.i16(i32 %data, i16 %s, i16 %slice, <8 x i32> %rsrc, i32 0, i32 0)
+  %out = bitcast i32 %v to float
+  ret float %out
+}
+
+define amdgpu_ps float @atomic_add_2darray(<8 x i32> inreg %rsrc, i32 %data, i16 %s, i16 %t, i16 %slice) {
+  ; GFX9-LABEL: name: atomic_add_2darray
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32)
+  ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
[[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32) + ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32) + ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) + ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.2darray), [[COPY8]](s32), [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8") + ; GFX9: $vgpr0 = COPY [[INT]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX10NSA-LABEL: name: atomic_add_2darray + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32) + ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32) + ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32) + ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.2darray), [[COPY8]](s32), [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8") + ; GFX10NSA: $vgpr0 = COPY [[INT]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0 +main_body: + %v = call i32 @llvm.amdgcn.image.atomic.add.2darray.i32.i16(i32 %data, i16 %s, i16 %t, i16 %slice, <8 x i32> %rsrc, i32 0, i32 0) + %out = bitcast i32 %v to float + ret float %out +} + +define amdgpu_ps float @atomic_add_2dmsaa(<8 x i32> inreg %rsrc, i32 %data, i16 %s, i16 %t, i16 %fragid) { + ; GFX9-LABEL: name: atomic_add_2dmsaa + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: 
[[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32) + ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32) + ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32) + ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) + ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.2dmsaa), [[COPY8]](s32), [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8") + ; GFX9: $vgpr0 = COPY [[INT]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX10NSA-LABEL: name: atomic_add_2dmsaa + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32) + ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32) + ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32) + ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[DEF]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.2dmsaa), [[COPY8]](s32), [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8") + ; GFX10NSA: $vgpr0 = COPY [[INT]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0 +main_body: + %v = call i32 @llvm.amdgcn.image.atomic.add.2dmsaa.i32.i16(i32 %data, i16 %s, i16 %t, i16 %fragid, <8 x i32> %rsrc, i32 0, i32 0) + %out = bitcast i32 %v to float + ret float %out +} + +define amdgpu_ps float @atomic_add_2darraymsaa(<8 x i32> inreg %rsrc, i32 %data, i16 
%s, i16 %t, i16 %slice, i16 %fragid) { + ; GFX9-LABEL: name: atomic_add_2darraymsaa + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32) + ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[COPY14]](s32) + ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32) + ; GFX9: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) + ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.2darraymsaa), [[COPY8]](s32), [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8") + ; GFX9: $vgpr0 = COPY [[INT]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX10NSA-LABEL: name: atomic_add_2darraymsaa + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY9]](s32) + ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[COPY14]](s32) + ; GFX10NSA: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32) + ; GFX10NSA: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = 
G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[COPY16]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.2darraymsaa), [[COPY8]](s32), [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8") + ; GFX10NSA: $vgpr0 = COPY [[INT]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0 +main_body: + %v = call i32 @llvm.amdgcn.image.atomic.add.2darraymsaa.i32.i16(i32 %data, i16 %s, i16 %t, i16 %slice, i16 %fragid, <8 x i32> %rsrc, i32 0, i32 0) + %out = bitcast i32 %v to float + ret float %out +} + +define amdgpu_ps float @atomic_add_1d_slc(<8 x i32> inreg %rsrc, i32 %data, i16 %s) { + ; GFX9-LABEL: name: atomic_add_1d_slc + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32) + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 2 :: (volatile dereferenceable load store 4 on custom "TargetCustom8") + ; GFX9: $vgpr0 = COPY [[INT]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX10NSA-LABEL: name: atomic_add_1d_slc + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY9]](s32) + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.add.1d), [[COPY8]](s32), [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 2 :: (volatile dereferenceable load store 4 on custom "TargetCustom8") + ; GFX10NSA: $vgpr0 = COPY [[INT]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0 +main_body: + %v = call i32 @llvm.amdgcn.image.atomic.add.1d.i32.i16(i32 %data, i16 %s, <8 x i32> %rsrc, i32 0, i32 2) + %out = bitcast i32 %v to float + ret float %out +} + +define amdgpu_ps float @atomic_cmpswap_2d(<8 x i32> inreg %rsrc, i32 %cmp, 
i32 %swap, i16 %s, i16 %t) { + ; GFX9-LABEL: name: atomic_cmpswap_2d + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32) + ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.cmpswap.2d), [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8") + ; GFX9: $vgpr0 = COPY [[INT]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX10NSA-LABEL: name: atomic_cmpswap_2d + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32) + ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.cmpswap.2d), [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8") + ; GFX10NSA: $vgpr0 = COPY [[INT]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0 +main_body: + %v = call i32 @llvm.amdgcn.image.atomic.cmpswap.2d.i32.i16(i32 %cmp, i32 %swap, i16 %s, i16 %t, <8 x i32> %rsrc, i32 0, i32 0) + %out = bitcast i32 %v to float + ret float %out +} + +define amdgpu_ps float @atomic_cmpswap_3d(<8 x i32> inreg %rsrc, i32 %cmp, i32 %swap, i16 %s, i16 %t, i16 %r) { + ; 
GFX9-LABEL: name: atomic_cmpswap_3d + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32) + ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[COPY14]](s32) + ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32) + ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[DEF]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) + ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.cmpswap.3d), [[COPY8]](s32), [[COPY9]](s32), [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8") + ; GFX9: $vgpr0 = COPY [[INT]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX10NSA-LABEL: name: atomic_cmpswap_3d + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32) + ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[COPY14]](s32) + ; GFX10NSA: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32) + ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY15]](s32), [[DEF]](s32) + ; GFX10NSA: 
[[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.cmpswap.3d), [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8") + ; GFX10NSA: $vgpr0 = COPY [[INT]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0 +main_body: + %v = call i32 @llvm.amdgcn.image.atomic.cmpswap.3d.i32.i16(i32 %cmp, i32 %swap, i16 %s, i16 %t, i16 %r, <8 x i32> %rsrc, i32 0, i32 0) + %out = bitcast i32 %v to float + ret float %out +} + +define amdgpu_ps float @atomic_cmpswap_2darraymsaa(<8 x i32> inreg %rsrc, i32 %cmp, i32 %swap, i16 %s, i16 %t, i16 %slice, i16 %fragid) { + ; GFX9-LABEL: name: atomic_cmpswap_2darraymsaa + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4 + ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32) + ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32) + ; GFX9: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32) + ; GFX9: [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) + ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.cmpswap.2darraymsaa), [[COPY8]](s32), [[COPY9]](s32), [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8") + ; GFX9: $vgpr0 = COPY [[INT]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX10NSA-LABEL: name: atomic_cmpswap_2darraymsaa + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) 
= COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY $vgpr4 + ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY $vgpr5 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY10]](s32) + ; GFX10NSA: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY11]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32) + ; GFX10NSA: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY12]](s32) + ; GFX10NSA: [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY13]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.atomic.cmpswap.2darraymsaa), [[COPY8]](s32), [[COPY9]](s32), [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (volatile dereferenceable load store 4 on custom "TargetCustom8") + ; GFX10NSA: $vgpr0 = COPY [[INT]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0 +main_body: + %v = call i32 @llvm.amdgcn.image.atomic.cmpswap.2darraymsaa.i32.i16(i32 %cmp, i32 %swap, i16 %s, i16 %t, i16 %slice, i16 %fragid, <8 x i32> %rsrc, i32 0, i32 0) + %out = bitcast i32 %v to float + ret float %out +} + +declare i32 @llvm.amdgcn.image.atomic.swap.1d.i32.i16(i32, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.add.1d.i32.i16(i32, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.sub.1d.i32.i16(i32, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.smin.1d.i32.i16(i32, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.umin.1d.i32.i16(i32, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.smax.1d.i32.i16(i32, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.umax.1d.i32.i16(i32, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.and.1d.i32.i16(i32, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.or.1d.i32.i16(i32, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.xor.1d.i32.i16(i32, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.inc.1d.i32.i16(i32, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.dec.1d.i32.i16(i32, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.cmpswap.1d.i32.i16(i32, i32, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.add.2d.i32.i16(i32, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.add.3d.i32.i16(i32, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.add.cube.i32.i16(i32, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.add.1darray.i32.i16(i32, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.add.2darray.i32.i16(i32, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.add.2dmsaa.i32.i16(i32, i16, i16, i16, <8 
x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.add.2darraymsaa.i32.i16(i32, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.cmpswap.2d.i32.i16(i32, i32, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.cmpswap.3d.i32.i16(i32, i32, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.cmpswap.cube.i32.i16(i32, i32, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.cmpswap.1darray.i32.i16(i32, i32, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.cmpswap.2darray.i32.i16(i32, i32, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.cmpswap.2dmsaa.i32.i16(i32, i32, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0 +declare i32 @llvm.amdgcn.image.atomic.cmpswap.2darraymsaa.i32.i16(i32, i32, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #0 + +attributes #0 = { nounwind } Index: llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-llvm.amdgcn.image.dim.a16.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-llvm.amdgcn.image.dim.a16.ll @@ -0,0 +1,3287 @@ +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX9 %s +; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -mattr=+r128-a16 -stop-after=legalizer -o - %s | FileCheck -check-prefix=GFX10NSA %s + +define amdgpu_ps <4 x float> @load_1d(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: load_1d + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: load_1d + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; 
GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %s = extractelement <2 x i16> %coords, i32 0 + %v = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i16(i32 15, i16 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret <4 x float> %v +} + +define amdgpu_ps <4 x float> @load_2d(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: load_2d + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY9]](s32), [[COPY10]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: load_2d + ; 
GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY9]](s32), [[COPY10]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %s = extractelement <2 x i16> %coords, i32 0 + %t = extractelement <2 x i16> %coords, i32 1 + %v = call <4 x float> @llvm.amdgcn.image.load.2d.v4f32.i16(i32 15, i16 %s, i16 %t, <8 x i32> %rsrc, i32 0, i32 0) + ret <4 x float> %v +} + +define amdgpu_ps <4 x float> @load_3d(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) { + ; GFX9-LABEL: name: load_3d + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>) + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY 
[[BITCAST]](s32) + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32) + ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.3d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: load_3d + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>) + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32) + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.3d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit 
$vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %s = extractelement <2 x i16> %coords_lo, i32 0 + %t = extractelement <2 x i16> %coords_lo, i32 1 + %r = extractelement <2 x i16> %coords_hi, i32 0 + %v = call <4 x float> @llvm.amdgcn.image.load.3d.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %r, <8 x i32> %rsrc, i32 0, i32 0) + ret <4 x float> %v +} + +define amdgpu_ps <4 x float> @load_cube(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) { + ; GFX9-LABEL: name: load_cube + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>) + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32) + ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.cube), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: load_cube + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(<2 x 
s16>) = COPY $vgpr1 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>) + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32) + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.cube), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %s = extractelement <2 x i16> %coords_lo, i32 0 + %t = extractelement <2 x i16> %coords_lo, i32 1 + %slice = extractelement <2 x i16> %coords_hi, i32 0 + %v = call <4 x float> @llvm.amdgcn.image.load.cube.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %slice, <8 x i32> %rsrc, i32 0, i32 0) + ret <4 x float> %v +} + +define amdgpu_ps <4 x float> @load_1darray(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: load_1darray + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY9]](s32), [[COPY10]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1darray), 15, 
[[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: load_1darray + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY9]](s32), [[COPY10]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1darray), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %s = extractelement <2 x i16> %coords, i32 0 + %slice = extractelement <2 x i16> %coords, i32 1 + %v = call <4 x float> @llvm.amdgcn.image.load.1darray.v4f32.i16(i32 15, i16 %s, i16 %slice, <8 x i32> %rsrc, i32 0, i32 0) + ret <4 x float> %v +} + +define amdgpu_ps <4 x float> @load_2darray(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) { + ; GFX9-LABEL: name: load_2darray + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(<2 x 
s16>) = COPY $vgpr1
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF]](s32)
+  ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2darray), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX9: $vgpr0 = COPY [[UV]](s32)
+  ; GFX9: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: load_2darray
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2darray), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %s = extractelement <2 x i16> %coords_lo, i32 0
+  %t = extractelement <2 x i16> %coords_lo, i32 1
+  %slice = extractelement <2 x i16> %coords_hi, i32 0
+  %v = call <4 x float> @llvm.amdgcn.image.load.2darray.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %slice, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_2dmsaa(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
+  ; GFX9-LABEL: name: load_2dmsaa
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF]](s32)
+  ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2dmsaa), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX9: $vgpr0 = COPY [[UV]](s32)
+  ; GFX9: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: load_2dmsaa
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2dmsaa), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %s = extractelement <2 x i16> %coords_lo, i32 0
+  %t = extractelement <2 x i16> %coords_lo, i32 1
+  %fragid = extractelement <2 x i16> %coords_hi, i32 0
+  %v = call <4 x float> @llvm.amdgcn.image.load.2dmsaa.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %fragid, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_2darraymsaa(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
+  ; GFX9-LABEL: name: load_2darraymsaa
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2darraymsaa), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX9: $vgpr0 = COPY [[UV]](s32)
+  ; GFX9: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: load_2darraymsaa
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX10NSA: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2darraymsaa), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %s = extractelement <2 x i16> %coords_lo, i32 0
+  %t = extractelement <2 x i16> %coords_lo, i32 1
+  %slice = extractelement <2 x i16> %coords_hi, i32 0
+  %fragid = extractelement <2 x i16> %coords_hi, i32 1
+  %v = call <4 x float> @llvm.amdgcn.image.load.2darraymsaa.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %slice, i16 %fragid, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_1d(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
+  ; GFX9-LABEL: name: load_mip_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY9]](s32), [[COPY10]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.mip.1d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX9: $vgpr0 = COPY [[UV]](s32)
+  ; GFX9: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: load_mip_1d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY9]](s32), [[COPY10]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.mip.1d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %s = extractelement <2 x i16> %coords, i32 0
+  %mip = extractelement <2 x i16> %coords, i32 1
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i16(i32 15, i16 %s, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_2d(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
+  ; GFX9-LABEL: name: load_mip_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF]](s32)
+  ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.mip.2d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX9: $vgpr0 = COPY [[UV]](s32)
+  ; GFX9: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: load_mip_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.mip.2d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %s = extractelement <2 x i16> %coords_lo, i32 0
+  %t = extractelement <2 x i16> %coords_lo, i32 1
+  %mip = extractelement <2 x i16> %coords_hi, i32 0
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_3d(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
+  ; GFX9-LABEL: name: load_mip_3d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.mip.3d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX9: $vgpr0 = COPY [[UV]](s32)
+  ; GFX9: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: load_mip_3d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX10NSA: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.mip.3d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %s = extractelement <2 x i16> %coords_lo, i32 0
+  %t = extractelement <2 x i16> %coords_lo, i32 1
+  %r = extractelement <2 x i16> %coords_hi, i32 0
+  %mip = extractelement <2 x i16> %coords_hi, i32 1
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %r, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_cube(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
+  ; GFX9-LABEL: name: load_mip_cube
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.mip.cube), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX9: $vgpr0 = COPY [[UV]](s32)
+  ; GFX9: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: load_mip_cube
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX10NSA: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.mip.cube), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %s = extractelement <2 x i16> %coords_lo, i32 0
+  %t = extractelement <2 x i16> %coords_lo, i32 1
+  %slice = extractelement <2 x i16> %coords_hi, i32 0
+  %mip = extractelement <2 x i16> %coords_hi, i32 1
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %slice, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_1darray(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
+  ; GFX9-LABEL: name: load_mip_1darray
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF]](s32)
+  ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.mip.1darray), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX9: $vgpr0 = COPY [[UV]](s32)
+  ; GFX9: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: load_mip_1darray
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.mip.1darray), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %s = extractelement <2 x i16> %coords_lo, i32 0
+  %slice = extractelement <2 x i16> %coords_lo, i32 1
+  %mip = extractelement <2 x i16> %coords_hi, i32 0
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i16(i32 15, i16 %s, i16 %slice, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps <4 x float> @load_mip_2darray(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
+  ; GFX9-LABEL: name: load_mip_2darray
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.mip.2darray), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX9: $vgpr0 = COPY [[UV]](s32)
+  ; GFX9: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: load_mip_2darray
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX10NSA: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.mip.2darray), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>)
+  ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %s = extractelement <2 x i16> %coords_lo, i32 0
+  %t = extractelement <2 x i16> %coords_lo, i32 1
+  %slice = extractelement <2 x i16> %coords_hi, i32 0
+  %mip = extractelement <2 x i16> %coords_hi, i32 1
+  %v = call <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %slice, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0)
+  ret <4 x float> %v
+}
+
+define amdgpu_ps void @store_1d(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords) {
+  ; GFX9-LABEL: name: store_1d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+  ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9: S_ENDPGM 0
+  ; GFX10NSA-LABEL: name: store_1d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+  ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10NSA: S_ENDPGM 0
+main_body:
+  %s = extractelement <2 x i16> %coords, i32 0
+  call void @llvm.amdgcn.image.store.1d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_ps void @store_2d(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords) {
+  ; GFX9-LABEL: name: store_2d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.2d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9: S_ENDPGM 0
+  ; GFX10NSA-LABEL: name: store_2d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.2d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10NSA: S_ENDPGM 0
+main_body:
+  %s = extractelement <2 x i16> %coords, i32 0
+  %t = extractelement <2 x i16> %coords, i32 1
+  call void @llvm.amdgcn.image.store.2d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_ps void @store_3d(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
+  ; GFX9-LABEL: name: store_3d
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
+  ; GFX9: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>)
+  ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX9: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[DEF]](s32)
+  ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.3d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9: S_ENDPGM 0
+  ; GFX10NSA-LABEL: name: store_3d
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
+  ; GFX10NSA: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>)
+  ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX10NSA: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[DEF]](s32)
+  ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.3d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10NSA: S_ENDPGM 0
+main_body:
+  %s = extractelement <2 x i16> %coords_lo, i32 0
+  %t = extractelement <2 x i16> %coords_lo, i32 1
+  %r = extractelement <2 x i16> %coords_hi, i32 0
+  call void @llvm.amdgcn.image.store.3d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %r, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_ps void @store_cube(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
+  ; GFX9-LABEL: name: store_cube
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
+  ; GFX9: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>)
+  ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX9: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[DEF]](s32)
+  ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.cube), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9: S_ENDPGM 0
+  ; GFX10NSA-LABEL: name: store_cube
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
+  ; GFX10NSA: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>)
+  ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX10NSA: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[DEF]](s32)
+  ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.cube), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10NSA: S_ENDPGM 0
+main_body:
+  %s = extractelement <2 x i16> %coords_lo, i32 0
+  %t = extractelement <2 x i16> %coords_lo, i32 1
+  %slice = extractelement <2 x i16> %coords_hi, i32 0
+  call void @llvm.amdgcn.image.store.cube.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %slice, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_ps void @store_1darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords) {
+  ; GFX9-LABEL: name: store_1darray
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.1darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9: S_ENDPGM 0
+  ; GFX10NSA-LABEL: name: store_1darray
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[COPY14]](s32)
+  ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.1darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX10NSA: S_ENDPGM 0
+main_body:
+  %s = extractelement <2 x i16> %coords, i32 0
+  %slice = extractelement <2 x i16> %coords, i32 1
+  call void @llvm.amdgcn.image.store.1darray.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %slice, <8 x i32> %rsrc, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_ps void @store_2darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
+  ; GFX9-LABEL: name: store_2darray
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
+  ; GFX9: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>)
+  ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX9: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[DEF]](s32)
+  ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.2darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8")
+  ; GFX9: S_ENDPGM 0
+  ; GFX10NSA-LABEL: name: store_2darray
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
+  ; GFX10NSA: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>)
+  ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32)
+  ; GFX10NSA: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[DEF]](s32)
+  ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS
intrinsic(@llvm.amdgcn.image.store.2darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX10NSA: S_ENDPGM 0 +main_body: + %s = extractelement <2 x i16> %coords_lo, i32 0 + %t = extractelement <2 x i16> %coords_lo, i32 1 + %slice = extractelement <2 x i16> %coords_hi, i32 0 + call void @llvm.amdgcn.image.store.2darray.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %slice, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @store_2dmsaa(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords_lo, <2 x i16> %coords_hi) { + ; GFX9-LABEL: name: store_2dmsaa + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX9: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32) + ; GFX9: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[DEF]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) + ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.2dmsaa), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX9: S_ENDPGM 0 + ; GFX10NSA-LABEL: name: store_2dmsaa + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; 
GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX10NSA: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX10NSA: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32) + ; GFX10NSA: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[DEF]](s32) + ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.2dmsaa), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX10NSA: S_ENDPGM 0 +main_body: + %s = extractelement <2 x i16> %coords_lo, i32 0 + %t = extractelement <2 x i16> %coords_lo, i32 1 + %fragid = extractelement <2 x i16> %coords_hi, i32 0 + call void @llvm.amdgcn.image.store.2dmsaa.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %fragid, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @store_2darraymsaa(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords_lo, <2 x i16> %coords_hi) { + ; GFX9-LABEL: name: store_2darraymsaa + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX9: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) 
+ ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32) + ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32) + ; GFX9: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX9: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) + ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.2darraymsaa), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX9: S_ENDPGM 0 + ; GFX10NSA-LABEL: name: store_2darraymsaa + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX10NSA: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX10NSA: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX10NSA: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32) + ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX10NSA: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32) + ; GFX10NSA: 
[[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX10NSA: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32) + ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.2darraymsaa), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX10NSA: S_ENDPGM 0 +main_body: + %s = extractelement <2 x i16> %coords_lo, i32 0 + %t = extractelement <2 x i16> %coords_lo, i32 1 + %slice = extractelement <2 x i16> %coords_hi, i32 0 + %fragid = extractelement <2 x i16> %coords_hi, i32 1 + call void @llvm.amdgcn.image.store.2darraymsaa.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %slice, i16 %fragid, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @store_mip_1d(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords) { + ; GFX9-LABEL: name: store_mip_1d + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[COPY14]](s32) + ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.mip.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX9: S_ENDPGM 0 + ; GFX10NSA-LABEL: name: store_mip_1d + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: 
[[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY13]](s32), [[COPY14]](s32) + ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.mip.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX10NSA: S_ENDPGM 0 +main_body: + %s = extractelement <2 x i16> %coords, i32 0 + %mip = extractelement <2 x i16> %coords, i32 1 + call void @llvm.amdgcn.image.store.mip.1d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @store_mip_2d(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords_lo, <2 x i16> %coords_hi) { + ; GFX9-LABEL: name: store_mip_2d + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX9: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = 
G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32) + ; GFX9: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[DEF]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) + ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.mip.2d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX9: S_ENDPGM 0 + ; GFX10NSA-LABEL: name: store_mip_2d + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX10NSA: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX10NSA: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32) + ; GFX10NSA: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[DEF]](s32) + ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.mip.2d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX10NSA: S_ENDPGM 0 +main_body: + %s = extractelement <2 x i16> %coords_lo, i32 0 + %t = extractelement <2 x i16> %coords_lo, i32 1 + %mip = extractelement <2 x i16> %coords_hi, i32 0 + call void @llvm.amdgcn.image.store.mip.2d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @store_mip_3d(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords_lo, <2 x i16> 
%coords_hi) { + ; GFX9-LABEL: name: store_mip_3d + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX9: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32) + ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32) + ; GFX9: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX9: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) + ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.mip.3d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX9: S_ENDPGM 0 + ; GFX10NSA-LABEL: name: store_mip_3d + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX10NSA: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5 + ; 
GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX10NSA: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX10NSA: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32) + ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX10NSA: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32) + ; GFX10NSA: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX10NSA: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32) + ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.mip.3d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX10NSA: S_ENDPGM 0 +main_body: + %s = extractelement <2 x i16> %coords_lo, i32 0 + %t = extractelement <2 x i16> %coords_lo, i32 1 + %r = extractelement <2 x i16> %coords_hi, i32 0 + %mip = extractelement <2 x i16> %coords_hi, i32 1 + call void @llvm.amdgcn.image.store.mip.3d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %r, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @store_mip_cube(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords_lo, <2 x i16> %coords_hi) { + ; GFX9-LABEL: name: store_mip_cube + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX9: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; 
GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32) + ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32) + ; GFX9: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX9: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) + ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.mip.cube), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX9: S_ENDPGM 0 + ; GFX10NSA-LABEL: name: store_mip_cube + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX10NSA: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX10NSA: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX10NSA: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32) + ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX10NSA: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32) + ; GFX10NSA: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX10NSA: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32) + ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.mip.cube), [[BUILD_VECTOR1]](<4 x s32>), 15, 
[[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX10NSA: S_ENDPGM 0 +main_body: + %s = extractelement <2 x i16> %coords_lo, i32 0 + %t = extractelement <2 x i16> %coords_lo, i32 1 + %slice = extractelement <2 x i16> %coords_hi, i32 0 + %mip = extractelement <2 x i16> %coords_hi, i32 1 + call void @llvm.amdgcn.image.store.mip.cube.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %slice, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @store_mip_1darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords_lo, <2 x i16> %coords_hi) { + ; GFX9-LABEL: name: store_mip_1darray + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX9: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32) + ; GFX9: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[DEF]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) + ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.mip.1darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX9: S_ENDPGM 0 + ; GFX10NSA-LABEL: name: store_mip_1darray + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY 
$sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX10NSA: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX10NSA: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32) + ; GFX10NSA: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[DEF]](s32) + ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.mip.1darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX10NSA: S_ENDPGM 0 +main_body: + %s = extractelement <2 x i16> %coords_lo, i32 0 + %slice = extractelement <2 x i16> %coords_lo, i32 1 + %mip = extractelement <2 x i16> %coords_hi, i32 0 + call void @llvm.amdgcn.image.store.mip.1darray.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %slice, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @store_mip_2darray(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords_lo, <2 x i16> %coords_hi) { + ; GFX9-LABEL: name: store_mip_2darray + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX9: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), 
[[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32) + ; GFX9: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX9: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), [[COPY15]](s32) + ; GFX9: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX9: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) + ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.mip.2darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX9: S_ENDPGM 0 + ; GFX10NSA-LABEL: name: store_mip_2darray + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX10NSA: [[COPY13:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX10NSA: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY13]](<2 x s16>) + ; GFX10NSA: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32) + ; GFX10NSA: [[COPY14:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32) + ; GFX10NSA: [[COPY15:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY14]](s32), 
[[COPY15]](s32) + ; GFX10NSA: [[COPY16:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32) + ; GFX10NSA: [[COPY17:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) + ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY16]](s32), [[COPY17]](s32) + ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.mip.2darray), [[BUILD_VECTOR1]](<4 x s32>), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX10NSA: S_ENDPGM 0 +main_body: + %s = extractelement <2 x i16> %coords_lo, i32 0 + %t = extractelement <2 x i16> %coords_lo, i32 1 + %slice = extractelement <2 x i16> %coords_hi, i32 0 + %mip = extractelement <2 x i16> %coords_hi, i32 1 + call void @llvm.amdgcn.image.store.mip.2darray.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, i16 %t, i16 %slice, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps <4 x float> @getresinfo_1d(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: getresinfo_1d + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.1d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: getresinfo_1d + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = 
G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.1d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %mip = extractelement <2 x i16> %coords, i32 0 + %v = call <4 x float> @llvm.amdgcn.image.getresinfo.1d.v4f32.i16(i32 15, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0) + ret <4 x float> %v +} + +define amdgpu_ps <4 x float> @getresinfo_2d(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: getresinfo_2d + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.2d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: getresinfo_2d + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.2d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) 
+ ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %mip = extractelement <2 x i16> %coords, i32 0 + %v = call <4 x float> @llvm.amdgcn.image.getresinfo.2d.v4f32.i16(i32 15, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0) + ret <4 x float> %v +} + +define amdgpu_ps <4 x float> @getresinfo_3d(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: getresinfo_3d + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.3d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: getresinfo_3d + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.3d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit 
$vgpr3 +main_body: + %mip = extractelement <2 x i16> %coords, i32 0 + %v = call <4 x float> @llvm.amdgcn.image.getresinfo.3d.v4f32.i16(i32 15, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0) + ret <4 x float> %v +} + +define amdgpu_ps <4 x float> @getresinfo_cube(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: getresinfo_cube + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.cube), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: getresinfo_cube + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.cube), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %mip = extractelement <2 x i16> %coords, i32 0 + %v = call <4 x float> @llvm.amdgcn.image.getresinfo.cube.v4f32.i16(i32 15, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0) + ret <4 x float> %v +} + +define amdgpu_ps <4 x float> 
@getresinfo_1darray(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: getresinfo_1darray + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.1darray), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: getresinfo_1darray + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.1darray), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %mip = extractelement <2 x i16> %coords, i32 0 + %v = call <4 x float> @llvm.amdgcn.image.getresinfo.1darray.v4f32.i16(i32 15, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0) + ret <4 x float> %v +} + +define amdgpu_ps <4 x float> @getresinfo_2darray(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: getresinfo_2darray + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; 
GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.2darray), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: getresinfo_2darray + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.2darray), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %mip = extractelement <2 x i16> %coords, i32 0 + %v = call <4 x float> @llvm.amdgcn.image.getresinfo.2darray.v4f32.i16(i32 15, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0) + ret <4 x float> %v +} + +define amdgpu_ps <4 x float> @getresinfo_2dmsaa(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: getresinfo_2dmsaa + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = 
COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.2dmsaa), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: getresinfo_2dmsaa + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.2dmsaa), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %mip = extractelement <2 x i16> %coords, i32 0 + %v = call <4 x float> @llvm.amdgcn.image.getresinfo.2dmsaa.v4f32.i16(i32 15, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0) + ret <4 x float> %v +} + +define amdgpu_ps <4 x float> @getresinfo_2darraymsaa(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: getresinfo_2darraymsaa + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; 
GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.2darraymsaa), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: getresinfo_2darraymsaa + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.2darraymsaa), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %mip = extractelement <2 x i16> %coords, i32 0 + %v = call <4 x float> @llvm.amdgcn.image.getresinfo.2darraymsaa.v4f32.i16(i32 15, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0) + ret <4 x float> %v +} + +define amdgpu_ps float @load_1d_V1(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: load_1d_V1 + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = 
G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1d), 8, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 4 from custom "TargetCustom8") + ; GFX9: $vgpr0 = COPY [[INT]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0 + ; GFX10NSA-LABEL: name: load_1d_V1 + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1d), 8, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 4 from custom "TargetCustom8") + ; GFX10NSA: $vgpr0 = COPY [[INT]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0 +main_body: + %s = extractelement <2 x i16> %coords, i32 0 + %v = call float @llvm.amdgcn.image.load.1d.f32.i16(i32 8, i16 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret float %v +} + +define amdgpu_ps <2 x float> @load_1d_V2(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: load_1d_V2 + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<2 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1d), 9, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 8 from custom "TargetCustom8") + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<2 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 + ; GFX10NSA-LABEL: name: load_1d_V2 + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = 
COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<2 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1d), 9, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable load 8 from custom "TargetCustom8") + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<2 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1 +main_body: + %s = extractelement <2 x i16> %coords, i32 0 + %v = call <2 x float> @llvm.amdgcn.image.load.1d.v2f32.i16(i32 9, i16 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret <2 x float> %v +} + +define amdgpu_ps void @store_1d_V1(<8 x i32> inreg %rsrc, float %vdata, <2 x i16> %coords) { + ; GFX9-LABEL: name: store_1d_V1 + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.1d), [[COPY8]](s32), 2, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 4 into custom "TargetCustom8") + ; GFX9: S_ENDPGM 0 + ; GFX10NSA-LABEL: name: store_1d_V1 + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), 
[[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.1d), [[COPY8]](s32), 2, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 4 into custom "TargetCustom8") + ; GFX10NSA: S_ENDPGM 0 +main_body: + %s = extractelement <2 x i16> %coords, i32 0 + call void @llvm.amdgcn.image.store.1d.f32.i16(float %vdata, i32 2, i16 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps void @store_1d_V2(<8 x i32> inreg %rsrc, <2 x float> %vdata, <2 x i16> %coords) { + ; GFX9-LABEL: name: store_1d_V2 + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr2 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY10]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.1d), [[BUILD_VECTOR1]](<2 x s32>), 12, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 8 into custom "TargetCustom8") + ; GFX9: S_ENDPGM 0 + ; GFX10NSA-LABEL: name: store_1d_V2 + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr2 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY10]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.1d), [[BUILD_VECTOR1]](<2 x s32>), 12, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 :: (dereferenceable store 8 into custom "TargetCustom8") + ; GFX10NSA: S_ENDPGM 0 +main_body: + %s = extractelement <2 x i16> 
%coords, i32 0 + call void @llvm.amdgcn.image.store.1d.v2f32.i16(<2 x float> %vdata, i32 12, i16 %s, <8 x i32> %rsrc, i32 0, i32 0) + ret void +} + +define amdgpu_ps <4 x float> @load_1d_glc(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: load_1d_glc + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 1 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: load_1d_glc + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 1 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %s = extractelement <2 x i16> %coords, i32 0 + %v = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i16(i32 15, i16 %s, <8 x i32> %rsrc, i32 0, i32 1) + ret <4 x float> 
%v +} + +define amdgpu_ps <4 x float> @load_1d_slc(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: load_1d_slc + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 2 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: load_1d_slc + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 2 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %s = extractelement <2 x i16> %coords, i32 0 + %v = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i16(i32 15, i16 %s, <8 x i32> %rsrc, i32 0, i32 2) + ret <4 x float> %v +} + +define amdgpu_ps <4 x float> @load_1d_glc_slc(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: load_1d_glc_slc + ; 
GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 3 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: load_1d_glc_slc + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 3 :: (dereferenceable load 16 from custom "TargetCustom8") + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %s = extractelement <2 x i16> %coords, i32 0 + %v = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i16(i32 15, i16 %s, <8 x i32> %rsrc, i32 0, i32 3) + ret <4 x float> %v +} + +define amdgpu_ps void @store_1d_glc(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords) { + ; GFX9-LABEL: name: store_1d_glc + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, 
$vgpr2, $vgpr3, $vgpr4 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 1 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX9: S_ENDPGM 0 + ; GFX10NSA-LABEL: name: store_1d_glc + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 1 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX10NSA: S_ENDPGM 0 +main_body: + %s = extractelement <2 x i16> %coords, i32 0 + call void @llvm.amdgcn.image.store.1d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, <8 x i32> %rsrc, i32 0, i32 1) + ret void +} + +define amdgpu_ps void @store_1d_slc(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords) { + ; GFX9-LABEL: name: store_1d_slc + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; 
GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 2 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX9: S_ENDPGM 0 + ; GFX10NSA-LABEL: name: store_1d_slc + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 2 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX10NSA: S_ENDPGM 0 +main_body: + %s = extractelement <2 x i16> %coords, i32 0 + call void @llvm.amdgcn.image.store.1d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, <8 x i32> %rsrc, i32 0, i32 2) + ret void +} + +define amdgpu_ps void @store_1d_glc_slc(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords) { + ; GFX9-LABEL: name: store_1d_glc_slc + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: 
[[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX9: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 3 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX9: S_ENDPGM 0 + ; GFX10NSA-LABEL: name: store_1d_glc_slc + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10NSA: [[COPY12:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY12]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.store.1d), [[BUILD_VECTOR1]](<4 x s32>), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 3 :: (dereferenceable store 16 into custom "TargetCustom8") + ; GFX10NSA: S_ENDPGM 0 +main_body: + %s = extractelement <2 x i16> %coords, i32 0 + call void @llvm.amdgcn.image.store.1d.v4f32.i16(<4 x float> %vdata, i32 15, i16 %s, <8 x i32> %rsrc, i32 0, i32 3) + ret void +} + +define amdgpu_ps <4 x float> @getresinfo_dmask0(<8 x i32> inreg %rsrc, <4 x float> %vdata, <2 x i16> %coords) { + ; GFX9-LABEL: name: getresinfo_dmask0 + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + 
; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.1d), 0, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX9: $vgpr0 = COPY [[UV]](s32) + ; GFX9: $vgpr1 = COPY [[UV1]](s32) + ; GFX9: $vgpr2 = COPY [[UV2]](s32) + ; GFX9: $vgpr3 = COPY [[UV3]](s32) + ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 + ; GFX10NSA-LABEL: name: getresinfo_dmask0 + ; GFX10NSA: bb.1.main_body: + ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32) + ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>) + ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX10NSA: [[INT:%[0-9]+]]:_(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.amdgcn.image.getresinfo.1d), 0, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0 + ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<4 x s32>) + ; GFX10NSA: $vgpr0 = COPY [[UV]](s32) + ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32) + ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32) + ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32) + ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3 +main_body: + %mip = extractelement <2 x i16> %coords, i32 0 + %r = call <4 x float> @llvm.amdgcn.image.getresinfo.1d.v4f32.i16(i32 0, i16 %mip, <8 x i32> %rsrc, i32 0, i32 0) + ret <4 x float> %r +} + +define amdgpu_ps <4 x float> @load_1d_tfe(<8 x i32> inreg %rsrc, <2 x i16> %coords) { + ; GFX9-LABEL: name: load_1d_tfe + ; GFX9: bb.1.main_body: + ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4 + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6 + ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7 + ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8 + ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9 + ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), 
[[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(<5 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<5 x s32>)
+  ; GFX9: G_STORE [[UV4]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; GFX9: $vgpr0 = COPY [[UV]](s32)
+  ; GFX9: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: load_1d_tfe
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(<5 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.1d), 15, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<5 x s32>)
+  ; GFX10NSA: G_STORE [[UV4]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %s = extractelement <2 x i16> %coords, i32 0
+  %v = call { <4 x float>, i32 } @llvm.amdgcn.image.load.1d.sl_v4f32i32s.i16(i32 15, i16 %s, <8 x i32> %rsrc, i32 1, i32 0)
+  %data = extractvalue { <4 x float>, i32 } %v, 0
+  %tfe = extractvalue { <4 x float>, i32 } %v, 1
+  store i32 %tfe, i32 addrspace(1)* undef
+  ret <4 x float> %data
+}
+
+define amdgpu_ps <4 x float> @load_2d_tfe(<8 x i32> inreg %rsrc, <2 x i16> %coords) {
+  ; GFX9-LABEL: name: load_2d_tfe
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[COPY9:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY9]](s32), [[COPY10]](s32)
+  ; GFX9: [[INT:%[0-9]+]]:_(<5 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<5 x s32>)
+  ; GFX9: G_STORE [[UV4]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; GFX9: $vgpr0 = COPY [[UV]](s32)
+  ; GFX9: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: load_2d_tfe
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY9]](s32), [[COPY10]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(<5 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<5 x s32>)
+  ; GFX10NSA: G_STORE [[UV4]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %s = extractelement <2 x i16> %coords, i32 0
+  %t = extractelement <2 x i16> %coords, i32 1
+  %v = call { <4 x float>, i32 } @llvm.amdgcn.image.load.2d.sl_v4f32i32s.i16(i32 15, i16 %s, i16 %t, <8 x i32> %rsrc, i32 1, i32 0)
+  %data = extractvalue { <4 x float>, i32 } %v, 0
+  %tfe = extractvalue { <4 x float>, i32 } %v, 1
+  store i32 %tfe, i32 addrspace(1)* undef
+  ret <4 x float> %data
+}
+
+define amdgpu_ps <4 x float> @load_3d_tfe(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
+  ; GFX9-LABEL: name: load_3d_tfe
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX9: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF1]](s32)
+  ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9: [[INT:%[0-9]+]]:_(<5 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.3d), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<5 x s32>)
+  ; GFX9: G_STORE [[UV4]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; GFX9: $vgpr0 = COPY [[UV]](s32)
+  ; GFX9: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: load_3d_tfe
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX10NSA: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[DEF1]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(<5 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.3d), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<5 x s32>)
+  ; GFX10NSA: G_STORE [[UV4]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %s = extractelement <2 x i16> %coords_lo, i32 0
+  %t = extractelement <2 x i16> %coords_lo, i32 1
+  %r = extractelement <2 x i16> %coords_hi, i32 0
+  %v = call { <4 x float>, i32 } @llvm.amdgcn.image.load.3d.sl_v4f32i32s.i16(i32 15, i16 %s, i16 %t, i16 %r, <8 x i32> %rsrc, i32 1, i32 0)
+  %data = extractvalue { <4 x float>, i32 } %v, 0
+  %tfe = extractvalue { <4 x float>, i32 } %v, 1
+  store i32 %tfe, i32 addrspace(1)* undef
+  ret <4 x float> %data
+}
+
+define amdgpu_ps <4 x float> @load_2darraymsaa_tfe(<8 x i32> inreg %rsrc, <2 x i16> %coords_lo, <2 x i16> %coords_hi) {
+  ; GFX9-LABEL: name: load_2darraymsaa_tfe
+  ; GFX9: bb.1.main_body:
+  ; GFX9: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX9: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX9: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+  ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX9: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX9: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+  ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+  ; GFX9: [[INT:%[0-9]+]]:_(<5 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2darraymsaa), 15, [[CONCAT_VECTORS]](<4 x s16>), $noreg, $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<5 x s32>)
+  ; GFX9: G_STORE [[UV4]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; GFX9: $vgpr0 = COPY [[UV]](s32)
+  ; GFX9: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX9: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX9: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; GFX10NSA-LABEL: name: load_2darraymsaa_tfe
+  ; GFX10NSA: bb.1.main_body:
+  ; GFX10NSA: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $vgpr0, $vgpr1
+  ; GFX10NSA: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr2
+  ; GFX10NSA: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr3
+  ; GFX10NSA: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr4
+  ; GFX10NSA: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr5
+  ; GFX10NSA: [[COPY4:%[0-9]+]]:_(s32) = COPY $sgpr6
+  ; GFX10NSA: [[COPY5:%[0-9]+]]:_(s32) = COPY $sgpr7
+  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
+  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
+  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+  ; GFX10NSA: [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+  ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; GFX10NSA: [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
+  ; GFX10NSA: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; GFX10NSA: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+  ; GFX10NSA: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX10NSA: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY9]](<2 x s16>)
+  ; GFX10NSA: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+  ; GFX10NSA: [[COPY10:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+  ; GFX10NSA: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY10]](s32), [[COPY11]](s32)
+  ; GFX10NSA: [[COPY12:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+  ; GFX10NSA: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+  ; GFX10NSA: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY12]](s32), [[COPY13]](s32)
+  ; GFX10NSA: [[INT:%[0-9]+]]:_(<5 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.image.load.2darraymsaa), 15, [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), $noreg, $noreg, [[BUILD_VECTOR]](<8 x s32>), 1, 0 :: (dereferenceable load 16 from custom "TargetCustom8")
+  ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[INT]](<5 x s32>)
+  ; GFX10NSA: G_STORE [[UV4]](s32), [[DEF]](p1) :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+  ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
+  ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
+  ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
+  ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
+  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+main_body:
+  %s = extractelement <2 x i16> %coords_lo, i32 0
+  %t = extractelement <2 x i16> %coords_lo, i32 1
+  %slice = extractelement <2 x i16> %coords_hi, i32 0
+  %fragid = extractelement <2 x i16> %coords_hi, i32 1
+  %v = call { <4 x float>, i32 } @llvm.amdgcn.image.load.2darraymsaa.sl_v4f32i32s.i16(i32 15, i16 %s, i16 %t, i16 %slice, i16 %fragid, <8 x i32> %rsrc, i32 1, i32 0)
+  %data = extractvalue { <4 x float>, i32 } %v, 0
+  %tfe = extractvalue { <4 x float>, i32 } %v, 1
+  store i32 %tfe, i32 addrspace(1)* undef
+  ret <4 x float> %data
+}
+
+declare <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i16(i32 immarg, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare <4 x float> @llvm.amdgcn.image.load.2d.v4f32.i16(i32 immarg, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare <4 x float> @llvm.amdgcn.image.load.3d.v4f32.i16(i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare <4 x float> @llvm.amdgcn.image.load.cube.v4f32.i16(i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare <4 x float> @llvm.amdgcn.image.load.1darray.v4f32.i16(i32 immarg, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare <4 x float> @llvm.amdgcn.image.load.2darray.v4f32.i16(i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare <4 x float> @llvm.amdgcn.image.load.2dmsaa.v4f32.i16(i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare <4 x float> @llvm.amdgcn.image.load.2darraymsaa.v4f32.i16(i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i16(i32 immarg, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i16(i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i16(i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i16(i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i16(i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i16(i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare void @llvm.amdgcn.image.store.1d.v4f32.i16(<4 x float>, i32 immarg, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare void @llvm.amdgcn.image.store.2d.v4f32.i16(<4 x float>, i32 immarg, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare void @llvm.amdgcn.image.store.3d.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare void @llvm.amdgcn.image.store.cube.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare void @llvm.amdgcn.image.store.1darray.v4f32.i16(<4 x float>, i32 immarg, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare void @llvm.amdgcn.image.store.2darray.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare void @llvm.amdgcn.image.store.2dmsaa.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare void @llvm.amdgcn.image.store.2darraymsaa.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare void @llvm.amdgcn.image.store.mip.1d.v4f32.i16(<4 x float>, i32 immarg, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare void @llvm.amdgcn.image.store.mip.2d.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare void @llvm.amdgcn.image.store.mip.3d.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare void @llvm.amdgcn.image.store.mip.cube.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare void @llvm.amdgcn.image.store.mip.1darray.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare void @llvm.amdgcn.image.store.mip.2darray.v4f32.i16(<4 x float>, i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare <4 x float> @llvm.amdgcn.image.getresinfo.1d.v4f32.i16(i32 immarg, i16, <8 x i32>, i32 immarg, i32 immarg) #3
+declare <4 x float> @llvm.amdgcn.image.getresinfo.2d.v4f32.i16(i32 immarg, i16, <8 x i32>, i32 immarg, i32 immarg) #3
+declare <4 x float> @llvm.amdgcn.image.getresinfo.3d.v4f32.i16(i32 immarg, i16, <8 x i32>, i32 immarg, i32 immarg) #3
+declare <4 x float> @llvm.amdgcn.image.getresinfo.cube.v4f32.i16(i32 immarg, i16, <8 x i32>, i32 immarg, i32 immarg) #3
+declare <4 x float> @llvm.amdgcn.image.getresinfo.1darray.v4f32.i16(i32 immarg, i16, <8 x i32>, i32 immarg, i32 immarg) #3
+declare <4 x float> @llvm.amdgcn.image.getresinfo.2darray.v4f32.i16(i32 immarg, i16, <8 x i32>, i32 immarg, i32 immarg) #3
+declare <4 x float> @llvm.amdgcn.image.getresinfo.2dmsaa.v4f32.i16(i32 immarg, i16, <8 x i32>, i32 immarg, i32 immarg) #3
+declare <4 x float> @llvm.amdgcn.image.getresinfo.2darraymsaa.v4f32.i16(i32 immarg, i16, <8 x i32>, i32 immarg, i32 immarg) #3
+declare float @llvm.amdgcn.image.load.1d.f32.i16(i32 immarg, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare float @llvm.amdgcn.image.load.2d.f32.i16(i32 immarg, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare <2 x float> @llvm.amdgcn.image.load.1d.v2f32.i16(i32 immarg, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare void @llvm.amdgcn.image.store.1d.f32.i16(float, i32 immarg, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare void @llvm.amdgcn.image.store.1d.v2f32.i16(<2 x float>, i32 immarg, i16, <8 x i32>, i32 immarg, i32 immarg) #2
+declare { <4 x float>, i32 } @llvm.amdgcn.image.load.1d.sl_v4f32i32s.i16(i32 immarg, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare { <4 x float>, i32 } @llvm.amdgcn.image.load.2d.sl_v4f32i32s.i16(i32 immarg, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare { <4 x float>, i32 } @llvm.amdgcn.image.load.3d.sl_v4f32i32s.i16(i32 immarg, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+declare { <4 x float>, i32 } @llvm.amdgcn.image.load.2darraymsaa.sl_v4f32i32s.i16(i32 immarg, i16, i16, i16, i16, <8 x i32>, i32 immarg, i32 immarg) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readonly }
+attributes #2 = { nounwind writeonly }
+attributes #3 = { nounwind readnone }