Index: llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h =================================================================== --- llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h +++ llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h @@ -517,6 +517,11 @@ /// or false constant based off of KnownBits information. bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo); + bool matchBitfieldExtractFromSExtInReg( + MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo); + bool matchBitfieldExtractFromAnd( + MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo); + /// Try to transform \p MI by using all of the above /// combine functions. Returns true if changed. bool tryCombine(MachineInstr &MI); Index: llvm/include/llvm/Target/GlobalISel/Combine.td =================================================================== --- llvm/include/llvm/Target/GlobalISel/Combine.td +++ llvm/include/llvm/Target/GlobalISel/Combine.td @@ -622,6 +622,21 @@ def funnel_shift_combines : GICombineGroup<[funnel_shift_to_rotate]>; +def bitfield_extract_from_sext_inreg : GICombineRule< + (defs root:$root, build_fn_matchinfo:$info), + (match (wip_match_opcode G_SEXT_INREG):$root, + [{ return Helper.matchBitfieldExtractFromSExtInReg(*${root}, ${info}); }]), + (apply [{ return Helper.applyBuildFn(*${root}, ${info}); }])>; + +def bitfield_extract_from_and : GICombineRule< + (defs root:$root, build_fn_matchinfo:$info), + (match (wip_match_opcode G_AND):$root, + [{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]), + (apply [{ return Helper.applyBuildFn(*${root}, ${info}); }])>; + +def form_bitfield_extract : GICombineGroup<[bitfield_extract_from_sext_inreg, + bitfield_extract_from_and]>; + // FIXME: These should use the custom predicate feature once it lands. 
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero, undef_to_negative_one, @@ -664,7 +679,7 @@ unmerge_zext_to_zext, trunc_ext_fold, trunc_shl, const_combines, xor_of_and_with_same_reg, ptr_add_with_zero, shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine, - div_rem_to_divrem, funnel_shift_combines]>; + div_rem_to_divrem, funnel_shift_combines, form_bitfield_extract]>; // A combine group used to for prelegalizer combiners at -O0. The combines in // this group have been selected based on experiments to balance code size and Index: llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp =================================================================== --- llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp +++ llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp @@ -3986,6 +3986,68 @@ return true; } +/// Form a G_SBFX from a G_SEXT_INREG fed by a right shift. +bool CombinerHelper::matchBitfieldExtractFromSExtInReg( + MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { + assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); + LLT Ty = MRI.getType(Src); + LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty); + if (!LI || !LI->isLegalOrCustom({TargetOpcode::G_SBFX, {Ty, ExtractTy}})) + return false; + int64_t Width = MI.getOperand(2).getImm(); + Register ShiftSrc; + int64_t ShiftImm; + if (!mi_match( + Src, MRI, + m_OneNonDBGUse(m_any_of(m_GAShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)), + m_GLShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)))))) + return false; + if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits()) + return false; + + MatchInfo = [=](MachineIRBuilder &B) { + auto Cst1 = B.buildConstant(ExtractTy, ShiftImm); + auto Cst2 = B.buildConstant(ExtractTy, Width); + B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2); + }; + return true; +} + +/// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants. 
+bool CombinerHelper::matchBitfieldExtractFromAnd( + MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { + assert(MI.getOpcode() == TargetOpcode::G_AND); + Register Dst = MI.getOperand(0).getReg(); + Register LHS = MI.getOperand(1).getReg(); + Register RHS = MI.getOperand(2).getReg(); + LLT Ty = MRI.getType(Dst); + LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty); + if (!LI || !LI->isLegalOrCustom({TargetOpcode::G_UBFX, {Ty, ExtractTy}})) + return false; + int64_t Mask; + if (!mi_match(RHS, MRI, m_ICst(Mask))) + return false; + if (!isMask_64(Mask)) + return false; + int64_t Width = countPopulation(static_cast<uint64_t>(Mask)); + Register ShiftSrc; + int64_t ShiftImm; + if (!mi_match(LHS, MRI, + m_OneNonDBGUse(m_GLShr(m_Reg(ShiftSrc), m_ICst(ShiftImm))))) + return false; + if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits()) + return false; + + MatchInfo = [=](MachineIRBuilder &B) { + auto Cst1 = B.buildConstant(ExtractTy, ShiftImm); + auto Cst2 = B.buildConstant(ExtractTy, Width); + B.buildUbfx(Dst, ShiftSrc, Cst1, Cst2); + }; + return true; +} + bool CombinerHelper::tryCombine(MachineInstr &MI) { if (tryCombineCopy(MI)) return true; Index: llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp =================================================================== --- llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp +++ llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp @@ -495,6 +495,30 @@ Known.reverseBits(); break; } + case TargetOpcode::G_SBFX: + case TargetOpcode::G_UBFX: { + Register SrcReg = MI.getOperand(1).getReg(); + Register OffsetReg = MI.getOperand(2).getReg(); + Register WidthReg = MI.getOperand(3).getReg(); + KnownBits SrcOpKnown, OffsetKnown, WidthKnown; + computeKnownBitsImpl(SrcReg, SrcOpKnown, DemandedElts, Depth + 1); + computeKnownBitsImpl(OffsetReg, OffsetKnown, DemandedElts, Depth + 1); + computeKnownBitsImpl(WidthReg, WidthKnown, DemandedElts, Depth + 1); + // Bitfield extract is computed as (Src >> Offset) & ((1 << Width) - 1). 
+ KnownBits One = KnownBits::makeConstant(APInt(BitWidth, 1)); + Known = KnownBits::lshr(SrcOpKnown, OffsetKnown) & + KnownBits::computeForAddSub(/*Add*/ false, /*NSW*/ false, + KnownBits::shl(One, WidthKnown), One); + if (Opcode == TargetOpcode::G_SBFX) { + // Sign extend the extracted value using shift left and arithmetic shift + // right. + KnownBits ExtKnown = KnownBits::makeConstant(APInt(BitWidth, BitWidth)); + KnownBits ShiftKnown = KnownBits::computeForAddSub( + /*Add*/ false, /*NSW*/ false, ExtKnown, WidthKnown); + Known = KnownBits::ashr(KnownBits::shl(Known, ShiftKnown), ShiftKnown); + } + break; + } } assert(!Known.hasConflict() && "Bits known to be one AND zero?"); Index: llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp =================================================================== --- llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -2019,6 +2019,21 @@ Observer.changedInstr(MI); return Legalized; + case TargetOpcode::G_SBFX: + case TargetOpcode::G_UBFX: + Observer.changingInstr(MI); + + if (TypeIdx == 0) { + widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); + widenScalarDst(MI, WideTy); + } else { + widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT); + widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ZEXT); + } + + Observer.changedInstr(MI); + return Legalized; + case TargetOpcode::G_SHL: Observer.changingInstr(MI); Index: llvm/lib/Target/AArch64/AArch64Combine.td =================================================================== --- llvm/lib/Target/AArch64/AArch64Combine.td +++ llvm/lib/Target/AArch64/AArch64Combine.td @@ -168,13 +168,6 @@ def build_vector_lowering : GICombineGroup<[build_vector_to_dup]>; -def bitfield_extract_from_sext_inreg : GICombineRule< - (defs root:$root, build_fn_matchinfo:$info), - (match (wip_match_opcode G_SEXT_INREG):$root, - [{ return matchBitfieldExtractFromSExtInReg(*${root}, MRI, ${info}); }]), - (apply [{ return Helper.applyBuildFn(*${root}, ${info}); 
}])>; - -def form_bitfield_extract : GICombineGroup<[bitfield_extract_from_sext_inreg]>; def lower_vector_fcmp : GICombineRule< (defs root:$root), Index: llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp =================================================================== --- llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp +++ llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerCombiner.cpp @@ -240,34 +240,6 @@ return true; } -/// Form a G_SBFX from a G_SEXT_INREG fed by a right shift. -static bool matchBitfieldExtractFromSExtInReg( - MachineInstr &MI, MachineRegisterInfo &MRI, - std::function &MatchInfo) { - assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); - Register Dst = MI.getOperand(0).getReg(); - Register Src = MI.getOperand(1).getReg(); - int64_t Width = MI.getOperand(2).getImm(); - LLT Ty = MRI.getType(Src); - assert((Ty == LLT::scalar(32) || Ty == LLT::scalar(64)) && - "Unexpected type for G_SEXT_INREG?"); - Register ShiftSrc; - int64_t ShiftImm; - if (!mi_match( - Src, MRI, - m_OneNonDBGUse(m_any_of(m_GAShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)), - m_GLShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)))))) - return false; - if (ShiftImm < 0 || ShiftImm + Width > Ty.getSizeInBits()) - return false; - MatchInfo = [=](MachineIRBuilder &B) { - auto Cst1 = B.buildConstant(Ty, ShiftImm); - auto Cst2 = B.buildConstant(Ty, Width); - B.buildInstr(TargetOpcode::G_SBFX, {Dst}, {ShiftSrc, Cst1, Cst2}); - }; - return true; -} - #define AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS #include "AArch64GenPostLegalizeGICombiner.inc" #undef AARCH64POSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS Index: llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h +++ llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h @@ -109,6 +109,7 @@ bool selectG_PTR_ADD(MachineInstr &I) const; bool selectG_IMPLICIT_DEF(MachineInstr &I) const; bool 
selectG_INSERT(MachineInstr &I) const; + bool selectG_SBFX_UBFX(MachineInstr &I) const; bool selectInterpP1F16(MachineInstr &MI) const; bool selectWritelane(MachineInstr &MI) const; Index: llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp +++ llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp @@ -755,6 +755,30 @@ return true; } +bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const { + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); + Register OffsetReg = MI.getOperand(2).getReg(); + Register WidthReg = MI.getOperand(3).getReg(); + + assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID && + "scalar BFX instructions are expanded in regbankselect"); + assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 && + "64-bit vector BFX instructions are expanded in regbankselect"); + + const DebugLoc &DL = MI.getDebugLoc(); + MachineBasicBlock *MBB = MI.getParent(); + + bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX; + unsigned Opc = IsSigned ? 
AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64; + auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg) + .addReg(SrcReg) + .addReg(OffsetReg) + .addReg(WidthReg); + MI.eraseFromParent(); + return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); +} + bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const { if (STI.getLDSBankCount() != 16) return selectImpl(MI, *CoverageInfo); @@ -3172,6 +3196,9 @@ return selectBVHIntrinsic(I); case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD: return selectAMDGPU_BUFFER_ATOMIC_FADD(I); + case AMDGPU::G_SBFX: + case AMDGPU::G_UBFX: + return selectG_SBFX_UBFX(I); default: return selectImpl(I, *CoverageInfo); } Index: llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp +++ llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp @@ -1631,6 +1631,13 @@ .minScalar(0, S32) .lower(); + getActionDefinitionsBuilder({G_SBFX, G_UBFX}) + .legalFor({{S32, S32}, {S64, S32}}) + .clampScalar(1, S32, S32) + .clampScalar(0, S32, S64) + .widenScalarToNextPow2(0) + .scalarize(0); + getActionDefinitionsBuilder({ // TODO: Verify V_BFI_B32 is generated from expanded bit ops G_FCOPYSIGN, Index: llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h +++ llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h @@ -81,8 +81,7 @@ MachineRegisterInfo &MRI, int RSrcIdx) const; bool applyMappingSBufferLoad(const OperandsMapper &OpdMapper) const; - bool applyMappingBFEIntrinsic(const OperandsMapper &OpdMapper, - bool Signed) const; + bool applyMappingBFE(const OperandsMapper &OpdMapper, bool Signed) const; Register handleD16VData(MachineIRBuilder &B, MachineRegisterInfo &MRI, Register Reg) const; Index: llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp =================================================================== --- 
llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp +++ llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp @@ -1531,8 +1531,8 @@ return true; } -bool AMDGPURegisterBankInfo::applyMappingBFEIntrinsic( - const OperandsMapper &OpdMapper, bool Signed) const { +bool AMDGPURegisterBankInfo::applyMappingBFE(const OperandsMapper &OpdMapper, + bool Signed) const { MachineInstr &MI = OpdMapper.getMI(); MachineRegisterInfo &MRI = OpdMapper.getMRI(); @@ -1544,19 +1544,76 @@ const LLT S32 = LLT::scalar(32); + unsigned FirstOpnd = MI.getOpcode() == AMDGPU::G_INTRINSIC ? 2 : 1; + Register SrcReg = MI.getOperand(FirstOpnd).getReg(); + Register OffsetReg = MI.getOperand(FirstOpnd + 1).getReg(); + Register WidthReg = MI.getOperand(FirstOpnd + 2).getReg(); + const RegisterBank *DstBank = OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank; if (DstBank == &AMDGPU::VGPRRegBank) { if (Ty == S32) return true; - // TODO: 64-bit version is scalar only, so we need to expand this. - return false; - } + // Expand to Src >> Offset & ((1 << Width) - 1) using 64-bit operations. + // If Width is a constant value, use the bitfield extract instructions for + // more efficient code. + ApplyRegBankMapping ApplyBank(*this, MRI, &AMDGPU::VGPRRegBank); + MachineIRBuilder B(MI, ApplyBank); + + const LLT S1 = LLT::scalar(1); + const LLT S64 = LLT::scalar(64); + // Shift the source operand so that extracted bits start at bit 0. + auto ShiftOffset = Signed ? B.buildAShr(S64, SrcReg, OffsetReg) + : B.buildLShr(S64, SrcReg, OffsetReg); + auto UnmergeSOffset = B.buildUnmerge({S32, S32}, ShiftOffset); + + // A 64-bit bitfield extract can use the 32-bit instructions if the width + // is a constant. + if (auto ConstWidth = getConstantVRegValWithLookThrough(WidthReg, MRI)) { + // Use the 32-bit bitfield extract instruction if the width is a constant. + // Depending on the width size, use either the low or high 32-bits. 
+ auto Zero = B.buildConstant(S32, 0); + auto WidthImm = ConstWidth->Value.getZExtValue(); + if (WidthImm < 32) { + // Use bitfield extract on the lower 32-bit source, and then sign-extend + // or clear the upper 32-bits. + auto Extract = + Signed ? B.buildSbfx(S32, UnmergeSOffset.getReg(0), Zero, WidthReg) + : B.buildUbfx(S32, UnmergeSOffset.getReg(0), Zero, WidthReg); + auto Extend = + Signed ? B.buildAShr(S32, Extract, B.buildConstant(S32, 31)) : Zero; + B.buildMerge(DstReg, {Extract, Extend}); + } else { + // Use bitfield extract on upper 32-bit source, and combine with lower + // 32-bit source. + auto UpperWidth = B.buildConstant(S32, WidthImm - 32); + auto Extract = + Signed + ? B.buildSbfx(S32, UnmergeSOffset.getReg(1), Zero, UpperWidth) + : B.buildUbfx(S32, UnmergeSOffset.getReg(1), Zero, UpperWidth); + B.buildMerge(DstReg, {UnmergeSOffset.getReg(0), Extract}); + } + MI.eraseFromParent(); + return true; + } - Register SrcReg = MI.getOperand(2).getReg(); - Register OffsetReg = MI.getOperand(3).getReg(); - Register WidthReg = MI.getOperand(4).getReg(); + // Expand to Src >> Offset & ((1 << Width) - 1) using 64-bit operations. + // Create a mask of ones to extract the necessary bits. + auto ShiftOne = B.buildShl(S64, B.buildConstant(S64, 1), WidthReg); + auto UnmergeSOne = B.buildUnmerge({S32, S32}, ShiftOne); + Register SOneLo = UnmergeSOne.getReg(0); + Register SOneHi = UnmergeSOne.getReg(1); + auto One = B.buildConstant(S32, 1); + // Use 32-bit instructions for the subtraction. + auto SubLo = B.buildUSubo(S32, S1, SOneLo, One); + auto SubHi = B.buildUSube(S32, S1, SOneHi, One, SubLo.getReg(1)); + auto AndLo = B.buildAnd(S32, UnmergeSOffset.getReg(0), SubLo); + auto AndHi = B.buildAnd(S32, UnmergeSOffset.getReg(1), SubHi); + B.buildMerge(DstReg, {AndLo, AndHi}); + MI.eraseFromParent(); + return true; + } // The scalar form packs the offset and width in a single operand. 
@@ -2924,10 +2981,10 @@ return; } case Intrinsic::amdgcn_sbfe: - applyMappingBFEIntrinsic(OpdMapper, true); + applyMappingBFE(OpdMapper, true); return; case Intrinsic::amdgcn_ubfe: - applyMappingBFEIntrinsic(OpdMapper, false); + applyMappingBFE(OpdMapper, false); return; case Intrinsic::amdgcn_ballot: // Use default handling and insert copy to vcc source. @@ -3019,6 +3076,12 @@ case AMDGPU::G_DYN_STACKALLOC: applyMappingDynStackAlloc(MI, OpdMapper, MRI); return; + case AMDGPU::G_SBFX: + applyMappingBFE(OpdMapper, /*Signed*/ true); + return; + case AMDGPU::G_UBFX: + applyMappingBFE(OpdMapper, /*Signed*/ false); + return; default: break; } @@ -3492,6 +3555,8 @@ case AMDGPU::G_UMIN: case AMDGPU::G_UMAX: case AMDGPU::G_SHUFFLE_VECTOR: + case AMDGPU::G_SBFX: + case AMDGPU::G_UBFX: if (isSALUMapping(MI)) return getDefaultMappingSOP(MI); LLVM_FALLTHROUGH; Index: llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll +++ llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll @@ -808,17 +808,16 @@ ; ; GFX8-LABEL: s_ashr_v2i16: ; GFX8: ; %bb.0: -; GFX8-NEXT: s_lshr_b32 s2, s0, 16 -; GFX8-NEXT: s_lshr_b32 s3, s1, 16 -; GFX8-NEXT: s_sext_i32_i16 s0, s0 -; GFX8-NEXT: s_sext_i32_i16 s1, s1 -; GFX8-NEXT: s_sext_i32_i16 s2, s2 -; GFX8-NEXT: s_sext_i32_i16 s3, s3 +; GFX8-NEXT: s_mov_b32 s3, 0x100010 +; GFX8-NEXT: s_sext_i32_i16 s2, s0 +; GFX8-NEXT: s_sext_i32_i16 s4, s1 +; GFX8-NEXT: s_bfe_i32 s0, s0, s3 +; GFX8-NEXT: s_bfe_i32 s1, s1, s3 ; GFX8-NEXT: s_ashr_i32 s0, s0, s1 -; GFX8-NEXT: s_ashr_i32 s1, s2, s3 -; GFX8-NEXT: s_lshl_b32 s1, s1, 16 -; GFX8-NEXT: s_and_b32 s0, s0, 0xffff -; GFX8-NEXT: s_or_b32 s0, s1, s0 +; GFX8-NEXT: s_ashr_i32 s2, s2, s4 +; GFX8-NEXT: s_lshl_b32 s0, s0, 16 +; GFX8-NEXT: s_and_b32 s1, s2, 0xffff +; GFX8-NEXT: s_or_b32 s0, s0, s1 ; GFX8-NEXT: ; return to shader part epilog ; ; GFX9-LABEL: s_ashr_v2i16: @@ -1021,29 +1020,26 @@ ; ; GFX8-LABEL: s_ashr_v4i16: ; 
GFX8: ; %bb.0: -; GFX8-NEXT: s_lshr_b32 s4, s0, 16 -; GFX8-NEXT: s_lshr_b32 s6, s2, 16 -; GFX8-NEXT: s_lshr_b32 s5, s1, 16 -; GFX8-NEXT: s_lshr_b32 s7, s3, 16 -; GFX8-NEXT: s_sext_i32_i16 s0, s0 -; GFX8-NEXT: s_sext_i32_i16 s2, s2 -; GFX8-NEXT: s_sext_i32_i16 s4, s4 -; GFX8-NEXT: s_sext_i32_i16 s6, s6 +; GFX8-NEXT: s_mov_b32 s5, 0x100010 +; GFX8-NEXT: s_sext_i32_i16 s4, s0 +; GFX8-NEXT: s_sext_i32_i16 s7, s2 +; GFX8-NEXT: s_sext_i32_i16 s6, s1 +; GFX8-NEXT: s_sext_i32_i16 s8, s3 +; GFX8-NEXT: s_bfe_i32 s0, s0, s5 +; GFX8-NEXT: s_bfe_i32 s2, s2, s5 +; GFX8-NEXT: s_bfe_i32 s1, s1, s5 +; GFX8-NEXT: s_bfe_i32 s3, s3, s5 ; GFX8-NEXT: s_ashr_i32 s0, s0, s2 -; GFX8-NEXT: s_ashr_i32 s2, s4, s6 -; GFX8-NEXT: s_mov_b32 s4, 0xffff -; GFX8-NEXT: s_sext_i32_i16 s1, s1 -; GFX8-NEXT: s_sext_i32_i16 s3, s3 -; GFX8-NEXT: s_sext_i32_i16 s5, s5 -; GFX8-NEXT: s_sext_i32_i16 s7, s7 ; GFX8-NEXT: s_ashr_i32 s1, s1, s3 -; GFX8-NEXT: s_ashr_i32 s3, s5, s7 -; GFX8-NEXT: s_lshl_b32 s2, s2, 16 -; GFX8-NEXT: s_and_b32 s0, s0, s4 -; GFX8-NEXT: s_or_b32 s0, s2, s0 -; GFX8-NEXT: s_lshl_b32 s2, s3, 16 -; GFX8-NEXT: s_and_b32 s1, s1, s4 -; GFX8-NEXT: s_or_b32 s1, s2, s1 +; GFX8-NEXT: s_ashr_i32 s4, s4, s7 +; GFX8-NEXT: s_mov_b32 s3, 0xffff +; GFX8-NEXT: s_ashr_i32 s2, s6, s8 +; GFX8-NEXT: s_lshl_b32 s0, s0, 16 +; GFX8-NEXT: s_and_b32 s4, s4, s3 +; GFX8-NEXT: s_lshl_b32 s1, s1, 16 +; GFX8-NEXT: s_and_b32 s2, s2, s3 +; GFX8-NEXT: s_or_b32 s0, s0, s4 +; GFX8-NEXT: s_or_b32 s1, s1, s2 ; GFX8-NEXT: ; return to shader part epilog ; ; GFX9-LABEL: s_ashr_v4i16: @@ -1245,51 +1241,44 @@ ; ; GFX8-LABEL: s_ashr_v8i16: ; GFX8: ; %bb.0: -; GFX8-NEXT: s_lshr_b32 s8, s0, 16 -; GFX8-NEXT: s_lshr_b32 s12, s4, 16 -; GFX8-NEXT: s_lshr_b32 s9, s1, 16 -; GFX8-NEXT: s_lshr_b32 s13, s5, 16 -; GFX8-NEXT: s_sext_i32_i16 s0, s0 -; GFX8-NEXT: s_sext_i32_i16 s4, s4 -; GFX8-NEXT: s_sext_i32_i16 s8, s8 -; GFX8-NEXT: s_sext_i32_i16 s12, s12 -; GFX8-NEXT: s_lshr_b32 s10, s2, 16 -; GFX8-NEXT: s_lshr_b32 s14, s6, 16 +; GFX8-NEXT: 
s_mov_b32 s9, 0x100010 +; GFX8-NEXT: s_sext_i32_i16 s8, s0 +; GFX8-NEXT: s_sext_i32_i16 s13, s4 +; GFX8-NEXT: s_sext_i32_i16 s10, s1 +; GFX8-NEXT: s_sext_i32_i16 s12, s3 +; GFX8-NEXT: s_sext_i32_i16 s14, s5 +; GFX8-NEXT: s_sext_i32_i16 s16, s7 +; GFX8-NEXT: s_bfe_i32 s0, s0, s9 +; GFX8-NEXT: s_bfe_i32 s4, s4, s9 +; GFX8-NEXT: s_bfe_i32 s1, s1, s9 +; GFX8-NEXT: s_bfe_i32 s5, s5, s9 +; GFX8-NEXT: s_bfe_i32 s3, s3, s9 +; GFX8-NEXT: s_bfe_i32 s7, s7, s9 ; GFX8-NEXT: s_ashr_i32 s0, s0, s4 -; GFX8-NEXT: s_ashr_i32 s4, s8, s12 -; GFX8-NEXT: s_mov_b32 s8, 0xffff -; GFX8-NEXT: s_sext_i32_i16 s1, s1 -; GFX8-NEXT: s_sext_i32_i16 s5, s5 -; GFX8-NEXT: s_sext_i32_i16 s9, s9 -; GFX8-NEXT: s_sext_i32_i16 s13, s13 -; GFX8-NEXT: s_lshr_b32 s11, s3, 16 -; GFX8-NEXT: s_lshr_b32 s15, s7, 16 +; GFX8-NEXT: s_ashr_i32 s3, s3, s7 ; GFX8-NEXT: s_ashr_i32 s1, s1, s5 -; GFX8-NEXT: s_sext_i32_i16 s2, s2 -; GFX8-NEXT: s_sext_i32_i16 s6, s6 -; GFX8-NEXT: s_sext_i32_i16 s10, s10 -; GFX8-NEXT: s_sext_i32_i16 s14, s14 -; GFX8-NEXT: s_ashr_i32 s5, s9, s13 -; GFX8-NEXT: s_lshl_b32 s4, s4, 16 -; GFX8-NEXT: s_and_b32 s0, s0, s8 +; GFX8-NEXT: s_sext_i32_i16 s11, s2 +; GFX8-NEXT: s_sext_i32_i16 s15, s6 +; GFX8-NEXT: s_bfe_i32 s2, s2, s9 +; GFX8-NEXT: s_bfe_i32 s6, s6, s9 +; GFX8-NEXT: s_ashr_i32 s4, s10, s14 +; GFX8-NEXT: s_mov_b32 s7, 0xffff ; GFX8-NEXT: s_ashr_i32 s2, s2, s6 -; GFX8-NEXT: s_or_b32 s0, s4, s0 -; GFX8-NEXT: s_sext_i32_i16 s3, s3 -; GFX8-NEXT: s_sext_i32_i16 s7, s7 -; GFX8-NEXT: s_sext_i32_i16 s11, s11 -; GFX8-NEXT: s_sext_i32_i16 s15, s15 -; GFX8-NEXT: s_ashr_i32 s6, s10, s14 -; GFX8-NEXT: s_lshl_b32 s4, s5, 16 -; GFX8-NEXT: s_and_b32 s1, s1, s8 -; GFX8-NEXT: s_ashr_i32 s3, s3, s7 -; GFX8-NEXT: s_or_b32 s1, s4, s1 -; GFX8-NEXT: s_ashr_i32 s7, s11, s15 -; GFX8-NEXT: s_lshl_b32 s4, s6, 16 -; GFX8-NEXT: s_and_b32 s2, s2, s8 -; GFX8-NEXT: s_or_b32 s2, s4, s2 -; GFX8-NEXT: s_lshl_b32 s4, s7, 16 -; GFX8-NEXT: s_and_b32 s3, s3, s8 -; GFX8-NEXT: s_or_b32 s3, s4, s3 +; GFX8-NEXT: s_ashr_i32 s5, 
s11, s15 +; GFX8-NEXT: s_lshl_b32 s1, s1, 16 +; GFX8-NEXT: s_and_b32 s4, s4, s7 +; GFX8-NEXT: s_ashr_i32 s8, s8, s13 +; GFX8-NEXT: s_or_b32 s1, s1, s4 +; GFX8-NEXT: s_ashr_i32 s6, s12, s16 +; GFX8-NEXT: s_lshl_b32 s2, s2, 16 +; GFX8-NEXT: s_and_b32 s4, s5, s7 +; GFX8-NEXT: s_or_b32 s2, s2, s4 +; GFX8-NEXT: s_lshl_b32 s0, s0, 16 +; GFX8-NEXT: s_and_b32 s8, s8, s7 +; GFX8-NEXT: s_lshl_b32 s3, s3, 16 +; GFX8-NEXT: s_and_b32 s4, s6, s7 +; GFX8-NEXT: s_or_b32 s0, s0, s8 +; GFX8-NEXT: s_or_b32 s3, s3, s4 ; GFX8-NEXT: ; return to shader part epilog ; ; GFX9-LABEL: s_ashr_v8i16: Index: llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll +++ llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll @@ -42,20 +42,12 @@ } define float @v_uitofp_to_f32_lshr7_mask255(i32 %arg0) nounwind { -; SI-LABEL: v_uitofp_to_f32_lshr7_mask255: -; SI: ; %bb.0: -; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v0, 7, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 -; SI-NEXT: s_setpc_b64 s[30:31] -; -; VI-LABEL: v_uitofp_to_f32_lshr7_mask255: -; VI: ; %bb.0: -; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NEXT: v_lshrrev_b32_e32 v0, 7, v0 -; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 -; VI-NEXT: s_setpc_b64 s[30:31] +; GCN-LABEL: v_uitofp_to_f32_lshr7_mask255: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_bfe_u32 v0, v0, 7, 8 +; GCN-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] %lshr.7 = lshr i32 %arg0, 7 %masked = and i32 %lshr.7, 255 %cvt = uitofp i32 %masked to float @@ -66,16 +58,14 @@ ; SI-LABEL: v_uitofp_to_f32_lshr8_mask255: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v0, 8, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: 
v_bfe_u32 v0, v0, 8, 8 ; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: v_uitofp_to_f32_lshr8_mask255: ; VI: ; %bb.0: ; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v0 -; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; VI-NEXT: s_setpc_b64 s[30:31] %lshr.8 = lshr i32 %arg0, 8 %masked = and i32 %lshr.8, 255 @@ -116,17 +106,14 @@ ; SI-LABEL: v_uitofp_to_f32_lshr16_mask255: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_bfe_u32 v0, v0, 16, 8 ; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: v_uitofp_to_f32_lshr16_mask255: ; VI: ; %bb.0: ; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NEXT: v_mov_b32_e32 v1, 0xff -; VI-NEXT: v_and_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; VI-NEXT: s_setpc_b64 s[30:31] %lshr.16 = lshr i32 %arg0, 16 %masked = and i32 %lshr.16, 255 @@ -167,20 +154,19 @@ ; SI-LABEL: v_uitofp_v2i8_to_v2f32: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; SI-NEXT: s_movk_i32 s4, 0xff -; SI-NEXT: v_and_b32_e32 v0, s4, v0 -; SI-NEXT: v_and_b32_e32 v1, s4, v1 -; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 -; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v2, v1 +; SI-NEXT: v_bfe_u32 v0, v0, 8, 8 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v0 +; SI-NEXT: v_mov_b32_e32 v0, v2 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: v_uitofp_v2i8_to_v2f32: ; VI: ; %bb.0: ; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; 
VI-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 -; VI-NEXT: v_cvt_f32_ubyte0_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 +; VI-NEXT: v_mov_b32_e32 v0, v2 ; VI-NEXT: s_setpc_b64 s[30:31] %val = bitcast i16 %arg0 to <2 x i8> %cvt = uitofp <2 x i8> %val to <2 x float> @@ -191,27 +177,21 @@ ; SI-LABEL: v_uitofp_v3i8_to_v3f32: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; SI-NEXT: s_movk_i32 s4, 0xff -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; SI-NEXT: v_and_b32_e32 v0, s4, v0 -; SI-NEXT: v_and_b32_e32 v1, s4, v1 -; SI-NEXT: v_and_b32_e32 v2, s4, v2 -; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v3, v1 +; SI-NEXT: v_bfe_u32 v1, v0, 8, 8 +; SI-NEXT: v_bfe_u32 v0, v0, 16, 8 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v2, v0 ; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1 -; SI-NEXT: v_cvt_f32_ubyte0_e32 v2, v2 +; SI-NEXT: v_mov_b32_e32 v0, v3 ; SI-NEXT: s_setpc_b64 s[30:31] ; ; VI-LABEL: v_uitofp_v3i8_to_v3f32: ; VI: ; %bb.0: ; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NEXT: s_movk_i32 s4, 0xff -; VI-NEXT: v_mov_b32_e32 v2, s4 -; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v0 ; VI-NEXT: v_cvt_f32_ubyte0_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 -; VI-NEXT: v_and_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_cvt_f32_ubyte0_e32 v2, v0 -; VI-NEXT: v_cvt_f32_ubyte0_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:BYTE_2 ; VI-NEXT: v_mov_b32_e32 v0, v3 ; VI-NEXT: s_setpc_b64 s[30:31] %trunc = trunc i32 %arg0 to i24 @@ -224,13 +204,10 @@ ; SI-LABEL: v_uitofp_v4i8_to_v4f32: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: s_movk_i32 s4, 0xff -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; SI-NEXT: v_and_b32_e32 v3, s4, v0 -; SI-NEXT: v_and_b32_e32 v1, s4, v1 -; SI-NEXT: v_and_b32_e32 v2, s4, v2 -; SI-NEXT: v_cvt_f32_ubyte0_e32 v4, v3 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v0 +; SI-NEXT: v_bfe_u32 v2, v0, 16, 8 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v4, v1 +; SI-NEXT: v_bfe_u32 v1, v0, 8, 8 ; SI-NEXT: v_cvt_f32_ubyte3_e32 v3, v0 ; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1 ; SI-NEXT: v_cvt_f32_ubyte0_e32 v2, v2 @@ -240,14 +217,10 @@ ; VI-LABEL: v_uitofp_v4i8_to_v4f32: ; VI: ; %bb.0: ; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NEXT: s_movk_i32 s4, 0xff -; VI-NEXT: v_mov_b32_e32 v2, s4 -; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; VI-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; VI-NEXT: v_cvt_f32_ubyte0_sdwa v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; VI-NEXT: v_cvt_f32_ubyte3_e32 v3, v0 -; VI-NEXT: v_cvt_f32_ubyte0_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 -; VI-NEXT: v_cvt_f32_ubyte0_e32 v2, v2 ; VI-NEXT: v_mov_b32_e32 v0, v4 ; VI-NEXT: s_setpc_b64 s[30:31] %val = bitcast i32 %arg0 to <4 x i8> @@ -259,13 +232,10 @@ ; SI-LABEL: v_uitofp_unpack_i32_to_v4f32: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: s_movk_i32 s4, 0xff -; SI-NEXT: v_and_b32_e32 v1, s4, v0 -; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v0 +; SI-NEXT: v_and_b32_e32 v1, 0xff, v0 +; SI-NEXT: v_bfe_u32 v2, v0, 16, 8 ; SI-NEXT: v_cvt_f32_ubyte0_e32 
v4, v1 -; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; SI-NEXT: v_and_b32_e32 v1, s4, v1 -; SI-NEXT: v_and_b32_e32 v2, s4, v2 +; SI-NEXT: v_bfe_u32 v1, v0, 8, 8 ; SI-NEXT: v_cvt_f32_ubyte3_e32 v3, v0 ; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1 ; SI-NEXT: v_cvt_f32_ubyte0_e32 v2, v2 @@ -275,14 +245,10 @@ ; VI-LABEL: v_uitofp_unpack_i32_to_v4f32: ; VI: ; %bb.0: ; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NEXT: s_movk_i32 s4, 0xff -; VI-NEXT: v_mov_b32_e32 v2, s4 -; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; VI-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; VI-NEXT: v_cvt_f32_ubyte0_sdwa v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; VI-NEXT: v_cvt_f32_ubyte3_e32 v3, v0 -; VI-NEXT: v_cvt_f32_ubyte0_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 -; VI-NEXT: v_cvt_f32_ubyte0_e32 v2, v2 ; VI-NEXT: v_mov_b32_e32 v0, v4 ; VI-NEXT: s_setpc_b64 s[30:31] %mask.arg0 = and i32 %arg0, 255 @@ -351,8 +317,7 @@ ; SI-LABEL: v_uitofp_to_f16_lshr8_mask255: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v0, 8, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_bfe_u32 v0, v0, 8, 8 ; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: s_setpc_b64 s[30:31] @@ -360,8 +325,7 @@ ; VI-LABEL: v_uitofp_to_f16_lshr8_mask255: ; VI: ; %bb.0: ; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v0 -; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; VI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; VI-NEXT: s_setpc_b64 s[30:31] %lshr.8 = lshr i32 %arg0, 8 @@ -374,8 +338,7 @@ ; SI-LABEL: 
v_uitofp_to_f16_lshr16_mask255: ; SI: ; %bb.0: ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_bfe_u32 v0, v0, 16, 8 ; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 ; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; SI-NEXT: s_setpc_b64 s[30:31] @@ -383,9 +346,7 @@ ; VI-LABEL: v_uitofp_to_f16_lshr16_mask255: ; VI: ; %bb.0: ; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NEXT: v_mov_b32_e32 v1, 0xff -; VI-NEXT: v_and_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; VI-NEXT: v_cvt_f16_f32_e32 v0, v0 ; VI-NEXT: s_setpc_b64 s[30:31] %lshr.16 = lshr i32 %arg0, 16 @@ -442,8 +403,7 @@ ; GCN-LABEL: v_uitofp_to_f64_lshr8_mask255: ; GCN: ; %bb.0: ; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GCN-NEXT: v_lshrrev_b32_e32 v0, 8, v0 -; GCN-NEXT: v_and_b32_e32 v0, 0xff, v0 +; GCN-NEXT: v_bfe_u32 v0, v0, 8, 8 ; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], v0 ; GCN-NEXT: s_setpc_b64 s[30:31] %lshr.8 = lshr i32 %arg0, 8 @@ -453,21 +413,12 @@ } define double @v_uitofp_to_f64_lshr16_mask255(i32 %arg0) nounwind { -; SI-LABEL: v_uitofp_to_f64_lshr16_mask255: -; SI: ; %bb.0: -; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 -; SI-NEXT: v_cvt_f64_u32_e32 v[0:1], v0 -; SI-NEXT: s_setpc_b64 s[30:31] -; -; VI-LABEL: v_uitofp_to_f64_lshr16_mask255: -; VI: ; %bb.0: -; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; VI-NEXT: v_mov_b32_e32 v1, 0xff -; VI-NEXT: v_and_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_cvt_f64_u32_e32 v[0:1], v0 -; VI-NEXT: s_setpc_b64 s[30:31] +; GCN-LABEL: v_uitofp_to_f64_lshr16_mask255: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: 
v_bfe_u32 v0, v0, 16, 8 +; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], v0 +; GCN-NEXT: s_setpc_b64 s[30:31] %lshr.16 = lshr i32 %arg0, 16 %masked = and i32 %lshr.16, 255 %cvt = uitofp i32 %masked to double @@ -952,8 +903,7 @@ ; SI-NEXT: s_mov_b32 s2, -1 ; SI-NEXT: s_mov_b64 s[6:7], s[2:3] ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v0, 8, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_bfe_u32 v0, v0, 8, 8 ; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 ; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; SI-NEXT: s_endpgm @@ -970,8 +920,7 @@ ; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc ; VI-NEXT: flat_load_dword v0, v[0:1] ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v0 -; VI-NEXT: v_cvt_f32_ubyte0_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 ; VI-NEXT: v_mov_b32_e32 v0, s2 ; VI-NEXT: v_mov_b32_e32 v1, s3 ; VI-NEXT: flat_store_dword v[0:1], v2 @@ -1000,8 +949,7 @@ ; SI-NEXT: s_mov_b32 s2, -1 ; SI-NEXT: s_mov_b64 s[6:7], s[2:3] ; SI-NEXT: s_waitcnt vmcnt(0) -; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 -; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_bfe_u32 v0, v0, 16, 8 ; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 ; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; SI-NEXT: s_endpgm @@ -1017,10 +965,8 @@ ; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v2 ; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc ; VI-NEXT: flat_load_dword v0, v[0:1] -; VI-NEXT: v_mov_b32_e32 v1, 0xff ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_and_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; VI-NEXT: v_cvt_f32_ubyte0_e32 v2, v0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 ; VI-NEXT: v_mov_b32_e32 v0, s2 ; VI-NEXT: v_mov_b32_e32 v1, s3 ; VI-NEXT: flat_store_dword v[0:1], v2 Index: llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement-stack-lower.ll 
=================================================================== --- llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement-stack-lower.ll +++ llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement-stack-lower.ll @@ -312,8 +312,7 @@ ; GCN-NEXT: v_mov_b32_e32 v5, s4 ; GCN-NEXT: v_add_co_u32_e32 v60, vcc, v0, v5 ; GCN-NEXT: v_addc_co_u32_e32 v61, vcc, v1, v6, vcc -; GCN-NEXT: v_lshrrev_b32_e32 v0, 1, v2 -; GCN-NEXT: v_and_b32_e32 v0, 63, v0 +; GCN-NEXT: v_bfe_u32 v0, v2, 1, 6 ; GCN-NEXT: v_lshlrev_b32_e32 v0, 2, v0 ; GCN-NEXT: v_and_b32_e32 v1, 1, v2 ; GCN-NEXT: v_lshlrev_b32_e32 v1, 4, v1 Index: llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.i8.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.i8.ll +++ llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.i8.ll @@ -8,19 +8,16 @@ ; GCN-LABEL: extractelement_sgpr_v4i8_sgpr_idx: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dword s0, s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s5, 0xff ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s1, s0, 8 -; GCN-NEXT: s_and_b32 s1, s1, s5 -; GCN-NEXT: s_lshr_b32 s2, s0, 16 -; GCN-NEXT: s_lshr_b32 s3, s0, 24 -; GCN-NEXT: s_and_b32 s0, s0, s5 -; GCN-NEXT: s_lshl_b32 s1, s1, 8 -; GCN-NEXT: s_or_b32 s0, s0, s1 -; GCN-NEXT: s_and_b32 s1, s2, s5 -; GCN-NEXT: s_lshl_b32 s1, s1, 16 -; GCN-NEXT: s_or_b32 s0, s0, s1 -; GCN-NEXT: s_lshl_b32 s1, s3, 24 +; GCN-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GCN-NEXT: s_lshr_b32 s1, s0, 24 +; GCN-NEXT: s_and_b32 s2, s0, 0xff +; GCN-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GCN-NEXT: s_lshl_b32 s3, s3, 8 +; GCN-NEXT: s_or_b32 s2, s2, s3 +; GCN-NEXT: s_lshl_b32 s0, s0, 16 +; GCN-NEXT: s_or_b32 s0, s2, s0 +; GCN-NEXT: s_lshl_b32 s1, s1, 24 ; GCN-NEXT: s_or_b32 s0, s0, s1 ; GCN-NEXT: s_and_b32 s1, s4, 3 ; GCN-NEXT: s_lshl_b32 s1, s1, 3 @@ -30,22 +27,19 @@ ; GFX10-LABEL: extractelement_sgpr_v4i8_sgpr_idx: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dword s0, s[2:3], 0x0 -; GFX10-NEXT: s_movk_i32 s1, 0xff ; 
GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s3, s0, 16 -; GFX10-NEXT: s_and_b32 s2, s2, s1 -; GFX10-NEXT: s_lshr_b32 s5, s0, 24 -; GFX10-NEXT: s_and_b32 s0, s0, s1 -; GFX10-NEXT: s_and_b32 s1, s3, s1 -; GFX10-NEXT: s_lshl_b32 s2, s2, 8 -; GFX10-NEXT: s_lshl_b32 s1, s1, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s2 -; GFX10-NEXT: s_lshl_b32 s2, s5, 24 +; GFX10-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GFX10-NEXT: s_lshr_b32 s1, s0, 24 +; GFX10-NEXT: s_and_b32 s2, s0, 0xff +; GFX10-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GFX10-NEXT: s_lshl_b32 s3, s3, 8 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_or_b32 s2, s2, s3 +; GFX10-NEXT: s_lshl_b32 s1, s1, 24 +; GFX10-NEXT: s_or_b32 s0, s2, s0 +; GFX10-NEXT: s_and_b32 s2, s4, 3 ; GFX10-NEXT: s_or_b32 s0, s0, s1 -; GFX10-NEXT: s_and_b32 s1, s4, 3 -; GFX10-NEXT: s_or_b32 s0, s0, s2 -; GFX10-NEXT: s_lshl_b32 s1, s1, 3 +; GFX10-NEXT: s_lshl_b32 s1, s2, 3 ; GFX10-NEXT: s_lshr_b32 s0, s0, s1 ; GFX10-NEXT: ; return to shader part epilog %vector = load <4 x i8>, <4 x i8> addrspace(4)* %ptr @@ -57,18 +51,18 @@ ; GFX9-LABEL: extractelement_vgpr_v4i8_sgpr_idx: ; GFX9: ; %bb.0: ; GFX9-NEXT: global_load_dword v0, v[0:1], off -; GFX9-NEXT: s_mov_b32 s0, 8 -; GFX9-NEXT: s_movk_i32 s1, 0xff -; GFX9-NEXT: s_and_b32 s2, s2, 3 +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: v_mov_b32_e32 v3, 16 +; GFX9-NEXT: v_mov_b32_e32 v1, 0xff +; GFX9-NEXT: s_and_b32 s0, s2, 3 +; GFX9-NEXT: s_lshl_b32 s0, s0, 3 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2 -; GFX9-NEXT: v_and_or_b32 v0, v0, s1, v1 -; GFX9-NEXT: v_or3_b32 v0, v0, v3, v2 -; GFX9-NEXT: s_lshl_b32 s0, s2, 3 +; GFX9-NEXT: 
v_lshrrev_b32_e32 v4, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, s0, v0 ; GFX9-NEXT: v_readfirstlane_b32 s0, v0 ; GFX9-NEXT: ; return to shader part epilog @@ -76,20 +70,18 @@ ; GFX8-LABEL: extractelement_vgpr_v4i8_sgpr_idx: ; GFX8: ; %bb.0: ; GFX8-NEXT: flat_load_dword v0, v[0:1] -; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s0 +; GFX8-NEXT: v_mov_b32_e32 v2, 16 ; GFX8-NEXT: s_and_b32 s0, s2, 3 ; GFX8-NEXT: s_lshl_b32 s0, s0, 3 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v3 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, s0, v0 ; GFX8-NEXT: v_readfirstlane_b32 s0, v0 ; GFX8-NEXT: ; return to shader part epilog @@ -100,22 +92,19 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dword v0, v[0:1], 
s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s0, 0xff -; GFX7-NEXT: s_and_b32 s1, s2, 3 +; GFX7-NEXT: s_and_b32 s0, s2, 3 +; GFX7-NEXT: s_lshl_b32 s0, s0, 3 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s0, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s0, v2 -; GFX7-NEXT: v_and_b32_e32 v0, s0, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 -; GFX7-NEXT: s_lshl_b32 s0, s1, 3 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, s0, v0 ; GFX7-NEXT: v_readfirstlane_b32 s0, v0 ; GFX7-NEXT: ; return to shader part epilog @@ -123,18 +112,17 @@ ; GFX10-LABEL: extractelement_vgpr_v4i8_sgpr_idx: ; GFX10: ; %bb.0: ; GFX10-NEXT: global_load_dword v0, v[0:1], off -; GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s0, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s0, v1 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX10-NEXT: v_mov_b32_e32 v1, 8 +; GFX10-NEXT: v_mov_b32_e32 v2, 16 ; GFX10-NEXT: s_and_b32 s0, s2, 3 ; GFX10-NEXT: s_lshl_b32 s0, s0, 3 -; GFX10-NEXT: v_or3_b32 v0, 
v0, v3, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, s0, v0 ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: ; return to shader part epilog @@ -148,18 +136,18 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dword v0, v[0:1], off -; GFX9-NEXT: v_and_b32_e32 v1, 3, v2 -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 3, v1 +; GFX9-NEXT: v_mov_b32_e32 v3, 8 +; GFX9-NEXT: v_mov_b32_e32 v4, 16 +; GFX9-NEXT: v_mov_b32_e32 v1, 0xff +; GFX9-NEXT: v_and_b32_e32 v2, 3, v2 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v4, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX9-NEXT: v_and_or_b32 v0, v0, s5, v2 -; GFX9-NEXT: v_or3_b32 v0, v0, v4, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v5 +; GFX9-NEXT: v_or3_b32 v0, v0, v4, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 3, v2 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, v1, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -167,19 +155,17 @@ ; GFX8: ; 
%bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dword v0, v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v3, s4 +; GFX8-NEXT: v_mov_b32_e32 v3, 16 ; GFX8-NEXT: v_and_b32_e32 v2, 3, v2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v3, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v3 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v4 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshlrev_b32_e32 v1, 3, v2 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, v1, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -192,21 +178,18 @@ ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 ; GFX7-NEXT: v_and_b32_e32 v1, 3, v2 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: v_lshlrev_b32_e32 v1, 3, v1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v3, s4, v3 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_bfe_u32 v4, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v3, 
0xff, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX7-NEXT: v_or_b32_e32 v0, v3, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, v1, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -215,18 +198,17 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dword v0, v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 +; GFX10-NEXT: v_mov_b32_e32 v1, 8 +; GFX10-NEXT: v_mov_b32_e32 v3, 16 +; GFX10-NEXT: v_and_b32_e32 v2, 3, v2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX10-NEXT: v_and_b32_sdwa v4, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v1 -; GFX10-NEXT: v_and_b32_e32 v1, 3, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v4, v3 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 3, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 3, v2 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, v1, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <4 x i8>, <4 x i8> addrspace(1)* %ptr @@ -238,21 +220,18 @@ ; GFX9-LABEL: 
extractelement_sgpr_v4i8_vgpr_idx: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dword s0, s[2:3], 0x0 -; GFX9-NEXT: s_movk_i32 s4, 0xff ; GFX9-NEXT: v_and_b32_e32 v0, 3, v0 ; GFX9-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_lshr_b32 s1, s0, 8 -; GFX9-NEXT: s_and_b32 s1, s1, s4 -; GFX9-NEXT: s_lshr_b32 s2, s0, 16 -; GFX9-NEXT: s_lshr_b32 s3, s0, 24 -; GFX9-NEXT: s_and_b32 s0, s0, s4 -; GFX9-NEXT: s_lshl_b32 s1, s1, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s1 -; GFX9-NEXT: s_and_b32 s1, s2, s4 -; GFX9-NEXT: s_lshl_b32 s1, s1, 16 -; GFX9-NEXT: s_or_b32 s0, s0, s1 -; GFX9-NEXT: s_lshl_b32 s1, s3, 24 +; GFX9-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GFX9-NEXT: s_lshr_b32 s1, s0, 24 +; GFX9-NEXT: s_and_b32 s2, s0, 0xff +; GFX9-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GFX9-NEXT: s_lshl_b32 s3, s3, 8 +; GFX9-NEXT: s_or_b32 s2, s2, s3 +; GFX9-NEXT: s_lshl_b32 s0, s0, 16 +; GFX9-NEXT: s_or_b32 s0, s2, s0 +; GFX9-NEXT: s_lshl_b32 s1, s1, 24 ; GFX9-NEXT: s_or_b32 s0, s0, s1 ; GFX9-NEXT: v_lshrrev_b32_e64 v0, v0, s0 ; GFX9-NEXT: v_readfirstlane_b32 s0, v0 @@ -261,21 +240,18 @@ ; GFX8-LABEL: extractelement_sgpr_v4i8_vgpr_idx: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_load_dword s0, s[2:3], 0x0 -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: v_and_b32_e32 v0, 3, v0 ; GFX8-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_lshr_b32 s1, s0, 8 -; GFX8-NEXT: s_and_b32 s1, s1, s4 -; GFX8-NEXT: s_lshr_b32 s2, s0, 16 -; GFX8-NEXT: s_lshr_b32 s3, s0, 24 -; GFX8-NEXT: s_and_b32 s0, s0, s4 -; GFX8-NEXT: s_lshl_b32 s1, s1, 8 -; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s2, s4 -; GFX8-NEXT: s_lshl_b32 s1, s1, 16 -; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_lshl_b32 s1, s3, 24 +; GFX8-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GFX8-NEXT: s_lshr_b32 s1, s0, 24 +; GFX8-NEXT: s_and_b32 s2, s0, 0xff +; GFX8-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GFX8-NEXT: s_lshl_b32 s3, s3, 8 +; GFX8-NEXT: s_or_b32 s2, s2, s3 +; GFX8-NEXT: s_lshl_b32 s0, s0, 16 +; 
GFX8-NEXT: s_or_b32 s0, s2, s0 +; GFX8-NEXT: s_lshl_b32 s1, s1, 24 ; GFX8-NEXT: s_or_b32 s0, s0, s1 ; GFX8-NEXT: v_lshrrev_b32_e64 v0, v0, s0 ; GFX8-NEXT: v_readfirstlane_b32 s0, v0 @@ -284,21 +260,18 @@ ; GFX7-LABEL: extractelement_sgpr_v4i8_vgpr_idx: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_load_dword s0, s[2:3], 0x0 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: v_and_b32_e32 v0, 3, v0 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_lshr_b32 s1, s0, 8 -; GFX7-NEXT: s_and_b32 s1, s1, s4 -; GFX7-NEXT: s_lshr_b32 s2, s0, 16 -; GFX7-NEXT: s_lshr_b32 s3, s0, 24 -; GFX7-NEXT: s_and_b32 s0, s0, s4 -; GFX7-NEXT: s_lshl_b32 s1, s1, 8 -; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_and_b32 s1, s2, s4 -; GFX7-NEXT: s_lshl_b32 s1, s1, 16 -; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_lshl_b32 s1, s3, 24 +; GFX7-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GFX7-NEXT: s_lshr_b32 s1, s0, 24 +; GFX7-NEXT: s_and_b32 s2, s0, 0xff +; GFX7-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GFX7-NEXT: s_lshl_b32 s3, s3, 8 +; GFX7-NEXT: s_or_b32 s2, s2, s3 +; GFX7-NEXT: s_lshl_b32 s0, s0, 16 +; GFX7-NEXT: s_or_b32 s0, s2, s0 +; GFX7-NEXT: s_lshl_b32 s1, s1, 24 ; GFX7-NEXT: s_or_b32 s0, s0, s1 ; GFX7-NEXT: v_lshr_b32_e32 v0, s0, v0 ; GFX7-NEXT: v_readfirstlane_b32 s0, v0 @@ -307,21 +280,18 @@ ; GFX10-LABEL: extractelement_sgpr_v4i8_vgpr_idx: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dword s0, s[2:3], 0x0 -; GFX10-NEXT: s_movk_i32 s1, 0xff ; GFX10-NEXT: v_and_b32_e32 v0, 3, v0 ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s3, s0, 16 -; GFX10-NEXT: s_and_b32 s2, s2, s1 -; GFX10-NEXT: s_and_b32 s4, s0, s1 -; GFX10-NEXT: s_and_b32 s1, s3, s1 +; GFX10-NEXT: s_bfe_u32 s2, s0, 0x80008 +; GFX10-NEXT: s_bfe_u32 s3, s0, 0x80010 +; GFX10-NEXT: s_and_b32 s1, s0, 0xff ; GFX10-NEXT: s_lshl_b32 s2, s2, 8 ; GFX10-NEXT: s_lshr_b32 s0, s0, 24 -; GFX10-NEXT: s_lshl_b32 s1, s1, 16 -; GFX10-NEXT: 
s_or_b32 s2, s4, s2 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_or_b32 s1, s1, s2 ; GFX10-NEXT: s_lshl_b32 s0, s0, 24 -; GFX10-NEXT: s_or_b32 s1, s2, s1 +; GFX10-NEXT: s_or_b32 s1, s1, s3 ; GFX10-NEXT: s_or_b32 s0, s1, s0 ; GFX10-NEXT: v_lshrrev_b32_e64 v0, v0, s0 ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 @@ -334,39 +304,33 @@ define amdgpu_ps i8 @extractelement_sgpr_v4i8_idx0(<4 x i8> addrspace(4)* inreg %ptr) { ; GCN-LABEL: extractelement_sgpr_v4i8_idx0: ; GCN: ; %bb.0: -; GCN-NEXT: s_load_dword s1, s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s0, 0xff +; GCN-NEXT: s_load_dword s0, s[2:3], 0x0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s2, s1, 8 -; GCN-NEXT: s_and_b32 s2, s2, s0 -; GCN-NEXT: s_lshr_b32 s3, s1, 16 -; GCN-NEXT: s_lshr_b32 s4, s1, 24 -; GCN-NEXT: s_and_b32 s1, s1, s0 -; GCN-NEXT: s_and_b32 s0, s3, s0 -; GCN-NEXT: s_lshl_b32 s2, s2, 8 -; GCN-NEXT: s_or_b32 s1, s1, s2 +; GCN-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GCN-NEXT: s_lshr_b32 s1, s0, 24 +; GCN-NEXT: s_and_b32 s2, s0, 0xff +; GCN-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GCN-NEXT: s_lshl_b32 s3, s3, 8 +; GCN-NEXT: s_or_b32 s2, s2, s3 ; GCN-NEXT: s_lshl_b32 s0, s0, 16 -; GCN-NEXT: s_or_b32 s0, s1, s0 -; GCN-NEXT: s_lshl_b32 s1, s4, 24 +; GCN-NEXT: s_or_b32 s0, s2, s0 +; GCN-NEXT: s_lshl_b32 s1, s1, 24 ; GCN-NEXT: s_or_b32 s0, s0, s1 ; GCN-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: extractelement_sgpr_v4i8_idx0: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dword s0, s[2:3], 0x0 -; GFX10-NEXT: s_movk_i32 s1, 0xff ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s3, s0, 16 -; GFX10-NEXT: s_and_b32 s2, s2, s1 -; GFX10-NEXT: s_and_b32 s4, s0, s1 -; GFX10-NEXT: s_and_b32 s1, s3, s1 +; GFX10-NEXT: s_bfe_u32 s2, s0, 0x80008 +; GFX10-NEXT: s_bfe_u32 s3, s0, 0x80010 +; GFX10-NEXT: s_and_b32 s1, s0, 0xff ; GFX10-NEXT: s_lshl_b32 s2, s2, 8 ; GFX10-NEXT: s_lshr_b32 s0, s0, 24 -; GFX10-NEXT: s_lshl_b32 s1, s1, 16 -; GFX10-NEXT: s_or_b32 s2, s4, s2 +; 
GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_or_b32 s1, s1, s2 ; GFX10-NEXT: s_lshl_b32 s0, s0, 24 -; GFX10-NEXT: s_or_b32 s1, s2, s1 +; GFX10-NEXT: s_or_b32 s1, s1, s3 ; GFX10-NEXT: s_or_b32 s0, s1, s0 ; GFX10-NEXT: ; return to shader part epilog %vector = load <4 x i8>, <4 x i8> addrspace(4)* %ptr @@ -377,20 +341,17 @@ define amdgpu_ps i8 @extractelement_sgpr_v4i8_idx1(<4 x i8> addrspace(4)* inreg %ptr) { ; GCN-LABEL: extractelement_sgpr_v4i8_idx1: ; GCN: ; %bb.0: -; GCN-NEXT: s_load_dword s1, s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s0, 0xff +; GCN-NEXT: s_load_dword s0, s[2:3], 0x0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s2, s1, 8 -; GCN-NEXT: s_and_b32 s2, s2, s0 -; GCN-NEXT: s_lshr_b32 s3, s1, 16 -; GCN-NEXT: s_lshr_b32 s4, s1, 24 -; GCN-NEXT: s_and_b32 s1, s1, s0 -; GCN-NEXT: s_and_b32 s0, s3, s0 -; GCN-NEXT: s_lshl_b32 s2, s2, 8 -; GCN-NEXT: s_or_b32 s1, s1, s2 +; GCN-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GCN-NEXT: s_lshr_b32 s1, s0, 24 +; GCN-NEXT: s_and_b32 s2, s0, 0xff +; GCN-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GCN-NEXT: s_lshl_b32 s3, s3, 8 +; GCN-NEXT: s_or_b32 s2, s2, s3 ; GCN-NEXT: s_lshl_b32 s0, s0, 16 -; GCN-NEXT: s_or_b32 s0, s1, s0 -; GCN-NEXT: s_lshl_b32 s1, s4, 24 +; GCN-NEXT: s_or_b32 s0, s2, s0 +; GCN-NEXT: s_lshl_b32 s1, s1, 24 ; GCN-NEXT: s_or_b32 s0, s0, s1 ; GCN-NEXT: s_lshr_b32 s0, s0, 8 ; GCN-NEXT: ; return to shader part epilog @@ -398,19 +359,16 @@ ; GFX10-LABEL: extractelement_sgpr_v4i8_idx1: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dword s0, s[2:3], 0x0 -; GFX10-NEXT: s_movk_i32 s1, 0xff ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s3, s0, 16 -; GFX10-NEXT: s_and_b32 s2, s2, s1 -; GFX10-NEXT: s_and_b32 s4, s0, s1 -; GFX10-NEXT: s_and_b32 s1, s3, s1 +; GFX10-NEXT: s_bfe_u32 s2, s0, 0x80008 +; GFX10-NEXT: s_bfe_u32 s3, s0, 0x80010 +; GFX10-NEXT: s_and_b32 s1, s0, 0xff ; GFX10-NEXT: s_lshl_b32 s2, s2, 8 ; GFX10-NEXT: s_lshr_b32 s0, s0, 24 -; GFX10-NEXT: s_lshl_b32 s1, s1, 
16 -; GFX10-NEXT: s_or_b32 s2, s4, s2 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_or_b32 s1, s1, s2 ; GFX10-NEXT: s_lshl_b32 s0, s0, 24 -; GFX10-NEXT: s_or_b32 s1, s2, s1 +; GFX10-NEXT: s_or_b32 s1, s1, s3 ; GFX10-NEXT: s_or_b32 s0, s1, s0 ; GFX10-NEXT: s_lshr_b32 s0, s0, 8 ; GFX10-NEXT: ; return to shader part epilog @@ -422,20 +380,17 @@ define amdgpu_ps i8 @extractelement_sgpr_v4i8_idx2(<4 x i8> addrspace(4)* inreg %ptr) { ; GCN-LABEL: extractelement_sgpr_v4i8_idx2: ; GCN: ; %bb.0: -; GCN-NEXT: s_load_dword s1, s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s0, 0xff +; GCN-NEXT: s_load_dword s0, s[2:3], 0x0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s2, s1, 8 -; GCN-NEXT: s_and_b32 s2, s2, s0 -; GCN-NEXT: s_lshr_b32 s3, s1, 16 -; GCN-NEXT: s_lshr_b32 s4, s1, 24 -; GCN-NEXT: s_and_b32 s1, s1, s0 -; GCN-NEXT: s_and_b32 s0, s3, s0 -; GCN-NEXT: s_lshl_b32 s2, s2, 8 -; GCN-NEXT: s_or_b32 s1, s1, s2 +; GCN-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GCN-NEXT: s_lshr_b32 s1, s0, 24 +; GCN-NEXT: s_and_b32 s2, s0, 0xff +; GCN-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GCN-NEXT: s_lshl_b32 s3, s3, 8 +; GCN-NEXT: s_or_b32 s2, s2, s3 ; GCN-NEXT: s_lshl_b32 s0, s0, 16 -; GCN-NEXT: s_or_b32 s0, s1, s0 -; GCN-NEXT: s_lshl_b32 s1, s4, 24 +; GCN-NEXT: s_or_b32 s0, s2, s0 +; GCN-NEXT: s_lshl_b32 s1, s1, 24 ; GCN-NEXT: s_or_b32 s0, s0, s1 ; GCN-NEXT: s_lshr_b32 s0, s0, 16 ; GCN-NEXT: ; return to shader part epilog @@ -443,19 +398,16 @@ ; GFX10-LABEL: extractelement_sgpr_v4i8_idx2: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dword s0, s[2:3], 0x0 -; GFX10-NEXT: s_movk_i32 s1, 0xff ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s3, s0, 16 -; GFX10-NEXT: s_and_b32 s2, s2, s1 -; GFX10-NEXT: s_and_b32 s4, s0, s1 -; GFX10-NEXT: s_and_b32 s1, s3, s1 +; GFX10-NEXT: s_bfe_u32 s2, s0, 0x80008 +; GFX10-NEXT: s_bfe_u32 s3, s0, 0x80010 +; GFX10-NEXT: s_and_b32 s1, s0, 0xff ; GFX10-NEXT: s_lshl_b32 s2, s2, 8 ; GFX10-NEXT: s_lshr_b32 s0, s0, 24 -; 
GFX10-NEXT: s_lshl_b32 s1, s1, 16 -; GFX10-NEXT: s_or_b32 s2, s4, s2 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_or_b32 s1, s1, s2 ; GFX10-NEXT: s_lshl_b32 s0, s0, 24 -; GFX10-NEXT: s_or_b32 s1, s2, s1 +; GFX10-NEXT: s_or_b32 s1, s1, s3 ; GFX10-NEXT: s_or_b32 s0, s1, s0 ; GFX10-NEXT: s_lshr_b32 s0, s0, 16 ; GFX10-NEXT: ; return to shader part epilog @@ -467,20 +419,17 @@ define amdgpu_ps i8 @extractelement_sgpr_v4i8_idx3(<4 x i8> addrspace(4)* inreg %ptr) { ; GCN-LABEL: extractelement_sgpr_v4i8_idx3: ; GCN: ; %bb.0: -; GCN-NEXT: s_load_dword s1, s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s0, 0xff +; GCN-NEXT: s_load_dword s0, s[2:3], 0x0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s2, s1, 8 -; GCN-NEXT: s_and_b32 s2, s2, s0 -; GCN-NEXT: s_lshr_b32 s3, s1, 16 -; GCN-NEXT: s_lshr_b32 s4, s1, 24 -; GCN-NEXT: s_and_b32 s1, s1, s0 -; GCN-NEXT: s_and_b32 s0, s3, s0 -; GCN-NEXT: s_lshl_b32 s2, s2, 8 -; GCN-NEXT: s_or_b32 s1, s1, s2 +; GCN-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GCN-NEXT: s_lshr_b32 s1, s0, 24 +; GCN-NEXT: s_and_b32 s2, s0, 0xff +; GCN-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GCN-NEXT: s_lshl_b32 s3, s3, 8 +; GCN-NEXT: s_or_b32 s2, s2, s3 ; GCN-NEXT: s_lshl_b32 s0, s0, 16 -; GCN-NEXT: s_or_b32 s0, s1, s0 -; GCN-NEXT: s_lshl_b32 s1, s4, 24 +; GCN-NEXT: s_or_b32 s0, s2, s0 +; GCN-NEXT: s_lshl_b32 s1, s1, 24 ; GCN-NEXT: s_or_b32 s0, s0, s1 ; GCN-NEXT: s_lshr_b32 s0, s0, 24 ; GCN-NEXT: ; return to shader part epilog @@ -488,19 +437,16 @@ ; GFX10-LABEL: extractelement_sgpr_v4i8_idx3: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dword s0, s[2:3], 0x0 -; GFX10-NEXT: s_movk_i32 s1, 0xff ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s3, s0, 16 -; GFX10-NEXT: s_and_b32 s2, s2, s1 -; GFX10-NEXT: s_and_b32 s4, s0, s1 -; GFX10-NEXT: s_and_b32 s1, s3, s1 +; GFX10-NEXT: s_bfe_u32 s2, s0, 0x80008 +; GFX10-NEXT: s_bfe_u32 s3, s0, 0x80010 +; GFX10-NEXT: s_and_b32 s1, s0, 0xff ; GFX10-NEXT: s_lshl_b32 s2, s2, 8 ; GFX10-NEXT: 
s_lshr_b32 s0, s0, 24 -; GFX10-NEXT: s_lshl_b32 s1, s1, 16 -; GFX10-NEXT: s_or_b32 s2, s4, s2 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_or_b32 s1, s1, s2 ; GFX10-NEXT: s_lshl_b32 s0, s0, 24 -; GFX10-NEXT: s_or_b32 s1, s2, s1 +; GFX10-NEXT: s_or_b32 s1, s1, s3 ; GFX10-NEXT: s_or_b32 s0, s1, s0 ; GFX10-NEXT: s_lshr_b32 s0, s0, 24 ; GFX10-NEXT: ; return to shader part epilog @@ -514,15 +460,15 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dword v0, v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: v_mov_b32_e32 v3, 16 +; GFX9-NEXT: v_mov_b32_e32 v1, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v0, s5, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v4 ; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -530,17 +476,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dword v0, v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 +; GFX8-NEXT: v_mov_b32_e32 v2, 16 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; 
GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; @@ -551,19 +495,16 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -572,16 +513,15 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dword v0, v[0:1], off -; GFX10-NEXT: 
s_mov_b32 s4, 8 +; GFX10-NEXT: v_mov_b32_e32 v1, 8 +; GFX10-NEXT: v_mov_b32_e32 v2, 16 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v1 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <4 x i8>, <4 x i8> addrspace(1)* %ptr %element = extractelement <4 x i8> %vector, i32 0 @@ -594,15 +534,15 @@ ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dword v0, v[0:1], off ; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff +; GFX9-NEXT: v_mov_b32_e32 v2, 16 +; GFX9-NEXT: v_mov_b32_e32 v1, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v0, s5, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; 
GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v4 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -610,17 +550,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dword v0, v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 +; GFX8-NEXT: v_mov_b32_e32 v2, 16 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -632,19 +570,16 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: 
v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] @@ -655,15 +590,14 @@ ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dword v0, v[0:1], off ; GFX10-NEXT: s_mov_b32 s4, 8 +; GFX10-NEXT: v_mov_b32_e32 v1, 16 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v1 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v1, v2 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <4 x i8>, <4 x i8> addrspace(1)* %ptr @@ -676,16 +610,16 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dword v0, 
v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: s_mov_b32 s4, 16 +; GFX9-NEXT: v_mov_b32_e32 v1, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v0, s5, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX9-NEXT: v_or3_b32 v0, v0, v4, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -693,17 +627,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dword v0, v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 +; GFX8-NEXT: v_mov_b32_e32 v2, 16 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: 
v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -715,19 +647,16 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] @@ -737,14 +666,13 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dword v0, v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 +; GFX10-NEXT: v_mov_b32_e32 v1, 8 +; GFX10-NEXT: s_mov_b32 s4, 16 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, 
s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 ; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 16, v0 @@ -759,15 +687,15 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dword v0, v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: v_mov_b32_e32 v3, 16 +; GFX9-NEXT: v_mov_b32_e32 v1, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v0, s5, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v4 ; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] @@ -776,17 +704,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dword v0, v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 +; GFX8-NEXT: v_mov_b32_e32 
v2, 16 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -798,19 +724,16 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX7-NEXT: 
v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] @@ -820,16 +743,15 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dword v0, v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 +; GFX10-NEXT: v_mov_b32_e32 v1, 8 +; GFX10-NEXT: v_mov_b32_e32 v2, 16 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v1 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <4 x i8>, <4 x i8> addrspace(1)* %ptr @@ -841,31 +763,29 @@ ; GCN-LABEL: extractelement_sgpr_v8i8_sgpr_idx: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s9, 0xff +; GCN-NEXT: s_mov_b32 s7, 0x80008 +; GCN-NEXT: s_movk_i32 s5, 0xff ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s2, s0, 8 -; GCN-NEXT: s_and_b32 s2, s2, s9 -; GCN-NEXT: s_lshr_b32 s3, s0, 16 -; GCN-NEXT: s_lshr_b32 s5, s0, 24 -; GCN-NEXT: s_and_b32 s0, s0, s9 -; GCN-NEXT: s_lshl_b32 s2, s2, 8 -; GCN-NEXT: s_or_b32 s0, s0, s2 -; GCN-NEXT: s_and_b32 s2, s3, s9 -; GCN-NEXT: s_lshl_b32 s2, s2, 16 -; GCN-NEXT: s_or_b32 s0, 
s0, s2 -; GCN-NEXT: s_lshl_b32 s2, s5, 24 -; GCN-NEXT: s_lshr_b32 s6, s1, 8 +; GCN-NEXT: s_bfe_u32 s8, s0, s7 +; GCN-NEXT: s_and_b32 s6, s0, s5 +; GCN-NEXT: s_lshl_b32 s8, s8, 8 +; GCN-NEXT: s_or_b32 s6, s6, s8 +; GCN-NEXT: s_mov_b32 s8, 0x80010 +; GCN-NEXT: s_lshr_b32 s2, s0, 24 +; GCN-NEXT: s_bfe_u32 s0, s0, s8 +; GCN-NEXT: s_lshl_b32 s0, s0, 16 +; GCN-NEXT: s_or_b32 s0, s6, s0 +; GCN-NEXT: s_lshl_b32 s2, s2, 24 ; GCN-NEXT: s_or_b32 s0, s0, s2 -; GCN-NEXT: s_and_b32 s2, s6, s9 -; GCN-NEXT: s_lshr_b32 s7, s1, 16 -; GCN-NEXT: s_lshr_b32 s8, s1, 24 -; GCN-NEXT: s_and_b32 s1, s1, s9 -; GCN-NEXT: s_lshl_b32 s2, s2, 8 -; GCN-NEXT: s_or_b32 s1, s1, s2 -; GCN-NEXT: s_and_b32 s2, s7, s9 -; GCN-NEXT: s_lshl_b32 s2, s2, 16 -; GCN-NEXT: s_or_b32 s1, s1, s2 -; GCN-NEXT: s_lshl_b32 s2, s8, 24 +; GCN-NEXT: s_and_b32 s2, s1, s5 +; GCN-NEXT: s_bfe_u32 s5, s1, s7 +; GCN-NEXT: s_lshr_b32 s3, s1, 24 +; GCN-NEXT: s_bfe_u32 s1, s1, s8 +; GCN-NEXT: s_lshl_b32 s5, s5, 8 +; GCN-NEXT: s_or_b32 s2, s2, s5 +; GCN-NEXT: s_lshl_b32 s1, s1, 16 +; GCN-NEXT: s_or_b32 s1, s2, s1 +; GCN-NEXT: s_lshl_b32 s2, s3, 24 ; GCN-NEXT: s_or_b32 s1, s1, s2 ; GCN-NEXT: s_lshr_b32 s2, s4, 2 ; GCN-NEXT: s_cmp_eq_u32 s2, 1 @@ -878,34 +798,32 @@ ; GFX10-LABEL: extractelement_sgpr_v8i8_sgpr_idx: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 +; GFX10-NEXT: s_mov_b32 s3, 0x80008 ; GFX10-NEXT: s_movk_i32 s2, 0xff -; GFX10-NEXT: s_lshr_b32 s3, s4, 2 +; GFX10-NEXT: s_mov_b32 s5, 0x80010 +; GFX10-NEXT: s_lshr_b32 s6, s4, 2 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s5, s0, 8 -; GFX10-NEXT: s_lshr_b32 s8, s1, 8 -; GFX10-NEXT: s_lshr_b32 s6, s0, 16 -; GFX10-NEXT: s_and_b32 s5, s5, s2 -; GFX10-NEXT: s_and_b32 s8, s8, s2 -; GFX10-NEXT: s_lshr_b32 s9, s1, 16 +; GFX10-NEXT: s_bfe_u32 s10, s0, s3 +; GFX10-NEXT: s_bfe_u32 s3, s1, s3 ; GFX10-NEXT: s_lshr_b32 s7, s0, 24 -; GFX10-NEXT: s_and_b32 s6, s6, s2 -; GFX10-NEXT: s_lshr_b32 s10, s1, 24 -; GFX10-NEXT: s_and_b32 s0, s0, s2 -; 
GFX10-NEXT: s_and_b32 s1, s1, s2 -; GFX10-NEXT: s_and_b32 s2, s9, s2 -; GFX10-NEXT: s_lshl_b32 s5, s5, 8 -; GFX10-NEXT: s_lshl_b32 s8, s8, 8 -; GFX10-NEXT: s_lshl_b32 s6, s6, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s5 -; GFX10-NEXT: s_lshl_b32 s2, s2, 16 -; GFX10-NEXT: s_or_b32 s1, s1, s8 +; GFX10-NEXT: s_lshr_b32 s8, s1, 24 +; GFX10-NEXT: s_and_b32 s9, s0, s2 +; GFX10-NEXT: s_bfe_u32 s0, s0, s5 +; GFX10-NEXT: s_and_b32 s2, s1, s2 +; GFX10-NEXT: s_bfe_u32 s1, s1, s5 +; GFX10-NEXT: s_lshl_b32 s5, s10, 8 +; GFX10-NEXT: s_lshl_b32 s3, s3, 8 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_or_b32 s5, s9, s5 +; GFX10-NEXT: s_lshl_b32 s1, s1, 16 +; GFX10-NEXT: s_or_b32 s2, s2, s3 ; GFX10-NEXT: s_lshl_b32 s7, s7, 24 -; GFX10-NEXT: s_or_b32 s0, s0, s6 -; GFX10-NEXT: s_lshl_b32 s9, s10, 24 -; GFX10-NEXT: s_or_b32 s1, s1, s2 +; GFX10-NEXT: s_or_b32 s0, s5, s0 +; GFX10-NEXT: s_lshl_b32 s8, s8, 24 +; GFX10-NEXT: s_or_b32 s1, s2, s1 ; GFX10-NEXT: s_or_b32 s0, s0, s7 -; GFX10-NEXT: s_or_b32 s1, s1, s9 -; GFX10-NEXT: s_cmp_eq_u32 s3, 1 +; GFX10-NEXT: s_or_b32 s1, s1, s8 +; GFX10-NEXT: s_cmp_eq_u32 s6, 1 ; GFX10-NEXT: s_cselect_b32 s0, s1, s0 ; GFX10-NEXT: s_and_b32 s1, s4, 3 ; GFX10-NEXT: s_lshl_b32 s1, s1, 3 @@ -921,25 +839,24 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; GFX9-NEXT: s_mov_b32 s0, 8 -; GFX9-NEXT: s_movk_i32 s1, 0xff -; GFX9-NEXT: s_lshr_b32 s3, s2, 2 +; GFX9-NEXT: s_mov_b32 s1, 16 +; GFX9-NEXT: s_movk_i32 s3, 0xff +; GFX9-NEXT: s_lshr_b32 s4, s2, 2 ; GFX9-NEXT: s_and_b32 s2, s2, 3 -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s3, 1 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s4, 1 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s0, v4 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v6, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v7, v1, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v6, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v5, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v7, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v4 +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX9-NEXT: v_and_or_b32 v1, v1, s3, v6 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX9-NEXT: v_and_or_b32 v0, v0, s1, v2 -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX9-NEXT: v_and_or_b32 v1, v1, s1, v4 -; GFX9-NEXT: v_or3_b32 v0, v0, v6, v3 -; GFX9-NEXT: v_or3_b32 v1, v1, v7, v5 +; GFX9-NEXT: v_or3_b32 v0, v0, v5, v2 +; GFX9-NEXT: v_or3_b32 v1, v1, v7, v3 ; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc ; GFX9-NEXT: s_lshl_b32 s0, s2, 3 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, s0, v0 @@ -949,31 +866,27 @@ ; GFX8-LABEL: extractelement_vgpr_v8i8_sgpr_idx: ; GFX8: ; %bb.0: ; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1] -; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: v_mov_b32_e32 v2, 8 -; GFX8-NEXT: v_mov_b32_e32 v3, 8 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 +; GFX8-NEXT: v_mov_b32_e32 v3, 16 ; GFX8-NEXT: s_lshr_b32 s0, s2, 2 ; GFX8-NEXT: s_and_b32 s1, s2, 3 ; GFX8-NEXT: v_cmp_eq_u32_e64 vcc, s0, 1 ; GFX8-NEXT: s_lshl_b32 s0, s1, 3 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v7, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v5 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v3, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v9, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v4, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v6 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v9 -; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v8 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v6 +; GFX8-NEXT: v_lshlrev_b32_sdwa v6, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v7, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v7 +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v5 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v3 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v4 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v2 ; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc ; GFX8-NEXT: v_lshrrev_b32_e32 v0, s0, v0 ; GFX8-NEXT: 
v_readfirstlane_b32 s0, v0 @@ -990,30 +903,26 @@ ; GFX7-NEXT: s_and_b32 s2, s2, 3 ; GFX7-NEXT: v_cmp_eq_u32_e64 vcc, s1, 1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s0, v2 -; GFX7-NEXT: v_and_b32_e32 v5, s0, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v3, s0, v3 -; GFX7-NEXT: v_and_b32_e32 v6, s0, v6 -; GFX7-NEXT: v_and_b32_e32 v0, s0, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX7-NEXT: v_bfe_u32 v5, v0, 8, 8 +; GFX7-NEXT: v_bfe_u32 v7, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v4, s0, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v6, s0, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 ; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v5, v6, v7 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX7-NEXT: v_or_b32_e32 v0, v4, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v5, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v6 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v7 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 ; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc ; GFX7-NEXT: s_lshl_b32 s0, s2, 3 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, s0, v0 @@ -1024,24 +933,23 @@ ; GFX10: 
; %bb.0: ; GFX10-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: s_movk_i32 s1, 0xff +; GFX10-NEXT: s_mov_b32 s1, 16 +; GFX10-NEXT: s_movk_i32 s3, 0xff ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v6, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v7, v1, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v2 -; GFX10-NEXT: v_and_or_b32 v1, v1, s1, v3 +; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v5, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v6, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v7, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s3, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX10-NEXT: v_and_or_b32 v1, v1, s3, v5 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v4 ; GFX10-NEXT: s_lshr_b32 s0, s2, 2 ; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s0, 1 -; GFX10-NEXT: v_or3_b32 v0, v0, v6, v4 -; GFX10-NEXT: v_or3_b32 v1, v1, v7, v5 +; GFX10-NEXT: v_or3_b32 v0, v0, v6, v2 +; GFX10-NEXT: v_or3_b32 v1, v1, v7, v3 ; GFX10-NEXT: s_and_b32 s0, s2, 
3 ; GFX10-NEXT: s_lshl_b32 s0, s0, 3 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo @@ -1059,25 +967,24 @@ ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff +; GFX9-NEXT: s_mov_b32 s5, 16 +; GFX9-NEXT: s_movk_i32 s6, 0xff ; GFX9-NEXT: v_lshrrev_b32_e32 v3, 2, v2 ; GFX9-NEXT: v_and_b32_e32 v2, 3, v2 ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v3 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 8, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v6, s4, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v8, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v9, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v6, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v8, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v7, s5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v9, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, s6, v6 +; GFX9-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX9-NEXT: v_and_or_b32 v1, v1, s6, v8 ; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX9-NEXT: v_and_or_b32 v0, v0, s5, v4 -; GFX9-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX9-NEXT: v_and_or_b32 v1, v1, s5, v6 -; GFX9-NEXT: v_or3_b32 v0, v0, v8, v5 -; GFX9-NEXT: v_or3_b32 v1, 
v1, v9, v7 +; GFX9-NEXT: v_or3_b32 v0, v0, v7, v4 +; GFX9-NEXT: v_or3_b32 v1, v1, v9, v5 ; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 3, v2 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, v1, v0 @@ -1087,30 +994,26 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: v_mov_b32_e32 v3, 8 -; GFX8-NEXT: v_mov_b32_e32 v4, 8 -; GFX8-NEXT: v_mov_b32_e32 v5, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v6, 2, v2 +; GFX8-NEXT: v_mov_b32_e32 v4, 16 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 2, v2 ; GFX8-NEXT: v_and_b32_e32 v2, 3, v2 -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v6 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v5 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v7, 8, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v9, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v3, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v11, v0, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v5, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v7, 24, v8 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v11 -; GFX8-NEXT: v_lshlrev_b32_e32 v8, 24, v10 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v5 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v7 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v8 +; GFX8-NEXT: v_lshlrev_b32_sdwa v8, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v9 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v7 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v4 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v6 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v3 ; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc ; GFX8-NEXT: v_lshlrev_b32_e32 v1, 3, v2 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, v1, v0 @@ -1128,30 +1031,26 @@ ; GFX7-NEXT: v_and_b32_e32 v2, 3, v2 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v3 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v4, s4, v4 -; GFX7-NEXT: v_and_b32_e32 v7, s4, v7 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v5, s4, v5 -; GFX7-NEXT: v_and_b32_e32 v8, s4, v8 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 +; GFX7-NEXT: v_bfe_u32 v7, v0, 8, 8 +; GFX7-NEXT: v_bfe_u32 v9, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v6, s4, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v8, s4, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 ; GFX7-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; GFX7-NEXT: v_lshlrev_b32_e32 v5, 
16, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; GFX7-NEXT: v_or_b32_e32 v6, v6, v7 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v7, v8, v9 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX7-NEXT: v_or_b32_e32 v0, v6, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX7-NEXT: v_or_b32_e32 v1, v7, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v7 -; GFX7-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v8 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v6 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v9 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v5 ; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc ; GFX7-NEXT: v_lshlrev_b32_e32 v1, 3, v2 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, v1, v0 @@ -1163,25 +1062,24 @@ ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; GFX10-NEXT: s_mov_b32 s4, 8 -; GFX10-NEXT: s_movk_i32 s5, 0xff +; GFX10-NEXT: s_mov_b32 s5, 16 +; GFX10-NEXT: s_movk_i32 s6, 0xff ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v7, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, s4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v8, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX10-NEXT: v_and_or_b32 v0, v0, s5, v3 -; GFX10-NEXT: v_and_or_b32 v1, v1, s5, v4 -; GFX10-NEXT: 
v_lshrrev_b32_e32 v3, 2, v2 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v4, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v6, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v7, s5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v8, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s6, v4 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v5 +; GFX10-NEXT: v_and_or_b32 v1, v1, s6, v6 +; GFX10-NEXT: v_lshrrev_b32_e32 v5, 2, v2 ; GFX10-NEXT: v_and_b32_e32 v2, 3, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v7, v5 -; GFX10-NEXT: v_or3_b32 v1, v1, v8, v6 -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v7, v3 +; GFX10-NEXT: v_or3_b32 v1, v1, v8, v4 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v5 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 3, v2 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, v1, v0 @@ -1195,37 +1093,35 @@ ; GCN-LABEL: extractelement_sgpr_v8i8_vgpr_idx: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s8, 0xff +; GCN-NEXT: s_mov_b32 s6, 0x80008 +; GCN-NEXT: s_movk_i32 s4, 0xff ; GCN-NEXT: v_lshrrev_b32_e32 v1, 2, v0 ; GCN-NEXT: v_and_b32_e32 v0, 3, v0 -; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s2, s0, 8 -; GCN-NEXT: s_and_b32 s2, s2, s8 -; GCN-NEXT: s_lshr_b32 s3, s0, 16 -; GCN-NEXT: s_lshr_b32 s4, s0, 24 -; GCN-NEXT: s_and_b32 s0, s0, s8 -; GCN-NEXT: s_lshl_b32 s2, s2, 8 -; GCN-NEXT: s_or_b32 s0, s0, s2 -; GCN-NEXT: s_and_b32 s2, s3, s8 -; GCN-NEXT: s_lshl_b32 s2, s2, 16 -; GCN-NEXT: s_or_b32 s0, s0, s2 -; GCN-NEXT: s_lshl_b32 s2, s4, 24 -; GCN-NEXT: s_lshr_b32 s5, 
s1, 8 +; GCN-NEXT: s_bfe_u32 s7, s0, s6 +; GCN-NEXT: s_and_b32 s5, s0, s4 +; GCN-NEXT: s_lshl_b32 s7, s7, 8 +; GCN-NEXT: s_or_b32 s5, s5, s7 +; GCN-NEXT: s_mov_b32 s7, 0x80010 +; GCN-NEXT: s_lshr_b32 s2, s0, 24 +; GCN-NEXT: s_bfe_u32 s0, s0, s7 +; GCN-NEXT: s_lshl_b32 s0, s0, 16 +; GCN-NEXT: s_or_b32 s0, s5, s0 +; GCN-NEXT: s_lshl_b32 s2, s2, 24 ; GCN-NEXT: s_or_b32 s0, s0, s2 -; GCN-NEXT: s_and_b32 s2, s5, s8 -; GCN-NEXT: s_lshr_b32 s6, s1, 16 -; GCN-NEXT: s_lshr_b32 s7, s1, 24 -; GCN-NEXT: s_and_b32 s1, s1, s8 -; GCN-NEXT: s_lshl_b32 s2, s2, 8 -; GCN-NEXT: s_or_b32 s1, s1, s2 -; GCN-NEXT: s_and_b32 s2, s6, s8 -; GCN-NEXT: s_lshl_b32 s2, s2, 16 -; GCN-NEXT: s_or_b32 s1, s1, s2 -; GCN-NEXT: s_lshl_b32 s2, s7, 24 +; GCN-NEXT: s_and_b32 s2, s1, s4 +; GCN-NEXT: s_bfe_u32 s4, s1, s6 +; GCN-NEXT: s_lshr_b32 s3, s1, 24 +; GCN-NEXT: s_bfe_u32 s1, s1, s7 +; GCN-NEXT: s_lshl_b32 s4, s4, 8 +; GCN-NEXT: s_or_b32 s2, s2, s4 +; GCN-NEXT: s_lshl_b32 s1, s1, 16 +; GCN-NEXT: s_or_b32 s1, s2, s1 +; GCN-NEXT: s_lshl_b32 s2, s3, 24 ; GCN-NEXT: s_or_b32 s1, s1, s2 ; GCN-NEXT: v_mov_b32_e32 v2, s0 ; GCN-NEXT: v_mov_b32_e32 v3, s1 +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1 ; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc ; GCN-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GCN-NEXT: v_lshrrev_b32_e32 v0, v0, v1 @@ -1235,37 +1131,35 @@ ; GFX10-LABEL: extractelement_sgpr_v8i8_vgpr_idx: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 +; GFX10-NEXT: s_mov_b32 s3, 0x80008 ; GFX10-NEXT: s_movk_i32 s2, 0xff +; GFX10-NEXT: s_mov_b32 s4, 0x80010 ; GFX10-NEXT: v_lshrrev_b32_e32 v1, 2, v0 ; GFX10-NEXT: v_and_b32_e32 v0, 3, v0 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1 ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s6, s1, 8 -; GFX10-NEXT: s_lshr_b32 s3, s0, 8 -; GFX10-NEXT: s_lshr_b32 s4, s0, 16 -; GFX10-NEXT: s_and_b32 s6, s6, s2 -; GFX10-NEXT: s_lshr_b32 s7, s1, 16 -; GFX10-NEXT: s_lshr_b32 s5, s0, 24 -; GFX10-NEXT: s_and_b32 s3, s3, 
s2 -; GFX10-NEXT: s_lshr_b32 s8, s1, 24 -; GFX10-NEXT: s_and_b32 s0, s0, s2 -; GFX10-NEXT: s_and_b32 s1, s1, s2 -; GFX10-NEXT: s_and_b32 s4, s4, s2 -; GFX10-NEXT: s_and_b32 s2, s7, s2 -; GFX10-NEXT: s_lshl_b32 s6, s6, 8 -; GFX10-NEXT: s_lshl_b32 s2, s2, 16 -; GFX10-NEXT: s_or_b32 s1, s1, s6 -; GFX10-NEXT: s_lshl_b32 s7, s8, 24 -; GFX10-NEXT: s_or_b32 s1, s1, s2 +; GFX10-NEXT: s_bfe_u32 s8, s0, s3 +; GFX10-NEXT: s_bfe_u32 s3, s1, s3 +; GFX10-NEXT: s_and_b32 s7, s0, s2 +; GFX10-NEXT: s_lshr_b32 s6, s1, 24 +; GFX10-NEXT: s_and_b32 s2, s1, s2 +; GFX10-NEXT: s_bfe_u32 s1, s1, s4 ; GFX10-NEXT: s_lshl_b32 s3, s3, 8 -; GFX10-NEXT: s_or_b32 s1, s1, s7 -; GFX10-NEXT: s_lshl_b32 s2, s4, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s3 +; GFX10-NEXT: s_lshl_b32 s1, s1, 16 +; GFX10-NEXT: s_or_b32 s2, s2, s3 +; GFX10-NEXT: s_lshl_b32 s3, s6, 24 +; GFX10-NEXT: s_or_b32 s1, s2, s1 +; GFX10-NEXT: s_lshr_b32 s5, s0, 24 +; GFX10-NEXT: s_bfe_u32 s0, s0, s4 +; GFX10-NEXT: s_lshl_b32 s4, s8, 8 +; GFX10-NEXT: s_or_b32 s1, s1, s3 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_or_b32 s3, s7, s4 ; GFX10-NEXT: v_mov_b32_e32 v2, s1 -; GFX10-NEXT: s_lshl_b32 s5, s5, 24 +; GFX10-NEXT: s_lshl_b32 s2, s5, 24 +; GFX10-NEXT: s_or_b32 s0, s3, s0 ; GFX10-NEXT: s_or_b32 s0, s0, s2 -; GFX10-NEXT: s_or_b32 s0, s0, s5 ; GFX10-NEXT: v_cndmask_b32_e32 v1, s0, v2, vcc_lo ; GFX10-NEXT: v_lshrrev_b32_e32 v0, v0, v1 ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 @@ -1279,19 +1173,16 @@ ; GCN-LABEL: extractelement_sgpr_v8i8_idx0: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s4, 0xff ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s1, s0, 8 -; GCN-NEXT: s_and_b32 s1, s1, s4 -; GCN-NEXT: s_lshr_b32 s2, s0, 16 -; GCN-NEXT: s_lshr_b32 s3, s0, 24 -; GCN-NEXT: s_and_b32 s0, s0, s4 -; GCN-NEXT: s_lshl_b32 s1, s1, 8 -; GCN-NEXT: s_or_b32 s0, s0, s1 -; GCN-NEXT: s_and_b32 s1, s2, s4 -; GCN-NEXT: s_lshl_b32 s1, s1, 16 -; GCN-NEXT: s_or_b32 s0, s0, s1 -; GCN-NEXT: s_lshl_b32 s1, 
s3, 24 +; GCN-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GCN-NEXT: s_lshr_b32 s1, s0, 24 +; GCN-NEXT: s_and_b32 s2, s0, 0xff +; GCN-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GCN-NEXT: s_lshl_b32 s3, s3, 8 +; GCN-NEXT: s_or_b32 s2, s2, s3 +; GCN-NEXT: s_lshl_b32 s0, s0, 16 +; GCN-NEXT: s_or_b32 s0, s2, s0 +; GCN-NEXT: s_lshl_b32 s1, s1, 24 ; GCN-NEXT: s_or_b32 s0, s0, s1 ; GCN-NEXT: ; return to shader part epilog ; @@ -1299,18 +1190,15 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_movk_i32 s1, 0xff -; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s3, s0, 16 -; GFX10-NEXT: s_and_b32 s2, s2, s1 -; GFX10-NEXT: s_and_b32 s4, s0, s1 -; GFX10-NEXT: s_and_b32 s1, s3, s1 +; GFX10-NEXT: s_bfe_u32 s2, s0, 0x80008 +; GFX10-NEXT: s_bfe_u32 s3, s0, 0x80010 +; GFX10-NEXT: s_and_b32 s1, s0, 0xff ; GFX10-NEXT: s_lshl_b32 s2, s2, 8 ; GFX10-NEXT: s_lshr_b32 s0, s0, 24 -; GFX10-NEXT: s_lshl_b32 s1, s1, 16 -; GFX10-NEXT: s_or_b32 s2, s4, s2 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_or_b32 s1, s1, s2 ; GFX10-NEXT: s_lshl_b32 s0, s0, 24 -; GFX10-NEXT: s_or_b32 s1, s2, s1 +; GFX10-NEXT: s_or_b32 s1, s1, s3 ; GFX10-NEXT: s_or_b32 s0, s1, s0 ; GFX10-NEXT: ; return to shader part epilog %vector = load <8 x i8>, <8 x i8> addrspace(4)* %ptr @@ -1322,19 +1210,16 @@ ; GCN-LABEL: extractelement_sgpr_v8i8_idx1: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s4, 0xff ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s1, s0, 8 -; GCN-NEXT: s_and_b32 s1, s1, s4 -; GCN-NEXT: s_lshr_b32 s2, s0, 16 -; GCN-NEXT: s_lshr_b32 s3, s0, 24 -; GCN-NEXT: s_and_b32 s0, s0, s4 -; GCN-NEXT: s_lshl_b32 s1, s1, 8 -; GCN-NEXT: s_or_b32 s0, s0, s1 -; GCN-NEXT: s_and_b32 s1, s2, s4 -; GCN-NEXT: s_lshl_b32 s1, s1, 16 -; GCN-NEXT: s_or_b32 s0, s0, s1 -; GCN-NEXT: s_lshl_b32 s1, s3, 24 +; GCN-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GCN-NEXT: s_lshr_b32 s1, s0, 24 +; GCN-NEXT: s_and_b32 s2, s0, 
0xff +; GCN-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GCN-NEXT: s_lshl_b32 s3, s3, 8 +; GCN-NEXT: s_or_b32 s2, s2, s3 +; GCN-NEXT: s_lshl_b32 s0, s0, 16 +; GCN-NEXT: s_or_b32 s0, s2, s0 +; GCN-NEXT: s_lshl_b32 s1, s1, 24 ; GCN-NEXT: s_or_b32 s0, s0, s1 ; GCN-NEXT: s_lshr_b32 s0, s0, 8 ; GCN-NEXT: ; return to shader part epilog @@ -1343,18 +1228,15 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_movk_i32 s1, 0xff -; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s3, s0, 16 -; GFX10-NEXT: s_and_b32 s2, s2, s1 -; GFX10-NEXT: s_and_b32 s4, s0, s1 -; GFX10-NEXT: s_and_b32 s1, s3, s1 +; GFX10-NEXT: s_bfe_u32 s2, s0, 0x80008 +; GFX10-NEXT: s_bfe_u32 s3, s0, 0x80010 +; GFX10-NEXT: s_and_b32 s1, s0, 0xff ; GFX10-NEXT: s_lshl_b32 s2, s2, 8 ; GFX10-NEXT: s_lshr_b32 s0, s0, 24 -; GFX10-NEXT: s_lshl_b32 s1, s1, 16 -; GFX10-NEXT: s_or_b32 s2, s4, s2 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_or_b32 s1, s1, s2 ; GFX10-NEXT: s_lshl_b32 s0, s0, 24 -; GFX10-NEXT: s_or_b32 s1, s2, s1 +; GFX10-NEXT: s_or_b32 s1, s1, s3 ; GFX10-NEXT: s_or_b32 s0, s1, s0 ; GFX10-NEXT: s_lshr_b32 s0, s0, 8 ; GFX10-NEXT: ; return to shader part epilog @@ -1367,19 +1249,16 @@ ; GCN-LABEL: extractelement_sgpr_v8i8_idx2: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s4, 0xff ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s1, s0, 8 -; GCN-NEXT: s_and_b32 s1, s1, s4 -; GCN-NEXT: s_lshr_b32 s2, s0, 16 -; GCN-NEXT: s_lshr_b32 s3, s0, 24 -; GCN-NEXT: s_and_b32 s0, s0, s4 -; GCN-NEXT: s_lshl_b32 s1, s1, 8 -; GCN-NEXT: s_or_b32 s0, s0, s1 -; GCN-NEXT: s_and_b32 s1, s2, s4 -; GCN-NEXT: s_lshl_b32 s1, s1, 16 -; GCN-NEXT: s_or_b32 s0, s0, s1 -; GCN-NEXT: s_lshl_b32 s1, s3, 24 +; GCN-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GCN-NEXT: s_lshr_b32 s1, s0, 24 +; GCN-NEXT: s_and_b32 s2, s0, 0xff +; GCN-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GCN-NEXT: s_lshl_b32 s3, s3, 8 +; GCN-NEXT: s_or_b32 
s2, s2, s3 +; GCN-NEXT: s_lshl_b32 s0, s0, 16 +; GCN-NEXT: s_or_b32 s0, s2, s0 +; GCN-NEXT: s_lshl_b32 s1, s1, 24 ; GCN-NEXT: s_or_b32 s0, s0, s1 ; GCN-NEXT: s_lshr_b32 s0, s0, 16 ; GCN-NEXT: ; return to shader part epilog @@ -1388,18 +1267,15 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_movk_i32 s1, 0xff -; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s3, s0, 16 -; GFX10-NEXT: s_and_b32 s2, s2, s1 -; GFX10-NEXT: s_and_b32 s4, s0, s1 -; GFX10-NEXT: s_and_b32 s1, s3, s1 +; GFX10-NEXT: s_bfe_u32 s2, s0, 0x80008 +; GFX10-NEXT: s_bfe_u32 s3, s0, 0x80010 +; GFX10-NEXT: s_and_b32 s1, s0, 0xff ; GFX10-NEXT: s_lshl_b32 s2, s2, 8 ; GFX10-NEXT: s_lshr_b32 s0, s0, 24 -; GFX10-NEXT: s_lshl_b32 s1, s1, 16 -; GFX10-NEXT: s_or_b32 s2, s4, s2 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_or_b32 s1, s1, s2 ; GFX10-NEXT: s_lshl_b32 s0, s0, 24 -; GFX10-NEXT: s_or_b32 s1, s2, s1 +; GFX10-NEXT: s_or_b32 s1, s1, s3 ; GFX10-NEXT: s_or_b32 s0, s1, s0 ; GFX10-NEXT: s_lshr_b32 s0, s0, 16 ; GFX10-NEXT: ; return to shader part epilog @@ -1412,19 +1288,16 @@ ; GCN-LABEL: extractelement_sgpr_v8i8_idx3: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s4, 0xff ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s1, s0, 8 -; GCN-NEXT: s_and_b32 s1, s1, s4 -; GCN-NEXT: s_lshr_b32 s2, s0, 16 -; GCN-NEXT: s_lshr_b32 s3, s0, 24 -; GCN-NEXT: s_and_b32 s0, s0, s4 -; GCN-NEXT: s_lshl_b32 s1, s1, 8 -; GCN-NEXT: s_or_b32 s0, s0, s1 -; GCN-NEXT: s_and_b32 s1, s2, s4 -; GCN-NEXT: s_lshl_b32 s1, s1, 16 -; GCN-NEXT: s_or_b32 s0, s0, s1 -; GCN-NEXT: s_lshl_b32 s1, s3, 24 +; GCN-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GCN-NEXT: s_lshr_b32 s1, s0, 24 +; GCN-NEXT: s_and_b32 s2, s0, 0xff +; GCN-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GCN-NEXT: s_lshl_b32 s3, s3, 8 +; GCN-NEXT: s_or_b32 s2, s2, s3 +; GCN-NEXT: s_lshl_b32 s0, s0, 16 +; GCN-NEXT: s_or_b32 s0, s2, s0 +; GCN-NEXT: 
s_lshl_b32 s1, s1, 24 ; GCN-NEXT: s_or_b32 s0, s0, s1 ; GCN-NEXT: s_lshr_b32 s0, s0, 24 ; GCN-NEXT: ; return to shader part epilog @@ -1433,18 +1306,15 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_movk_i32 s1, 0xff -; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s3, s0, 16 -; GFX10-NEXT: s_and_b32 s2, s2, s1 -; GFX10-NEXT: s_and_b32 s4, s0, s1 -; GFX10-NEXT: s_and_b32 s1, s3, s1 +; GFX10-NEXT: s_bfe_u32 s2, s0, 0x80008 +; GFX10-NEXT: s_bfe_u32 s3, s0, 0x80010 +; GFX10-NEXT: s_and_b32 s1, s0, 0xff ; GFX10-NEXT: s_lshl_b32 s2, s2, 8 ; GFX10-NEXT: s_lshr_b32 s0, s0, 24 -; GFX10-NEXT: s_lshl_b32 s1, s1, 16 -; GFX10-NEXT: s_or_b32 s2, s4, s2 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_or_b32 s1, s1, s2 ; GFX10-NEXT: s_lshl_b32 s0, s0, 24 -; GFX10-NEXT: s_or_b32 s1, s2, s1 +; GFX10-NEXT: s_or_b32 s1, s1, s3 ; GFX10-NEXT: s_or_b32 s0, s1, s0 ; GFX10-NEXT: s_lshr_b32 s0, s0, 24 ; GFX10-NEXT: ; return to shader part epilog @@ -1457,38 +1327,32 @@ ; GCN-LABEL: extractelement_sgpr_v8i8_idx4: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s4, 0xff ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s0, s1, 8 -; GCN-NEXT: s_and_b32 s0, s0, s4 -; GCN-NEXT: s_lshr_b32 s2, s1, 16 -; GCN-NEXT: s_lshr_b32 s3, s1, 24 -; GCN-NEXT: s_and_b32 s1, s1, s4 -; GCN-NEXT: s_lshl_b32 s0, s0, 8 -; GCN-NEXT: s_or_b32 s0, s1, s0 -; GCN-NEXT: s_and_b32 s1, s2, s4 +; GCN-NEXT: s_bfe_u32 s3, s1, 0x80008 +; GCN-NEXT: s_lshr_b32 s0, s1, 24 +; GCN-NEXT: s_and_b32 s2, s1, 0xff +; GCN-NEXT: s_bfe_u32 s1, s1, 0x80010 +; GCN-NEXT: s_lshl_b32 s3, s3, 8 +; GCN-NEXT: s_or_b32 s2, s2, s3 ; GCN-NEXT: s_lshl_b32 s1, s1, 16 -; GCN-NEXT: s_or_b32 s0, s0, s1 -; GCN-NEXT: s_lshl_b32 s1, s3, 24 -; GCN-NEXT: s_or_b32 s0, s0, s1 +; GCN-NEXT: s_or_b32 s1, s2, s1 +; GCN-NEXT: s_lshl_b32 s0, s0, 24 +; GCN-NEXT: s_or_b32 s0, s1, s0 ; GCN-NEXT: ; return to shader part epilog ; ; 
GFX10-LABEL: extractelement_sgpr_v8i8_idx4: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_movk_i32 s0, 0xff -; GFX10-NEXT: s_lshr_b32 s2, s1, 8 -; GFX10-NEXT: s_lshr_b32 s3, s1, 16 -; GFX10-NEXT: s_and_b32 s2, s2, s0 -; GFX10-NEXT: s_and_b32 s4, s1, s0 -; GFX10-NEXT: s_and_b32 s0, s3, s0 +; GFX10-NEXT: s_bfe_u32 s2, s1, 0x80008 +; GFX10-NEXT: s_bfe_u32 s3, s1, 0x80010 +; GFX10-NEXT: s_and_b32 s0, s1, 0xff ; GFX10-NEXT: s_lshl_b32 s2, s2, 8 ; GFX10-NEXT: s_lshr_b32 s1, s1, 24 -; GFX10-NEXT: s_lshl_b32 s0, s0, 16 -; GFX10-NEXT: s_or_b32 s2, s4, s2 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_or_b32 s0, s0, s2 ; GFX10-NEXT: s_lshl_b32 s1, s1, 24 -; GFX10-NEXT: s_or_b32 s0, s2, s0 +; GFX10-NEXT: s_or_b32 s0, s0, s3 ; GFX10-NEXT: s_or_b32 s0, s0, s1 ; GFX10-NEXT: ; return to shader part epilog %vector = load <8 x i8>, <8 x i8> addrspace(4)* %ptr @@ -1500,20 +1364,17 @@ ; GCN-LABEL: extractelement_sgpr_v8i8_idx5: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s4, 0xff ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s0, s1, 8 -; GCN-NEXT: s_and_b32 s0, s0, s4 -; GCN-NEXT: s_lshr_b32 s2, s1, 16 -; GCN-NEXT: s_lshr_b32 s3, s1, 24 -; GCN-NEXT: s_and_b32 s1, s1, s4 -; GCN-NEXT: s_lshl_b32 s0, s0, 8 -; GCN-NEXT: s_or_b32 s0, s1, s0 -; GCN-NEXT: s_and_b32 s1, s2, s4 +; GCN-NEXT: s_bfe_u32 s3, s1, 0x80008 +; GCN-NEXT: s_lshr_b32 s0, s1, 24 +; GCN-NEXT: s_and_b32 s2, s1, 0xff +; GCN-NEXT: s_bfe_u32 s1, s1, 0x80010 +; GCN-NEXT: s_lshl_b32 s3, s3, 8 +; GCN-NEXT: s_or_b32 s2, s2, s3 ; GCN-NEXT: s_lshl_b32 s1, s1, 16 -; GCN-NEXT: s_or_b32 s0, s0, s1 -; GCN-NEXT: s_lshl_b32 s1, s3, 24 -; GCN-NEXT: s_or_b32 s0, s0, s1 +; GCN-NEXT: s_or_b32 s1, s2, s1 +; GCN-NEXT: s_lshl_b32 s0, s0, 24 +; GCN-NEXT: s_or_b32 s0, s1, s0 ; GCN-NEXT: s_lshr_b32 s0, s0, 8 ; GCN-NEXT: ; return to shader part epilog ; @@ -1521,18 +1382,15 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: 
s_load_dwordx2 s[0:1], s[2:3], 0x0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_movk_i32 s0, 0xff -; GFX10-NEXT: s_lshr_b32 s2, s1, 8 -; GFX10-NEXT: s_lshr_b32 s3, s1, 16 -; GFX10-NEXT: s_and_b32 s2, s2, s0 -; GFX10-NEXT: s_and_b32 s4, s1, s0 -; GFX10-NEXT: s_and_b32 s0, s3, s0 +; GFX10-NEXT: s_bfe_u32 s2, s1, 0x80008 +; GFX10-NEXT: s_bfe_u32 s3, s1, 0x80010 +; GFX10-NEXT: s_and_b32 s0, s1, 0xff ; GFX10-NEXT: s_lshl_b32 s2, s2, 8 ; GFX10-NEXT: s_lshr_b32 s1, s1, 24 -; GFX10-NEXT: s_lshl_b32 s0, s0, 16 -; GFX10-NEXT: s_or_b32 s2, s4, s2 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_or_b32 s0, s0, s2 ; GFX10-NEXT: s_lshl_b32 s1, s1, 24 -; GFX10-NEXT: s_or_b32 s0, s2, s0 +; GFX10-NEXT: s_or_b32 s0, s0, s3 ; GFX10-NEXT: s_or_b32 s0, s0, s1 ; GFX10-NEXT: s_lshr_b32 s0, s0, 8 ; GFX10-NEXT: ; return to shader part epilog @@ -1545,20 +1403,17 @@ ; GCN-LABEL: extractelement_sgpr_v8i8_idx6: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s4, 0xff ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s0, s1, 8 -; GCN-NEXT: s_and_b32 s0, s0, s4 -; GCN-NEXT: s_lshr_b32 s2, s1, 16 -; GCN-NEXT: s_lshr_b32 s3, s1, 24 -; GCN-NEXT: s_and_b32 s1, s1, s4 -; GCN-NEXT: s_lshl_b32 s0, s0, 8 -; GCN-NEXT: s_or_b32 s0, s1, s0 -; GCN-NEXT: s_and_b32 s1, s2, s4 +; GCN-NEXT: s_bfe_u32 s3, s1, 0x80008 +; GCN-NEXT: s_lshr_b32 s0, s1, 24 +; GCN-NEXT: s_and_b32 s2, s1, 0xff +; GCN-NEXT: s_bfe_u32 s1, s1, 0x80010 +; GCN-NEXT: s_lshl_b32 s3, s3, 8 +; GCN-NEXT: s_or_b32 s2, s2, s3 ; GCN-NEXT: s_lshl_b32 s1, s1, 16 -; GCN-NEXT: s_or_b32 s0, s0, s1 -; GCN-NEXT: s_lshl_b32 s1, s3, 24 -; GCN-NEXT: s_or_b32 s0, s0, s1 +; GCN-NEXT: s_or_b32 s1, s2, s1 +; GCN-NEXT: s_lshl_b32 s0, s0, 24 +; GCN-NEXT: s_or_b32 s0, s1, s0 ; GCN-NEXT: s_lshr_b32 s0, s0, 16 ; GCN-NEXT: ; return to shader part epilog ; @@ -1566,18 +1421,15 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_movk_i32 s0, 
0xff -; GFX10-NEXT: s_lshr_b32 s2, s1, 8 -; GFX10-NEXT: s_lshr_b32 s3, s1, 16 -; GFX10-NEXT: s_and_b32 s2, s2, s0 -; GFX10-NEXT: s_and_b32 s4, s1, s0 -; GFX10-NEXT: s_and_b32 s0, s3, s0 +; GFX10-NEXT: s_bfe_u32 s2, s1, 0x80008 +; GFX10-NEXT: s_bfe_u32 s3, s1, 0x80010 +; GFX10-NEXT: s_and_b32 s0, s1, 0xff ; GFX10-NEXT: s_lshl_b32 s2, s2, 8 ; GFX10-NEXT: s_lshr_b32 s1, s1, 24 -; GFX10-NEXT: s_lshl_b32 s0, s0, 16 -; GFX10-NEXT: s_or_b32 s2, s4, s2 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_or_b32 s0, s0, s2 ; GFX10-NEXT: s_lshl_b32 s1, s1, 24 -; GFX10-NEXT: s_or_b32 s0, s2, s0 +; GFX10-NEXT: s_or_b32 s0, s0, s3 ; GFX10-NEXT: s_or_b32 s0, s0, s1 ; GFX10-NEXT: s_lshr_b32 s0, s0, 16 ; GFX10-NEXT: ; return to shader part epilog @@ -1590,20 +1442,17 @@ ; GCN-LABEL: extractelement_sgpr_v8i8_idx7: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s4, 0xff ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s0, s1, 8 -; GCN-NEXT: s_and_b32 s0, s0, s4 -; GCN-NEXT: s_lshr_b32 s2, s1, 16 -; GCN-NEXT: s_lshr_b32 s3, s1, 24 -; GCN-NEXT: s_and_b32 s1, s1, s4 -; GCN-NEXT: s_lshl_b32 s0, s0, 8 -; GCN-NEXT: s_or_b32 s0, s1, s0 -; GCN-NEXT: s_and_b32 s1, s2, s4 +; GCN-NEXT: s_bfe_u32 s3, s1, 0x80008 +; GCN-NEXT: s_lshr_b32 s0, s1, 24 +; GCN-NEXT: s_and_b32 s2, s1, 0xff +; GCN-NEXT: s_bfe_u32 s1, s1, 0x80010 +; GCN-NEXT: s_lshl_b32 s3, s3, 8 +; GCN-NEXT: s_or_b32 s2, s2, s3 ; GCN-NEXT: s_lshl_b32 s1, s1, 16 -; GCN-NEXT: s_or_b32 s0, s0, s1 -; GCN-NEXT: s_lshl_b32 s1, s3, 24 -; GCN-NEXT: s_or_b32 s0, s0, s1 +; GCN-NEXT: s_or_b32 s1, s2, s1 +; GCN-NEXT: s_lshl_b32 s0, s0, 24 +; GCN-NEXT: s_or_b32 s0, s1, s0 ; GCN-NEXT: s_lshr_b32 s0, s0, 24 ; GCN-NEXT: ; return to shader part epilog ; @@ -1611,18 +1460,15 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_movk_i32 s0, 0xff -; GFX10-NEXT: s_lshr_b32 s2, s1, 8 -; GFX10-NEXT: s_lshr_b32 s3, s1, 16 -; GFX10-NEXT: 
s_and_b32 s2, s2, s0 -; GFX10-NEXT: s_and_b32 s4, s1, s0 -; GFX10-NEXT: s_and_b32 s0, s3, s0 +; GFX10-NEXT: s_bfe_u32 s2, s1, 0x80008 +; GFX10-NEXT: s_bfe_u32 s3, s1, 0x80010 +; GFX10-NEXT: s_and_b32 s0, s1, 0xff ; GFX10-NEXT: s_lshl_b32 s2, s2, 8 ; GFX10-NEXT: s_lshr_b32 s1, s1, 24 -; GFX10-NEXT: s_lshl_b32 s0, s0, 16 -; GFX10-NEXT: s_or_b32 s2, s4, s2 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_or_b32 s0, s0, s2 ; GFX10-NEXT: s_lshl_b32 s1, s1, 24 -; GFX10-NEXT: s_or_b32 s0, s2, s0 +; GFX10-NEXT: s_or_b32 s0, s0, s3 ; GFX10-NEXT: s_or_b32 s0, s0, s1 ; GFX10-NEXT: s_lshr_b32 s0, s0, 24 ; GFX10-NEXT: ; return to shader part epilog @@ -1636,15 +1482,15 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: v_mov_b32_e32 v3, 16 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v0, s5, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX9-NEXT: v_mov_b32_e32 v1, 0xff +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v4 ; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -1652,17 +1498,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: 
s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; @@ -1673,19 +1517,16 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX7-NEXT: 
v_lshlrev_b32_e32 v1, 24, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -1694,16 +1535,15 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx2 v[0:1], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v1 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v1, 8 +; GFX10-NEXT: v_mov_b32_e32 v2, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <8 x i8>, <8 x i8> addrspace(1)* %ptr %element = extractelement <8 x i8> %vector, i32 0 @@ -1716,15 +1556,15 @@ ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff +; GFX9-NEXT: v_mov_b32_e32 v2, 16 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: 
v_and_or_b32 v0, v0, s5, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, 0xff +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v4 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -1732,17 +1572,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -1754,19 +1592,16 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[4:7], 0 addr64 -; 
GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] @@ -1778,14 +1613,13 @@ ; GFX10-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v1 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v1, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v3 +; 
GFX10-NEXT: v_or3_b32 v0, v0, v1, v2 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <8 x i8>, <8 x i8> addrspace(1)* %ptr @@ -1798,16 +1632,16 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: s_mov_b32 s4, 16 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v0, s5, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, 0xff +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX9-NEXT: v_or3_b32 v0, v0, v4, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -1815,17 +1649,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: 
v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -1837,19 +1669,16 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] @@ -1859,14 +1688,13 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx2 v[0:1], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: 
s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 +; GFX10-NEXT: v_mov_b32_e32 v1, 8 +; GFX10-NEXT: s_mov_b32 s4, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 ; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 16, v0 @@ -1881,15 +1709,15 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: v_mov_b32_e32 v3, 16 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v0, s5, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX9-NEXT: v_mov_b32_e32 v1, 0xff +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v4 ; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 ; 
GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] @@ -1898,17 +1726,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -1920,19 +1746,16 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 
8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] @@ -1942,16 +1765,15 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx2 v[0:1], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v1 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v1, 8 +; GFX10-NEXT: v_mov_b32_e32 v2, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <8 x i8>, <8 x i8> addrspace(1)* %ptr @@ -1964,15 +1786,15 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; 
GFX9-NEXT: v_mov_b32_e32 v3, 16 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; GFX9-NEXT: v_and_b32_sdwa v3, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v1, s5, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX9-NEXT: v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v1, v0, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v4 ; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -1980,17 +1802,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v2, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: 
v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; @@ -2001,20 +1821,17 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; GFX7-NEXT: v_bfe_u32 v3, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v2, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: extractelement_vgpr_v8i8_idx4: @@ -2022,16 +1839,15 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx2 v[0:1], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v1, s4, v0 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v0, 8 +; 
GFX10-NEXT: v_mov_b32_e32 v2, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v1, v0 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <8 x i8>, <8 x i8> addrspace(1)* %ptr %element = extractelement <8 x i8> %vector, i32 4 @@ -2044,15 +1860,15 @@ ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff +; GFX9-NEXT: v_mov_b32_e32 v2, 16 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; GFX9-NEXT: v_and_b32_sdwa v3, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v1, s5, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX9-NEXT: v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v1, v0, v4 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -2060,17 +1876,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 
-; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v2, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -2082,20 +1896,17 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; GFX7-NEXT: v_bfe_u32 v3, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v2, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: 
v_lshlrev_b32_e32 v1, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -2106,14 +1917,13 @@ ; GFX10-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v1, s4, v0 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v0, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v1, 0xff, v1, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v1, v0, v2 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <8 x i8>, <8 x i8> addrspace(1)* %ptr @@ -2126,16 +1936,16 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: s_mov_b32 s4, 16 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; GFX9-NEXT: v_and_b32_sdwa v3, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v1, s5, v0 -; GFX9-NEXT: 
v_lshlrev_b32_e32 v1, 24, v2 -; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX9-NEXT: v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v1, v0, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX9-NEXT: v_or3_b32 v0, v0, v4, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -2143,17 +1953,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v2, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -2165,20 +1973,17 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: 
s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; GFX7-NEXT: v_bfe_u32 v3, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v2, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -2187,14 +1992,13 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx2 v[0:1], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v1 +; GFX10-NEXT: v_mov_b32_e32 v0, 8 +; GFX10-NEXT: s_mov_b32 s4, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v1, s4, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v1, v0 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 ; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX10-NEXT: 
v_lshrrev_b32_e32 v0, 16, v0 @@ -2209,15 +2013,15 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: v_mov_b32_e32 v3, 16 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; GFX9-NEXT: v_and_b32_sdwa v3, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v1, s5, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX9-NEXT: v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v1, v0, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v4 ; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] @@ -2226,17 +2030,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v2, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v1 
+; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -2248,20 +2050,17 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; GFX7-NEXT: v_bfe_u32 v3, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v2, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -2270,16 +2069,15 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx2 v[0:1], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v1, s4, v0 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v0, 8 +; GFX10-NEXT: v_mov_b32_e32 v2, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v1, v0 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <8 x i8>, <8 x i8> addrspace(1)* %ptr @@ -2291,55 +2089,49 @@ ; GCN-LABEL: extractelement_sgpr_v16i8_sgpr_idx: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s17, 0xff +; GCN-NEXT: s_mov_b32 s11, 0x80008 +; GCN-NEXT: s_movk_i32 s9, 0xff ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s5, s0, 8 -; GCN-NEXT: s_and_b32 s5, s5, s17 -; GCN-NEXT: s_lshr_b32 s6, s0, 16 -; GCN-NEXT: s_lshr_b32 s7, s0, 24 -; GCN-NEXT: s_and_b32 s0, s0, s17 -; GCN-NEXT: s_lshl_b32 s5, s5, 8 -; GCN-NEXT: s_or_b32 s0, s0, s5 -; GCN-NEXT: s_and_b32 s5, s6, s17 -; GCN-NEXT: s_lshl_b32 s5, s5, 16 -; GCN-NEXT: s_or_b32 s0, s0, s5 -; GCN-NEXT: s_lshl_b32 s5, s7, 24 -; GCN-NEXT: s_lshr_b32 s8, s1, 8 +; GCN-NEXT: s_bfe_u32 s12, s0, s11 +; GCN-NEXT: s_and_b32 s10, s0, s9 +; GCN-NEXT: s_lshl_b32 s12, s12, 8 +; GCN-NEXT: s_or_b32 s10, s10, s12 +; GCN-NEXT: s_mov_b32 s12, 0x80010 +; GCN-NEXT: s_lshr_b32 s5, s0, 24 +; GCN-NEXT: s_bfe_u32 s0, s0, s12 +; GCN-NEXT: s_lshl_b32 s0, s0, 16 +; GCN-NEXT: s_or_b32 s0, s10, s0 +; GCN-NEXT: s_bfe_u32 s10, s1, s11 +; GCN-NEXT: s_lshl_b32 s5, s5, 24 ; 
GCN-NEXT: s_or_b32 s0, s0, s5 -; GCN-NEXT: s_and_b32 s5, s8, s17 -; GCN-NEXT: s_lshr_b32 s9, s1, 16 -; GCN-NEXT: s_lshr_b32 s10, s1, 24 -; GCN-NEXT: s_and_b32 s1, s1, s17 -; GCN-NEXT: s_lshl_b32 s5, s5, 8 -; GCN-NEXT: s_or_b32 s1, s1, s5 -; GCN-NEXT: s_and_b32 s5, s9, s17 -; GCN-NEXT: s_lshl_b32 s5, s5, 16 -; GCN-NEXT: s_or_b32 s1, s1, s5 -; GCN-NEXT: s_lshl_b32 s5, s10, 24 -; GCN-NEXT: s_lshr_b32 s11, s2, 8 +; GCN-NEXT: s_lshr_b32 s6, s1, 24 +; GCN-NEXT: s_and_b32 s5, s1, s9 +; GCN-NEXT: s_bfe_u32 s1, s1, s12 +; GCN-NEXT: s_lshl_b32 s10, s10, 8 +; GCN-NEXT: s_or_b32 s5, s5, s10 +; GCN-NEXT: s_lshl_b32 s1, s1, 16 +; GCN-NEXT: s_or_b32 s1, s5, s1 +; GCN-NEXT: s_lshl_b32 s5, s6, 24 +; GCN-NEXT: s_bfe_u32 s6, s2, s11 ; GCN-NEXT: s_or_b32 s1, s1, s5 -; GCN-NEXT: s_and_b32 s5, s11, s17 -; GCN-NEXT: s_lshr_b32 s12, s2, 16 -; GCN-NEXT: s_lshr_b32 s13, s2, 24 -; GCN-NEXT: s_and_b32 s2, s2, s17 -; GCN-NEXT: s_lshl_b32 s5, s5, 8 -; GCN-NEXT: s_or_b32 s2, s2, s5 -; GCN-NEXT: s_and_b32 s5, s12, s17 -; GCN-NEXT: s_lshl_b32 s5, s5, 16 -; GCN-NEXT: s_or_b32 s2, s2, s5 -; GCN-NEXT: s_lshl_b32 s5, s13, 24 -; GCN-NEXT: s_lshr_b32 s14, s3, 8 +; GCN-NEXT: s_lshr_b32 s7, s2, 24 +; GCN-NEXT: s_and_b32 s5, s2, s9 +; GCN-NEXT: s_bfe_u32 s2, s2, s12 +; GCN-NEXT: s_lshl_b32 s6, s6, 8 +; GCN-NEXT: s_or_b32 s5, s5, s6 +; GCN-NEXT: s_lshl_b32 s2, s2, 16 +; GCN-NEXT: s_bfe_u32 s6, s3, s11 +; GCN-NEXT: s_or_b32 s2, s5, s2 +; GCN-NEXT: s_lshl_b32 s5, s7, 24 ; GCN-NEXT: s_or_b32 s2, s2, s5 -; GCN-NEXT: s_and_b32 s5, s14, s17 -; GCN-NEXT: s_lshr_b32 s15, s3, 16 -; GCN-NEXT: s_lshr_b32 s16, s3, 24 -; GCN-NEXT: s_and_b32 s3, s3, s17 -; GCN-NEXT: s_lshl_b32 s5, s5, 8 -; GCN-NEXT: s_or_b32 s3, s3, s5 -; GCN-NEXT: s_and_b32 s5, s15, s17 -; GCN-NEXT: s_lshl_b32 s5, s5, 16 -; GCN-NEXT: s_or_b32 s3, s3, s5 -; GCN-NEXT: s_lshl_b32 s5, s16, 24 +; GCN-NEXT: s_lshr_b32 s8, s3, 24 +; GCN-NEXT: s_and_b32 s5, s3, s9 +; GCN-NEXT: s_bfe_u32 s3, s3, s12 +; GCN-NEXT: s_lshl_b32 s6, s6, 8 +; GCN-NEXT: s_or_b32 s5, s5, 
s6 +; GCN-NEXT: s_lshl_b32 s3, s3, 16 +; GCN-NEXT: s_or_b32 s3, s5, s3 +; GCN-NEXT: s_lshl_b32 s5, s8, 24 ; GCN-NEXT: s_or_b32 s3, s3, s5 ; GCN-NEXT: s_lshr_b32 s5, s4, 2 ; GCN-NEXT: s_cmp_eq_u32 s5, 1 @@ -2356,56 +2148,50 @@ ; GFX10-LABEL: extractelement_sgpr_v16i8_sgpr_idx: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 +; GFX10-NEXT: s_mov_b32 s6, 0x80008 ; GFX10-NEXT: s_movk_i32 s5, 0xff +; GFX10-NEXT: s_mov_b32 s7, 0x80010 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s6, s0, 8 -; GFX10-NEXT: s_lshr_b32 s7, s0, 16 -; GFX10-NEXT: s_and_b32 s6, s6, s5 +; GFX10-NEXT: s_bfe_u32 s13, s0, s6 ; GFX10-NEXT: s_lshr_b32 s8, s0, 24 -; GFX10-NEXT: s_and_b32 s7, s7, s5 -; GFX10-NEXT: s_and_b32 s0, s0, s5 +; GFX10-NEXT: s_and_b32 s12, s0, s5 +; GFX10-NEXT: s_bfe_u32 s0, s0, s7 +; GFX10-NEXT: s_lshl_b32 s13, s13, 8 +; GFX10-NEXT: s_bfe_u32 s15, s1, s6 +; GFX10-NEXT: s_bfe_u32 s17, s2, s6 +; GFX10-NEXT: s_bfe_u32 s6, s3, s6 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_or_b32 s12, s12, s13 +; GFX10-NEXT: s_lshr_b32 s9, s1, 24 +; GFX10-NEXT: s_and_b32 s14, s1, s5 +; GFX10-NEXT: s_bfe_u32 s1, s1, s7 +; GFX10-NEXT: s_and_b32 s16, s2, s5 +; GFX10-NEXT: s_lshr_b32 s10, s2, 24 +; GFX10-NEXT: s_bfe_u32 s2, s2, s7 +; GFX10-NEXT: s_lshl_b32 s15, s15, 8 +; GFX10-NEXT: s_lshr_b32 s11, s3, 24 +; GFX10-NEXT: s_and_b32 s5, s3, s5 +; GFX10-NEXT: s_bfe_u32 s3, s3, s7 ; GFX10-NEXT: s_lshl_b32 s6, s6, 8 -; GFX10-NEXT: s_lshr_b32 s12, s2, 8 -; GFX10-NEXT: s_lshl_b32 s7, s7, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s6 -; GFX10-NEXT: s_lshr_b32 s13, s2, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s7 -; GFX10-NEXT: s_and_b32 s7, s12, s5 ; GFX10-NEXT: s_lshl_b32 s8, s8, 24 -; GFX10-NEXT: s_lshr_b32 s9, s1, 8 -; GFX10-NEXT: s_lshr_b32 s14, s2, 24 +; GFX10-NEXT: s_or_b32 s0, s12, s0 +; GFX10-NEXT: s_lshl_b32 s17, s17, 8 ; GFX10-NEXT: s_or_b32 s0, s0, s8 -; GFX10-NEXT: s_and_b32 s2, s2, s5 -; GFX10-NEXT: s_lshl_b32 s7, s7, 8 -; GFX10-NEXT: s_and_b32 s8, s13, s5 -; 
GFX10-NEXT: s_lshr_b32 s10, s1, 16 -; GFX10-NEXT: s_and_b32 s9, s9, s5 -; GFX10-NEXT: s_or_b32 s2, s2, s7 -; GFX10-NEXT: s_lshl_b32 s7, s8, 16 -; GFX10-NEXT: s_lshr_b32 s15, s3, 8 -; GFX10-NEXT: s_lshr_b32 s11, s1, 24 -; GFX10-NEXT: s_and_b32 s10, s10, s5 -; GFX10-NEXT: s_and_b32 s1, s1, s5 -; GFX10-NEXT: s_lshl_b32 s9, s9, 8 -; GFX10-NEXT: s_or_b32 s2, s2, s7 -; GFX10-NEXT: s_and_b32 s7, s15, s5 -; GFX10-NEXT: s_lshr_b32 s16, s3, 16 -; GFX10-NEXT: s_lshl_b32 s10, s10, 16 -; GFX10-NEXT: s_or_b32 s1, s1, s9 -; GFX10-NEXT: s_lshr_b32 s17, s3, 24 -; GFX10-NEXT: s_and_b32 s3, s3, s5 -; GFX10-NEXT: s_and_b32 s5, s16, s5 -; GFX10-NEXT: s_lshl_b32 s7, s7, 8 -; GFX10-NEXT: s_or_b32 s1, s1, s10 -; GFX10-NEXT: s_lshl_b32 s6, s11, 24 -; GFX10-NEXT: s_or_b32 s3, s3, s7 -; GFX10-NEXT: s_lshl_b32 s5, s5, 16 -; GFX10-NEXT: s_or_b32 s1, s1, s6 -; GFX10-NEXT: s_lshl_b32 s6, s14, 24 -; GFX10-NEXT: s_or_b32 s3, s3, s5 -; GFX10-NEXT: s_lshl_b32 s5, s17, 24 -; GFX10-NEXT: s_or_b32 s2, s2, s6 +; GFX10-NEXT: s_or_b32 s5, s5, s6 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_lshl_b32 s1, s1, 16 +; GFX10-NEXT: s_or_b32 s13, s14, s15 +; GFX10-NEXT: s_or_b32 s8, s16, s17 +; GFX10-NEXT: s_lshl_b32 s2, s2, 16 +; GFX10-NEXT: s_or_b32 s3, s5, s3 +; GFX10-NEXT: s_or_b32 s2, s8, s2 +; GFX10-NEXT: s_lshl_b32 s8, s10, 24 +; GFX10-NEXT: s_lshl_b32 s5, s11, 24 +; GFX10-NEXT: s_lshl_b32 s9, s9, 24 +; GFX10-NEXT: s_or_b32 s1, s13, s1 ; GFX10-NEXT: s_lshr_b32 s6, s4, 2 +; GFX10-NEXT: s_or_b32 s1, s1, s9 +; GFX10-NEXT: s_or_b32 s2, s2, s8 ; GFX10-NEXT: s_or_b32 s3, s3, s5 ; GFX10-NEXT: s_cmp_eq_u32 s6, 1 ; GFX10-NEXT: s_cselect_b32 s0, s1, s0 @@ -2427,45 +2213,43 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off ; GFX9-NEXT: s_mov_b32 s0, 8 +; GFX9-NEXT: s_mov_b32 s1, 16 ; GFX9-NEXT: v_mov_b32_e32 v5, 8 -; GFX9-NEXT: s_movk_i32 s1, 0xff -; GFX9-NEXT: s_lshr_b32 s3, s2, 2 +; GFX9-NEXT: s_movk_i32 s3, 0xff +; GFX9-NEXT: s_lshr_b32 s4, s2, 2 +; GFX9-NEXT: v_mov_b32_e32 v6, 16 ; 
GFX9-NEXT: v_mov_b32_e32 v4, 0xff -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s3, 1 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s4, 1 ; GFX9-NEXT: s_and_b32 s2, s2, 3 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v8, 8, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v10, 8, v2 -; GFX9-NEXT: v_lshlrev_b32_sdwa v6, s0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v8, v5, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v11, 24, v2 -; GFX9-NEXT: v_and_b32_sdwa v14, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v15, v1, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshrrev_b32_e32 v12, 8, v3 -; GFX9-NEXT: v_lshlrev_b32_sdwa v10, v5, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v11, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v13, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v12, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v14, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v15, v5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v11 ; GFX9-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX9-NEXT: v_and_or_b32 v0, v0, s1, v6 +; GFX9-NEXT: v_and_or_b32 v1, v1, s3, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; GFX9-NEXT: v_lshlrev_b32_sdwa v16, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_2 +; GFX9-NEXT: v_lshrrev_b32_e32 v10, 24, v3 +; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v5, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v2, v2, v4, v15 ; GFX9-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX9-NEXT: v_and_or_b32 v1, v1, s1, v8 -; GFX9-NEXT: v_and_b32_sdwa v16, v2, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshrrev_b32_e32 v13, 24, v3 -; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v5, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX9-NEXT: v_and_or_b32 v2, v2, s1, v10 -; GFX9-NEXT: v_or3_b32 v0, v0, v14, v7 -; GFX9-NEXT: v_or3_b32 v1, v1, v15, v9 -; GFX9-NEXT: v_and_b32_sdwa v17, v3, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; GFX9-NEXT: v_lshlrev_b32_e32 v12, 24, v13 +; GFX9-NEXT: v_or3_b32 v0, v0, v12, v7 +; GFX9-NEXT: v_or3_b32 v1, v1, v14, v8 +; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v6, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_and_or_b32 v3, v3, v4, v5 -; GFX9-NEXT: v_or3_b32 v2, v2, v16, v11 -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s3, 2 +; GFX9-NEXT: v_lshlrev_b32_e32 v4, 24, v10 +; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc +; GFX9-NEXT: v_or3_b32 v2, v2, v16, v9 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s4, 2 ; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc -; GFX9-NEXT: v_or3_b32 v3, v3, v17, v12 -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s3, 3 +; GFX9-NEXT: v_or3_b32 v3, v3, v6, v4 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s4, 3 ; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; GFX9-NEXT: s_lshl_b32 s0, s2, 3 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, s0, v0 @@ -2475,51 +2259,46 @@ ; GFX8-LABEL: extractelement_vgpr_v16i8_sgpr_idx: ; GFX8: ; %bb.0: ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s0, 0xff -; GFX8-NEXT: v_mov_b32_e32 v5, 8 +; GFX8-NEXT: v_mov_b32_e32 v4, 8 +; 
GFX8-NEXT: v_mov_b32_e32 v5, 16 ; GFX8-NEXT: v_mov_b32_e32 v6, 8 -; GFX8-NEXT: v_mov_b32_e32 v7, s0 -; GFX8-NEXT: v_mov_b32_e32 v4, 0xff +; GFX8-NEXT: v_mov_b32_e32 v7, 16 ; GFX8-NEXT: s_lshr_b32 s0, s2, 2 ; GFX8-NEXT: v_cmp_eq_u32_e64 vcc, s0, 1 ; GFX8-NEXT: s_and_b32 s1, s2, 3 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 8, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v9, 24, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v5, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_e32 v8, 24, v9 -; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v6, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v12, 8, v2 -; GFX8-NEXT: v_and_b32_sdwa v16, v0, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v7, v1, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v1, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v10, 24, v11 -; GFX8-NEXT: v_lshrrev_b32_e32 v14, 8, v3 -; GFX8-NEXT: v_lshlrev_b32_sdwa v11, v6, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v16 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v7 -; GFX8-NEXT: v_lshrrev_b32_e32 v13, 24, v2 -; GFX8-NEXT: v_and_b32_sdwa v17, v2, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v2, v2, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_sdwa v6, v6, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v15, 24, v3 -; GFX8-NEXT: v_and_b32_sdwa v4, v3, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD 
src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v12, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v8, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v13, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v14, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v9, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v15, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_lshrrev_b32_e32 v10, 24, v2 +; GFX8-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v9 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v5 +; GFX8-NEXT: v_lshlrev_b32_sdwa v6, v6, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v13 +; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v3 +; GFX8-NEXT: v_lshlrev_b32_sdwa v7, v7, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v3, v3, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v12, 24, v13 -; GFX8-NEXT: v_or_b32_e32 v2, v2, v17 +; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v10 +; GFX8-NEXT: v_or_b32_e32 v2, v2, v15 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v8 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v10 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v4 ; GFX8-NEXT: v_cndmask_b32_e32 
v0, v0, v1, vcc -; GFX8-NEXT: v_lshlrev_b32_e32 v13, 24, v15 -; GFX8-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX8-NEXT: v_or_b32_e32 v2, v2, v12 +; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v11 +; GFX8-NEXT: v_or_b32_e32 v3, v3, v7 +; GFX8-NEXT: v_or_b32_e32 v2, v2, v9 ; GFX8-NEXT: v_cmp_eq_u32_e64 vcc, s0, 2 ; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc ; GFX8-NEXT: v_cmp_eq_u32_e64 vcc, s0, 3 -; GFX8-NEXT: v_or_b32_e32 v3, v3, v13 +; GFX8-NEXT: v_or_b32_e32 v3, v3, v6 ; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; GFX8-NEXT: s_lshl_b32 s0, s1, 3 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, s0, v0 @@ -2538,57 +2317,49 @@ ; GFX7-NEXT: v_cmp_eq_u32_e64 vcc, s1, 1 ; GFX7-NEXT: s_and_b32 s2, s2, 3 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 16, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 8, v2 -; GFX7-NEXT: v_and_b32_e32 v5, s0, v5 -; GFX7-NEXT: v_and_b32_e32 v8, s0, v8 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v10, 24, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v14, 8, v3 -; GFX7-NEXT: v_and_b32_e32 v6, s0, v6 -; GFX7-NEXT: v_and_b32_e32 v9, s0, v9 -; GFX7-NEXT: v_and_b32_e32 v11, s0, v11 -; GFX7-NEXT: v_and_b32_e32 v0, s0, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 -; GFX7-NEXT: v_and_b32_e32 v1, s0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v8, 8, v8 -; GFX7-NEXT: v_lshrrev_b32_e32 v13, 24, v2 -; GFX7-NEXT: v_and_b32_e32 v12, v12, v4 -; GFX7-NEXT: v_and_b32_e32 v14, v14, v4 -; GFX7-NEXT: v_and_b32_e32 v2, s0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v8 -; GFX7-NEXT: v_lshrrev_b32_e32 v16, 24, v3 -; GFX7-NEXT: v_and_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_and_b32_e32 v4, v15, 
v4 +; GFX7-NEXT: v_bfe_u32 v10, v0, 8, 8 +; GFX7-NEXT: v_bfe_u32 v12, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v1 +; GFX7-NEXT: v_bfe_u32 v14, v2, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v9, s0, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v11, s0, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v10, 8, v10 +; GFX7-NEXT: v_lshlrev_b32_e32 v12, 8, v12 +; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v2 +; GFX7-NEXT: v_and_b32_e32 v13, v2, v4 +; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 8 +; GFX7-NEXT: v_bfe_u32 v15, v3, 8, 8 +; GFX7-NEXT: v_or_b32_e32 v9, v9, v10 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v10, v11, v12 ; GFX7-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; GFX7-NEXT: v_lshrrev_b32_e32 v8, 24, v3 +; GFX7-NEXT: v_and_b32_e32 v4, v3, v4 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v15, 8, v15 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX7-NEXT: v_or_b32_e32 v0, v9, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; GFX7-NEXT: v_or_b32_e32 v1, v10, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_or_b32_e32 v11, v13, v14 ; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v6 -; GFX7-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v9 -; GFX7-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v11 -; GFX7-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v12 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v14 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v7 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v10 +; GFX7-NEXT: v_or_b32_e32 v2, v11, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v15 +; GFX7-NEXT: v_or_b32_e32 v0, v0, v5 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v6 ; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc -; GFX7-NEXT: v_lshlrev_b32_e32 v15, 24, 
v16 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v13 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; GFX7-NEXT: v_or_b32_e32 v3, v4, v3 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v7 ; GFX7-NEXT: v_cmp_eq_u32_e64 vcc, s1, 2 ; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc -; GFX7-NEXT: v_or_b32_e32 v3, v3, v15 +; GFX7-NEXT: v_or_b32_e32 v3, v3, v8 ; GFX7-NEXT: v_cmp_eq_u32_e64 vcc, s1, 3 ; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; GFX7-NEXT: s_lshl_b32 s0, s2, 3 @@ -2599,43 +2370,41 @@ ; GFX10-LABEL: extractelement_vgpr_v16i8_sgpr_idx: ; GFX10: ; %bb.0: ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX10-NEXT: v_mov_b32_e32 v5, 8 ; GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: s_movk_i32 s1, 0xff +; GFX10-NEXT: v_mov_b32_e32 v5, 8 +; GFX10-NEXT: s_mov_b32 s1, 16 +; GFX10-NEXT: s_movk_i32 s3, 0xff +; GFX10-NEXT: v_mov_b32_e32 v6, 16 ; GFX10-NEXT: v_mov_b32_e32 v4, 0xff ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v6, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v8, 8, v1 ; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v9, 24, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v10, 8, v2 -; GFX10-NEXT: v_lshlrev_b32_sdwa v6, s0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v8, v5, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshrrev_b32_e32 v11, 24, v2 -; GFX10-NEXT: v_and_b32_sdwa v13, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_b32_sdwa v14, v1, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v6 +; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v10, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v12, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v9, 
24, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v11, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v13, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v14, v5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_and_or_b32 v0, v0, s3, v10 ; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX10-NEXT: v_lshrrev_b32_e32 v12, 8, v3 -; GFX10-NEXT: v_lshlrev_b32_sdwa v10, v5, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX10-NEXT: v_and_or_b32 v1, v1, s1, v8 +; GFX10-NEXT: v_and_or_b32 v1, v1, s3, v12 +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 24, v8 ; GFX10-NEXT: s_lshr_b32 s0, s2, 2 -; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v5, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v15, v2, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX10-NEXT: v_and_or_b32 v2, v2, s1, v10 -; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v3 -; GFX10-NEXT: v_or3_b32 v0, v0, v13, v7 -; GFX10-NEXT: v_or3_b32 v1, v1, v14, v9 +; GFX10-NEXT: v_lshlrev_b32_sdwa v15, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v2, v2, v4, v14 +; GFX10-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v5, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v11, v7 +; GFX10-NEXT: v_or3_b32 v1, v1, v13, v8 ; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s0, 1 -; GFX10-NEXT: v_and_or_b32 v5, v3, v4, v5 -; GFX10-NEXT: v_and_b32_sdwa v3, v3, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_or3_b32 v2, v2, v15, v11 -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v6 +; GFX10-NEXT: v_and_or_b32 v4, v3, 
v4, v5 +; GFX10-NEXT: v_or3_b32 v2, v2, v15, v9 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v6, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v10 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s0, 2 -; GFX10-NEXT: v_or3_b32 v1, v5, v3, v4 +; GFX10-NEXT: v_or3_b32 v1, v4, v3, v5 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s0, 3 ; GFX10-NEXT: s_and_b32 s0, s2, 3 @@ -2655,45 +2424,43 @@ ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[3:6], v[0:1], off ; GFX9-NEXT: s_mov_b32 s4, 8 +; GFX9-NEXT: s_mov_b32 s5, 16 ; GFX9-NEXT: v_mov_b32_e32 v1, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff -; GFX9-NEXT: v_lshrrev_b32_e32 v7, 2, v2 +; GFX9-NEXT: s_movk_i32 s6, 0xff +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 2, v2 +; GFX9-NEXT: v_mov_b32_e32 v7, 16 ; GFX9-NEXT: v_mov_b32_e32 v0, 0xff -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8 ; GFX9-NEXT: v_and_b32_e32 v2, 3, v2 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v8, 8, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v10, 8, v4 ; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v11, 24, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v12, 8, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v14, 8, v6 -; GFX9-NEXT: v_lshlrev_b32_sdwa v8, s4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v10, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v13, 24, v5 -; GFX9-NEXT: v_and_b32_sdwa v16, v3, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v17, v4, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_sdwa v12, v1, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_lshrrev_b32_e32 
v10, 24, v4 +; GFX9-NEXT: v_lshlrev_b32_sdwa v13, s4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v15, s4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v11, 24, v5 +; GFX9-NEXT: v_lshlrev_b32_sdwa v14, s5, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v16, s5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v17, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v12, 24, v6 +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v3, v3, s6, v13 ; GFX9-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; GFX9-NEXT: v_and_or_b32 v4, v4, s6, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; GFX9-NEXT: v_lshlrev_b32_sdwa v18, v7, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v5, v5, v0, v17 ; GFX9-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_or_b32 v3, v3, s5, v8 -; GFX9-NEXT: v_and_or_b32 v4, v4, s5, v10 -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 24, v6 -; GFX9-NEXT: v_and_b32_sdwa v18, v5, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v19, v6, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; GFX9-NEXT: v_and_or_b32 v0, v6, v0, v1 -; GFX9-NEXT: v_or3_b32 v1, v3, v16, v9 -; GFX9-NEXT: v_or3_b32 v3, v4, v17, v11 -; GFX9-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; GFX9-NEXT: v_and_or_b32 v5, v5, s5, v12 -; GFX9-NEXT: v_lshlrev_b32_e32 v14, 24, v15 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; GFX9-NEXT: v_or3_b32 v4, v5, v18, v13 -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 2, v7 -; 
GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc -; GFX9-NEXT: v_or3_b32 v0, v0, v19, v14 -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 3, v7 +; GFX9-NEXT: v_lshlrev_b32_sdwa v7, v7, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v12 +; GFX9-NEXT: v_or3_b32 v3, v3, v14, v9 +; GFX9-NEXT: v_or3_b32 v4, v4, v16, v10 +; GFX9-NEXT: v_or3_b32 v0, v0, v7, v1 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc +; GFX9-NEXT: v_or3_b32 v5, v5, v18, v11 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 2, v8 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 3, v8 ; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 3, v2 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, v1, v0 @@ -2703,52 +2470,47 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[3:6], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff -; GFX8-NEXT: v_mov_b32_e32 v1, 8 +; GFX8-NEXT: v_mov_b32_e32 v0, 8 +; GFX8-NEXT: v_mov_b32_e32 v1, 16 ; GFX8-NEXT: v_mov_b32_e32 v7, 8 -; GFX8-NEXT: v_mov_b32_e32 v8, s4 -; GFX8-NEXT: v_mov_b32_e32 v0, 0xff +; GFX8-NEXT: v_mov_b32_e32 v8, 16 ; GFX8-NEXT: v_lshrrev_b32_e32 v9, 2, v2 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v9 ; GFX8-NEXT: v_and_b32_e32 v2, 3, v2 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 8, v3 -; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v3 -; GFX8-NEXT: v_lshrrev_b32_e32 v12, 8, v4 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_e32 v10, 24, v11 -; GFX8-NEXT: v_lshrrev_b32_e32 v13, 24, v4 -; GFX8-NEXT: v_lshlrev_b32_sdwa v11, v7, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v14, 8, v5 -; GFX8-NEXT: v_and_b32_sdwa v18, v3, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v12, 24, v13 -; GFX8-NEXT: v_lshrrev_b32_e32 v16, 8, v6 -; GFX8-NEXT: v_lshlrev_b32_sdwa v13, v7, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_and_b32_sdwa v8, v4, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v3, v4, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v1, v1, v18 -; GFX8-NEXT: v_or_b32_e32 v3, v3, v8 -; GFX8-NEXT: v_lshrrev_b32_e32 v15, 24, v5 -; GFX8-NEXT: v_and_b32_sdwa v19, v5, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v4, v5, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_sdwa v7, v7, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v17, 24, v6 -; GFX8-NEXT: v_lshlrev_b32_e32 v14, 24, v15 -; GFX8-NEXT: v_or_b32_e32 v4, v4, v19 -; GFX8-NEXT: v_and_b32_sdwa v0, v6, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v5, v6, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v1, v1, v10 -; GFX8-NEXT: v_or_b32_e32 v3, v3, v12 -; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; GFX8-NEXT: v_lshlrev_b32_e32 v15, 24, v17 -; GFX8-NEXT: v_or_b32_e32 v0, v5, v0 -; GFX8-NEXT: v_or_b32_e32 v4, v4, v14 +; GFX8-NEXT: v_lshlrev_b32_sdwa v14, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v10, 24, v3 +; GFX8-NEXT: v_lshlrev_b32_sdwa v15, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v3, v3, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: 
v_lshlrev_b32_sdwa v16, v7, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v17, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_lshrrev_b32_e32 v12, 24, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v11 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX8-NEXT: v_or_b32_sdwa v5, v5, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v7, v7, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; GFX8-NEXT: v_or_b32_e32 v3, v3, v15 +; GFX8-NEXT: v_lshrrev_b32_e32 v13, 24, v6 +; GFX8-NEXT: v_lshlrev_b32_sdwa v8, v8, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v6, v6, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_or_b32_e32 v1, v5, v17 +; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v12 +; GFX8-NEXT: v_or_b32_e32 v3, v3, v10 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v4 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc +; GFX8-NEXT: v_lshlrev_b32_e32 v7, 24, v13 +; GFX8-NEXT: v_or_b32_e32 v5, v6, v8 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v11 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 2, v9 -; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc -; GFX8-NEXT: v_or_b32_e32 v0, v0, v15 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc +; GFX8-NEXT: v_or_b32_e32 v4, v5, v7 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 3, v9 -; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc +; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc ; GFX8-NEXT: v_lshlrev_b32_e32 v1, 3, v2 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, v1, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -2762,62 +2524,54 @@ ; GFX7-NEXT: 
buffer_load_dwordx4 v[3:6], v[0:1], s[4:7], 0 addr64 ; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: v_mov_b32_e32 v0, 0xff -; GFX7-NEXT: v_lshrrev_b32_e32 v18, 2, v2 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v18 +; GFX7-NEXT: v_lshrrev_b32_e32 v17, 2, v2 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v17 ; GFX7-NEXT: v_and_b32_e32 v2, 3, v2 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 8, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v12, 8, v5 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_and_b32_e32 v9, s4, v9 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 24, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 24, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v13, 16, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v15, 8, v6 -; GFX7-NEXT: v_and_b32_e32 v7, s4, v7 -; GFX7-NEXT: v_and_b32_e32 v10, s4, v10 -; GFX7-NEXT: v_and_b32_e32 v12, s4, v12 -; GFX7-NEXT: v_and_b32_e32 v3, s4, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_and_b32_e32 v4, s4, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; GFX7-NEXT: v_lshrrev_b32_e32 v14, 24, v5 -; GFX7-NEXT: v_and_b32_e32 v13, v13, v0 -; GFX7-NEXT: v_and_b32_e32 v15, v15, v0 +; GFX7-NEXT: v_bfe_u32 v11, v3, 8, 8 +; GFX7-NEXT: v_bfe_u32 v13, v4, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v4 +; GFX7-NEXT: v_bfe_u32 v15, v5, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v10, s4, v3 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v12, s4, v4 +; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v11, 8, v11 +; GFX7-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; GFX7-NEXT: v_lshrrev_b32_e32 v8, 24, v5 +; GFX7-NEXT: v_and_b32_e32 v14, v5, v0 +; GFX7-NEXT: v_bfe_u32 v5, v5, 16, 8 +; GFX7-NEXT: v_bfe_u32 v16, v6, 8, 8 +; GFX7-NEXT: v_or_b32_e32 v10, v10, v11 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX7-NEXT: v_or_b32_e32 v11, 
v12, v13 +; GFX7-NEXT: v_lshlrev_b32_e32 v15, 8, v15 +; GFX7-NEXT: v_lshrrev_b32_e32 v9, 24, v6 +; GFX7-NEXT: v_and_b32_e32 v0, v6, v0 +; GFX7-NEXT: v_bfe_u32 v6, v6, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v16, 8, v16 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX7-NEXT: v_or_b32_e32 v3, v10, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX7-NEXT: v_or_b32_e32 v12, v14, v15 +; GFX7-NEXT: v_or_b32_e32 v4, v11, v4 ; GFX7-NEXT: v_or_b32_e32 v1, v3, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; GFX7-NEXT: v_lshrrev_b32_e32 v16, 16, v6 -; GFX7-NEXT: v_and_b32_e32 v5, s4, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; GFX7-NEXT: v_or_b32_e32 v3, v4, v9 -; GFX7-NEXT: v_lshlrev_b32_e32 v12, 8, v12 -; GFX7-NEXT: v_lshrrev_b32_e32 v17, 24, v6 -; GFX7-NEXT: v_and_b32_e32 v6, v6, v0 -; GFX7-NEXT: v_and_b32_e32 v0, v16, v0 +; GFX7-NEXT: v_or_b32_e32 v3, v4, v7 ; GFX7-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v7 -; GFX7-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v10 -; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v13 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v12 -; GFX7-NEXT: v_lshlrev_b32_e32 v15, 8, v15 -; GFX7-NEXT: v_lshlrev_b32_e32 v14, 24, v14 -; GFX7-NEXT: v_or_b32_e32 v4, v4, v13 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 -; GFX7-NEXT: v_or_b32_e32 v5, v6, v15 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v8 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v11 +; GFX7-NEXT: v_or_b32_e32 v5, v12, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX7-NEXT: v_or_b32_e32 v0, v0, v16 ; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; GFX7-NEXT: v_lshlrev_b32_e32 v16, 24, v17 -; GFX7-NEXT: v_or_b32_e32 v0, v5, v0 -; GFX7-NEXT: v_or_b32_e32 v4, v4, v14 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 2, v18 +; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; GFX7-NEXT: v_or_b32_e32 v0, v0, v6 +; GFX7-NEXT: v_or_b32_e32 v4, v5, v8 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 2, v17 ; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc -; 
GFX7-NEXT: v_or_b32_e32 v0, v0, v16 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 3, v18 +; GFX7-NEXT: v_or_b32_e32 v0, v0, v9 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 3, v17 ; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc ; GFX7-NEXT: v_lshlrev_b32_e32 v1, 3, v2 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, v1, v0 @@ -2828,46 +2582,44 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx4 v[3:6], v[0:1], off -; GFX10-NEXT: v_mov_b32_e32 v1, 8 ; GFX10-NEXT: s_mov_b32 s4, 8 -; GFX10-NEXT: s_movk_i32 s5, 0xff -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 2, v2 +; GFX10-NEXT: v_mov_b32_e32 v1, 8 +; GFX10-NEXT: s_mov_b32 s5, 16 +; GFX10-NEXT: s_movk_i32 s6, 0xff +; GFX10-NEXT: v_lshrrev_b32_e32 v8, 2, v2 +; GFX10-NEXT: v_mov_b32_e32 v7, 16 ; GFX10-NEXT: v_mov_b32_e32 v0, 0xff ; GFX10-NEXT: v_and_b32_e32 v2, 3, v2 -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v7 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v8, 8, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v10, 8, v4 ; GFX10-NEXT: v_lshrrev_b32_e32 v9, 24, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v11, 24, v4 -; GFX10-NEXT: v_lshrrev_b32_e32 v12, 8, v5 -; GFX10-NEXT: v_lshlrev_b32_sdwa v8, s4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v10, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v16, v3, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_b32_sdwa v17, v4, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshrrev_b32_e32 v13, 24, v5 -; GFX10-NEXT: v_and_or_b32 v3, v3, s5, v8 +; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v4 +; GFX10-NEXT: v_lshlrev_b32_sdwa v13, s4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v15, s4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v11, 24, v5 +; GFX10-NEXT: v_lshlrev_b32_sdwa v14, s5, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v16, s5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v17, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_and_or_b32 v3, v3, s6, v13 ; GFX10-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX10-NEXT: v_lshrrev_b32_e32 v14, 8, v6 -; GFX10-NEXT: v_lshlrev_b32_sdwa v12, v1, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX10-NEXT: v_and_or_b32 v4, v4, s6, v15 +; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; GFX10-NEXT: v_lshlrev_b32_sdwa v18, v7, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshrrev_b32_e32 v12, 24, v6 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_and_or_b32 v5, v5, v0, v17 ; GFX10-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX10-NEXT: v_and_or_b32 v4, v4, s5, v10 -; GFX10-NEXT: v_and_b32_sdwa v18, v5, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshrrev_b32_e32 v15, 24, v6 -; GFX10-NEXT: v_or3_b32 v3, v3, v16, v9 -; GFX10-NEXT: v_lshlrev_b32_e32 v8, 24, v13 -; GFX10-NEXT: v_or3_b32 v4, v4, v17, v11 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_or_b32 v5, v5, s5, v12 -; GFX10-NEXT: v_and_b32_sdwa v19, v6, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v9, 24, v15 -; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo +; GFX10-NEXT: v_or3_b32 v3, v3, v14, v9 +; GFX10-NEXT: v_or3_b32 v4, v4, v16, v10 ; GFX10-NEXT: v_and_or_b32 v0, v6, v0, v1 -; GFX10-NEXT: v_or3_b32 v1, v5, v18, v8 -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v7 -; 
GFX10-NEXT: v_or3_b32 v0, v0, v19, v9 -; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v7 +; GFX10-NEXT: v_lshlrev_b32_sdwa v7, v7, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v12 +; GFX10-NEXT: v_or3_b32 v5, v5, v18, v11 +; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v8 +; GFX10-NEXT: v_or3_b32 v0, v0, v7, v1 +; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v8 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 3, v2 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, v1, v0 @@ -2881,64 +2633,58 @@ ; GCN-LABEL: extractelement_sgpr_v16i8_vgpr_idx: ; GCN: ; %bb.0: ; GCN-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 -; GCN-NEXT: s_movk_i32 s16, 0xff +; GCN-NEXT: s_mov_b32 s10, 0x80008 +; GCN-NEXT: s_movk_i32 s8, 0xff ; GCN-NEXT: v_lshrrev_b32_e32 v1, 2, v0 ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1 -; GCN-NEXT: v_and_b32_e32 v0, 3, v0 ; GCN-NEXT: s_waitcnt lgkmcnt(0) -; GCN-NEXT: s_lshr_b32 s4, s0, 8 -; GCN-NEXT: s_and_b32 s4, s4, s16 -; GCN-NEXT: s_lshr_b32 s5, s0, 16 -; GCN-NEXT: s_lshr_b32 s6, s0, 24 -; GCN-NEXT: s_and_b32 s0, s0, s16 -; GCN-NEXT: s_lshl_b32 s4, s4, 8 -; GCN-NEXT: s_or_b32 s0, s0, s4 -; GCN-NEXT: s_and_b32 s4, s5, s16 -; GCN-NEXT: s_lshl_b32 s4, s4, 16 -; GCN-NEXT: s_or_b32 s0, s0, s4 -; GCN-NEXT: s_lshl_b32 s4, s6, 24 -; GCN-NEXT: s_lshr_b32 s7, s1, 8 +; GCN-NEXT: s_bfe_u32 s11, s0, s10 +; GCN-NEXT: s_and_b32 s9, s0, s8 +; GCN-NEXT: s_lshl_b32 s11, s11, 8 +; GCN-NEXT: s_or_b32 s9, s9, s11 +; GCN-NEXT: s_mov_b32 s11, 0x80010 +; GCN-NEXT: s_lshr_b32 s4, s0, 24 +; GCN-NEXT: s_bfe_u32 s0, s0, s11 +; GCN-NEXT: s_lshl_b32 s0, s0, 16 +; GCN-NEXT: s_or_b32 s0, s9, s0 +; GCN-NEXT: s_bfe_u32 s9, s1, s10 +; GCN-NEXT: s_lshl_b32 s4, s4, 24 ; GCN-NEXT: s_or_b32 s0, s0, s4 -; GCN-NEXT: s_and_b32 s4, s7, s16 -; GCN-NEXT: s_lshr_b32 s8, s1, 16 -; 
GCN-NEXT: s_lshr_b32 s9, s1, 24 -; GCN-NEXT: s_and_b32 s1, s1, s16 -; GCN-NEXT: s_lshl_b32 s4, s4, 8 -; GCN-NEXT: s_or_b32 s1, s1, s4 -; GCN-NEXT: s_and_b32 s4, s8, s16 -; GCN-NEXT: s_lshl_b32 s4, s4, 16 -; GCN-NEXT: s_or_b32 s1, s1, s4 -; GCN-NEXT: s_lshl_b32 s4, s9, 24 -; GCN-NEXT: s_lshr_b32 s10, s2, 8 +; GCN-NEXT: s_lshr_b32 s5, s1, 24 +; GCN-NEXT: s_and_b32 s4, s1, s8 +; GCN-NEXT: s_bfe_u32 s1, s1, s11 +; GCN-NEXT: s_lshl_b32 s9, s9, 8 +; GCN-NEXT: s_or_b32 s4, s4, s9 +; GCN-NEXT: s_lshl_b32 s1, s1, 16 +; GCN-NEXT: s_or_b32 s1, s4, s1 +; GCN-NEXT: s_lshl_b32 s4, s5, 24 +; GCN-NEXT: s_bfe_u32 s5, s2, s10 ; GCN-NEXT: s_or_b32 s1, s1, s4 -; GCN-NEXT: s_and_b32 s4, s10, s16 -; GCN-NEXT: s_lshr_b32 s11, s2, 16 -; GCN-NEXT: s_lshr_b32 s12, s2, 24 -; GCN-NEXT: s_and_b32 s2, s2, s16 -; GCN-NEXT: s_lshl_b32 s4, s4, 8 -; GCN-NEXT: s_or_b32 s2, s2, s4 -; GCN-NEXT: s_and_b32 s4, s11, s16 -; GCN-NEXT: s_lshl_b32 s4, s4, 16 -; GCN-NEXT: s_or_b32 s2, s2, s4 -; GCN-NEXT: s_lshl_b32 s4, s12, 24 -; GCN-NEXT: s_lshr_b32 s13, s3, 8 +; GCN-NEXT: s_lshr_b32 s6, s2, 24 +; GCN-NEXT: s_and_b32 s4, s2, s8 +; GCN-NEXT: s_bfe_u32 s2, s2, s11 +; GCN-NEXT: s_lshl_b32 s5, s5, 8 +; GCN-NEXT: s_or_b32 s4, s4, s5 +; GCN-NEXT: s_lshl_b32 s2, s2, 16 +; GCN-NEXT: s_bfe_u32 s5, s3, s10 +; GCN-NEXT: s_or_b32 s2, s4, s2 +; GCN-NEXT: s_lshl_b32 s4, s6, 24 ; GCN-NEXT: s_or_b32 s2, s2, s4 -; GCN-NEXT: s_and_b32 s4, s13, s16 -; GCN-NEXT: s_lshr_b32 s14, s3, 16 -; GCN-NEXT: s_lshr_b32 s15, s3, 24 -; GCN-NEXT: s_and_b32 s3, s3, s16 -; GCN-NEXT: s_lshl_b32 s4, s4, 8 -; GCN-NEXT: s_or_b32 s3, s3, s4 -; GCN-NEXT: s_and_b32 s4, s14, s16 -; GCN-NEXT: s_lshl_b32 s4, s4, 16 -; GCN-NEXT: s_or_b32 s3, s3, s4 -; GCN-NEXT: s_lshl_b32 s4, s15, 24 +; GCN-NEXT: s_lshr_b32 s7, s3, 24 +; GCN-NEXT: s_and_b32 s4, s3, s8 +; GCN-NEXT: s_bfe_u32 s3, s3, s11 +; GCN-NEXT: s_lshl_b32 s5, s5, 8 +; GCN-NEXT: s_or_b32 s4, s4, s5 +; GCN-NEXT: s_lshl_b32 s3, s3, 16 +; GCN-NEXT: s_or_b32 s3, s4, s3 +; GCN-NEXT: s_lshl_b32 s4, s7, 24 ; 
GCN-NEXT: v_mov_b32_e32 v2, s0 ; GCN-NEXT: v_mov_b32_e32 v3, s1 ; GCN-NEXT: s_or_b32 s3, s3, s4 ; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc ; GCN-NEXT: v_mov_b32_e32 v4, s2 ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 2, v1 +; GCN-NEXT: v_and_b32_e32 v0, 3, v0 ; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; GCN-NEXT: v_mov_b32_e32 v5, s3 ; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 3, v1 @@ -2951,64 +2697,58 @@ ; GFX10-LABEL: extractelement_sgpr_v16i8_vgpr_idx: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 +; GFX10-NEXT: s_mov_b32 s5, 0x80008 ; GFX10-NEXT: s_movk_i32 s4, 0xff +; GFX10-NEXT: s_mov_b32 s6, 0x80010 ; GFX10-NEXT: v_lshrrev_b32_e32 v1, 2, v0 ; GFX10-NEXT: v_and_b32_e32 v0, 3, v0 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1 ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s8, s1, 8 -; GFX10-NEXT: s_lshr_b32 s5, s0, 8 -; GFX10-NEXT: s_lshr_b32 s9, s1, 16 -; GFX10-NEXT: s_and_b32 s8, s8, s4 -; GFX10-NEXT: s_and_b32 s5, s5, s4 -; GFX10-NEXT: s_and_b32 s9, s9, s4 -; GFX10-NEXT: s_lshr_b32 s10, s1, 24 -; GFX10-NEXT: s_and_b32 s1, s1, s4 -; GFX10-NEXT: s_lshl_b32 s8, s8, 8 -; GFX10-NEXT: s_lshr_b32 s6, s0, 16 +; GFX10-NEXT: s_bfe_u32 s12, s0, s5 +; GFX10-NEXT: s_bfe_u32 s14, s1, s5 +; GFX10-NEXT: s_lshr_b32 s8, s1, 24 +; GFX10-NEXT: s_and_b32 s13, s1, s4 +; GFX10-NEXT: s_bfe_u32 s1, s1, s6 +; GFX10-NEXT: s_and_b32 s11, s0, s4 +; GFX10-NEXT: s_lshl_b32 s12, s12, 8 +; GFX10-NEXT: s_lshl_b32 s14, s14, 8 +; GFX10-NEXT: s_or_b32 s11, s11, s12 +; GFX10-NEXT: s_lshl_b32 s1, s1, 16 +; GFX10-NEXT: s_or_b32 s12, s13, s14 +; GFX10-NEXT: s_lshl_b32 s8, s8, 24 +; GFX10-NEXT: s_or_b32 s1, s12, s1 ; GFX10-NEXT: s_lshr_b32 s7, s0, 24 -; GFX10-NEXT: s_and_b32 s0, s0, s4 -; GFX10-NEXT: s_lshl_b32 s5, s5, 8 -; GFX10-NEXT: s_lshl_b32 s9, s9, 16 +; GFX10-NEXT: s_bfe_u32 s0, s0, s6 ; GFX10-NEXT: s_or_b32 s1, s1, s8 -; GFX10-NEXT: s_and_b32 s6, s6, s4 -; GFX10-NEXT: s_or_b32 s0, s0, s5 -; GFX10-NEXT: s_or_b32 s1, s1, s9 -; 
GFX10-NEXT: s_lshl_b32 s5, s10, 24 -; GFX10-NEXT: s_lshl_b32 s6, s6, 16 -; GFX10-NEXT: s_lshr_b32 s11, s2, 8 -; GFX10-NEXT: s_or_b32 s1, s1, s5 -; GFX10-NEXT: s_or_b32 s0, s0, s6 -; GFX10-NEXT: s_and_b32 s6, s11, s4 -; GFX10-NEXT: s_lshl_b32 s7, s7, 24 -; GFX10-NEXT: s_lshr_b32 s12, s2, 16 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_bfe_u32 s16, s2, s5 ; GFX10-NEXT: v_mov_b32_e32 v2, s1 -; GFX10-NEXT: s_lshr_b32 s13, s2, 24 +; GFX10-NEXT: s_lshl_b32 s7, s7, 24 +; GFX10-NEXT: s_or_b32 s0, s11, s0 +; GFX10-NEXT: s_and_b32 s15, s2, s4 +; GFX10-NEXT: s_lshr_b32 s9, s2, 24 +; GFX10-NEXT: s_bfe_u32 s2, s2, s6 +; GFX10-NEXT: s_lshl_b32 s16, s16, 8 ; GFX10-NEXT: s_or_b32 s0, s0, s7 -; GFX10-NEXT: s_and_b32 s2, s2, s4 -; GFX10-NEXT: s_lshl_b32 s6, s6, 8 -; GFX10-NEXT: s_and_b32 s7, s12, s4 -; GFX10-NEXT: s_or_b32 s2, s2, s6 -; GFX10-NEXT: s_lshl_b32 s6, s7, 16 -; GFX10-NEXT: s_lshr_b32 s14, s3, 8 -; GFX10-NEXT: s_lshr_b32 s15, s3, 16 -; GFX10-NEXT: s_or_b32 s2, s2, s6 -; GFX10-NEXT: s_and_b32 s6, s14, s4 +; GFX10-NEXT: s_or_b32 s7, s15, s16 +; GFX10-NEXT: s_lshl_b32 s2, s2, 16 +; GFX10-NEXT: s_bfe_u32 s5, s3, s5 ; GFX10-NEXT: v_cndmask_b32_e32 v2, s0, v2, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 2, v1 -; GFX10-NEXT: s_lshl_b32 s5, s13, 24 -; GFX10-NEXT: s_and_b32 s1, s15, s4 -; GFX10-NEXT: s_lshr_b32 s16, s3, 24 -; GFX10-NEXT: s_and_b32 s3, s3, s4 -; GFX10-NEXT: s_lshl_b32 s6, s6, 8 -; GFX10-NEXT: s_or_b32 s2, s2, s5 -; GFX10-NEXT: s_or_b32 s3, s3, s6 +; GFX10-NEXT: s_or_b32 s2, s7, s2 +; GFX10-NEXT: s_lshl_b32 s7, s9, 24 +; GFX10-NEXT: s_bfe_u32 s1, s3, s6 +; GFX10-NEXT: s_and_b32 s4, s3, s4 +; GFX10-NEXT: s_lshl_b32 s5, s5, 8 +; GFX10-NEXT: s_or_b32 s2, s2, s7 +; GFX10-NEXT: s_lshr_b32 s10, s3, 24 +; GFX10-NEXT: s_or_b32 s3, s4, s5 ; GFX10-NEXT: s_lshl_b32 s1, s1, 16 ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s2, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 3, v1 ; GFX10-NEXT: s_or_b32 s0, s3, s1 -; GFX10-NEXT: s_lshl_b32 s1, s16, 24 +; GFX10-NEXT: s_lshl_b32 
s1, s10, 24 ; GFX10-NEXT: s_or_b32 s3, s0, s1 ; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, s3, vcc_lo ; GFX10-NEXT: v_lshrrev_b32_e32 v0, v0, v1 @@ -3024,15 +2764,15 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v0, s5, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: v_mov_b32_e32 v3, 16 +; GFX9-NEXT: v_mov_b32_e32 v1, 0xff +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v4 ; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -3040,17 +2780,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; @@ -3061,19 +2799,16 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -3082,16 +2817,15 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v1 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v1, 8 +; GFX10-NEXT: v_mov_b32_e32 v2, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <16 x i8>, <16 x i8> addrspace(1)* %ptr %element = extractelement <16 x i8> %vector, i32 0 @@ -3104,15 +2838,15 @@ ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off ; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v0, s5, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX9-NEXT: v_mov_b32_e32 v2, 16 +; GFX9-NEXT: v_mov_b32_e32 v1, 0xff +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v4 +; GFX9-NEXT: 
v_lshlrev_b32_e32 v1, 24, v3 +; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -3120,17 +2854,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -3142,19 +2874,16 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; 
GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] @@ -3166,14 +2895,13 @@ ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off ; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v1 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v1, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v1, v2 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <16 x i8>, <16 x i8> addrspace(1)* %ptr @@ -3186,16 +2914,16 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: 
v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v0, s5, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: s_mov_b32 s4, 16 +; GFX9-NEXT: v_mov_b32_e32 v1, 0xff +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX9-NEXT: v_or3_b32 v0, v0, v4, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -3203,17 +2931,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 
24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -3225,19 +2951,16 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] @@ -3247,14 +2970,13 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 +; GFX10-NEXT: v_mov_b32_e32 v1, 8 +; GFX10-NEXT: s_mov_b32 s4, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s4 
dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 ; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 16, v0 @@ -3269,15 +2991,15 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v0, s5, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: v_mov_b32_e32 v3, 16 +; GFX9-NEXT: v_mov_b32_e32 v1, 0xff +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v4 ; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] @@ -3286,17 +3008,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -3308,19 +3028,16 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] @@ -3330,16 +3047,15 @@ ; 
GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v1 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v1, 8 +; GFX10-NEXT: v_mov_b32_e32 v2, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <16 x i8>, <16 x i8> addrspace(1)* %ptr @@ -3352,15 +3068,15 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; GFX9-NEXT: v_and_b32_sdwa v3, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v1, s5, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: v_mov_b32_e32 v3, 16 +; GFX9-NEXT: 
v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v1, v0, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v4 ; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -3368,17 +3084,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v2, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; @@ -3389,20 +3103,17 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: 
v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; GFX7-NEXT: v_bfe_u32 v3, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v2, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: extractelement_vgpr_v16i8_idx4: @@ -3410,16 +3121,15 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v1, s4, v0 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v0, 8 +; GFX10-NEXT: v_mov_b32_e32 v2, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v1, v0 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: s_setpc_b64 
s[30:31] %vector = load <16 x i8>, <16 x i8> addrspace(1)* %ptr %element = extractelement <16 x i8> %vector, i32 4 @@ -3432,15 +3142,15 @@ ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off ; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; GFX9-NEXT: v_and_b32_sdwa v3, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v1, s5, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX9-NEXT: v_mov_b32_e32 v2, 16 +; GFX9-NEXT: v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v1, v0, v4 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -3448,17 +3158,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v2, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -3470,20 +3178,17 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; GFX7-NEXT: v_bfe_u32 v3, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v2, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -3494,14 +3199,13 @@ ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off ; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v1, s4, v0 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v0, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v1, 0xff, v1, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v1, v0, v2 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <16 x i8>, <16 x i8> addrspace(1)* %ptr @@ -3514,16 +3218,16 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; GFX9-NEXT: v_and_b32_sdwa v3, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v1, s5, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: s_mov_b32 s4, 16 +; GFX9-NEXT: v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v1, v0, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX9-NEXT: v_or3_b32 
v0, v0, v4, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -3531,17 +3235,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v2, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -3553,20 +3255,17 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; GFX7-NEXT: v_bfe_u32 v3, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: 
v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v2, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -3575,14 +3274,13 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v1 +; GFX10-NEXT: v_mov_b32_e32 v0, 8 +; GFX10-NEXT: s_mov_b32 s4, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v1, s4, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v1, v0 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 ; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 16, v0 @@ -3597,15 +3295,15 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; 
GFX9-NEXT: v_and_b32_sdwa v3, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v1, s5, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: v_mov_b32_e32 v3, 16 +; GFX9-NEXT: v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v1, v0, v2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v4 ; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] @@ -3614,17 +3312,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v2, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v2, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -3636,20 +3332,17 
@@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 +; GFX7-NEXT: v_bfe_u32 v3, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v2, 0xff, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v2, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -3658,16 +3351,15 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v1 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_b32_sdwa v3, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v1, s4, v0 -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v0, 8 +; GFX10-NEXT: v_mov_b32_e32 v2, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v1, v0 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <16 x i8>, <16 x i8> addrspace(1)* %ptr @@ -3680,15 +3372,15 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 24, v2 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v2, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v2, s5, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, 8 +; GFX9-NEXT: v_mov_b32_e32 v3, 16 +; GFX9-NEXT: v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v2 +; GFX9-NEXT: v_and_or_b32 v0, v2, v0, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v4 ; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -3696,17 +3388,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v1, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v2 -; GFX8-NEXT: v_and_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v1, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; @@ -3717,20 +3407,17 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v2 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX7-NEXT: v_bfe_u32 v3, v2, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v2 +; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v2 +; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: extractelement_vgpr_v16i8_idx8: 
@@ -3738,16 +3425,15 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v2 -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v3, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v2, s4, v0 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v0, 8 +; GFX10-NEXT: v_mov_b32_e32 v1, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v2, v0 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v1, v2 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <16 x i8>, <16 x i8> addrspace(1)* %ptr %element = extractelement <16 x i8> %vector, i32 8 @@ -3760,15 +3446,15 @@ ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off ; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 24, v2 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v2, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v2, s5, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, 
16 +; GFX9-NEXT: v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v2, v0, v4 +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v3 +; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -3776,17 +3462,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v1, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v2 -; GFX8-NEXT: v_and_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v1, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -3798,20 +3482,17 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v2 -; 
GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v2 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX7-NEXT: v_bfe_u32 v3, v2, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v2 +; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v2 +; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -3822,14 +3503,13 @@ ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off ; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v2 -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v3, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v2, s4, v0 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v0, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v1, 0xff, v2, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v1, v0, v2 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 
8, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <16 x i8>, <16 x i8> addrspace(1)* %ptr @@ -3842,16 +3522,16 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 24, v2 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v2, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v2, s5, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; GFX9-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, 8 +; GFX9-NEXT: s_mov_b32 s4, 16 +; GFX9-NEXT: v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v2 +; GFX9-NEXT: v_and_or_b32 v0, v2, v0, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX9-NEXT: v_or3_b32 v0, v0, v4, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -3859,17 +3539,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v1, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v2 -; GFX8-NEXT: v_and_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v1, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -3881,20 +3559,17 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v2 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX7-NEXT: v_bfe_u32 v3, v2, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v2 +; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v2 +; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -3903,15 +3578,14 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; 
GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v2 +; GFX10-NEXT: v_mov_b32_e32 v0, 8 +; GFX10-NEXT: s_mov_b32 s4, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v2, v0 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v3, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v2, s4, v0 ; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] @@ -3925,15 +3599,15 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 24, v2 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v3, v2, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v2, s5, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, 8 +; GFX9-NEXT: v_mov_b32_e32 v3, 16 +; GFX9-NEXT: v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v2 +; GFX9-NEXT: v_and_or_b32 v0, v2, v0, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v4 ; GFX9-NEXT: v_or3_b32 v0, v0, 
v3, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] @@ -3942,17 +3616,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v1, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v2 -; GFX8-NEXT: v_and_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v1, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -3964,20 +3636,17 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v2 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 -; GFX7-NEXT: 
v_or_b32_e32 v0, v0, v1 +; GFX7-NEXT: v_bfe_u32 v3, v2, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v2 +; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v2 +; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -3986,16 +3655,15 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v2 -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 24, v2 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v3, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v2, s4, v0 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v0, 8 +; GFX10-NEXT: v_mov_b32_e32 v1, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v2, v0 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v1, v2 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <16 x i8>, <16 x i8> addrspace(1)* %ptr @@ -4008,15 +3676,15 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; 
GFX9-NEXT: s_movk_i32 s5, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 24, v3 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v2, v3, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v3, s5, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, 8 +; GFX9-NEXT: v_mov_b32_e32 v2, 16 +; GFX9-NEXT: v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v3 +; GFX9-NEXT: v_and_or_b32 v0, v3, v0, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v4 ; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -4024,17 +3692,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v1, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 8, v3 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v3 -; GFX8-NEXT: v_and_b32_sdwa v1, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v1, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v3 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, 
v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v2 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: s_setpc_b64 s[30:31] ; @@ -4045,20 +3711,17 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v3 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v3 -; GFX7-NEXT: v_and_b32_e32 v3, s4, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v3, v0 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX7-NEXT: v_bfe_u32 v2, v3, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v3 +; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v3 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; ; GFX10-LABEL: extractelement_vgpr_v16i8_idx12: @@ -4066,16 +3729,15 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 24, v3 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v2, v3, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: 
v_and_or_b32 v0, v3, s4, v0 -; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 +; GFX10-NEXT: v_mov_b32_e32 v0, 8 +; GFX10-NEXT: v_mov_b32_e32 v1, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v3, v0 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX10-NEXT: v_or3_b32 v0, v0, v1, v2 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <16 x i8>, <16 x i8> addrspace(1)* %ptr %element = extractelement <16 x i8> %vector, i32 12 @@ -4088,15 +3750,15 @@ ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off ; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 24, v3 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v2, v3, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v3, s5, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, 16 +; GFX9-NEXT: v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v3 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v3, v0, v4 +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -4104,17 +3766,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 
v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v1, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 8, v3 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v3 -; GFX8-NEXT: v_and_b32_sdwa v1, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v1, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v3 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v2 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -4126,20 +3786,17 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v3 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v3 -; GFX7-NEXT: v_and_b32_e32 v3, s4, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v3, v0 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX7-NEXT: v_bfe_u32 v2, v3, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v3 +; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v3 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 
8, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -4150,14 +3807,13 @@ ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off ; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 24, v3 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v2, v3, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v3, s4, v0 -; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 +; GFX10-NEXT: v_mov_b32_e32 v0, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v1, 0xff, v3, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX10-NEXT: v_or3_b32 v0, v1, v0, v2 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <16 x i8>, <16 x i8> addrspace(1)* %ptr @@ -4170,16 +3826,16 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 24, v3 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v2, v3, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 
src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v3, s5, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, 8 +; GFX9-NEXT: s_mov_b32 s4, 16 +; GFX9-NEXT: v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v3 +; GFX9-NEXT: v_and_or_b32 v0, v3, v0, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX9-NEXT: v_or3_b32 v0, v0, v4, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -4187,17 +3843,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v1, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 8, v3 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v3 -; GFX8-NEXT: v_and_b32_sdwa v1, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v1, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v3 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v2 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -4209,20 +3863,17 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: 
s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v3 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v3 -; GFX7-NEXT: v_and_b32_e32 v3, s4, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v3, v0 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX7-NEXT: v_bfe_u32 v2, v3, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v3 +; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v3 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -4231,15 +3882,14 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v3 +; GFX10-NEXT: v_mov_b32_e32 v0, 8 +; GFX10-NEXT: s_mov_b32 s4, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v1, 24, v3 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v3, v0 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v2, 
v3, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v3, s4, v0 ; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 16, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] @@ -4253,15 +3903,15 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: s_movk_i32 s5, 0xff ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 24, v3 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v2, v3, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v0, v3, s5, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX9-NEXT: v_mov_b32_e32 v1, 8 +; GFX9-NEXT: v_mov_b32_e32 v2, 16 +; GFX9-NEXT: v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v3 +; GFX9-NEXT: v_and_or_b32 v0, v3, v0, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v4 ; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX9-NEXT: s_setpc_b64 s[30:31] @@ -4270,17 +3920,15 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: s_waitcnt vmcnt(0) ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v1, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 8, v3 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v3 -; GFX8-NEXT: v_and_b32_sdwa v1, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 
src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v1, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v3 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v2 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX8-NEXT: s_setpc_b64 s[30:31] @@ -4292,20 +3940,17 @@ ; GFX7-NEXT: s_mov_b32 s7, 0xf000 ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[4:7], 0 addr64 -; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v3 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v3 -; GFX7-NEXT: v_and_b32_e32 v3, s4, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v3, v0 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX7-NEXT: v_bfe_u32 v2, v3, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v3 +; GFX7-NEXT: v_and_b32_e32 v1, 0xff, v3 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 ; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX7-NEXT: s_setpc_b64 s[30:31] ; @@ -4314,16 +3959,15 @@ ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: global_load_dwordx4 
v[0:3], v[0:1], off -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 24, v3 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v2, v3, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v3, s4, v0 -; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 +; GFX10-NEXT: v_mov_b32_e32 v0, 8 +; GFX10-NEXT: v_mov_b32_e32 v1, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v3, v0 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX10-NEXT: v_or3_b32 v0, v0, v1, v2 ; GFX10-NEXT: v_lshrrev_b32_e32 v0, 24, v0 ; GFX10-NEXT: s_setpc_b64 s[30:31] %vector = load <16 x i8>, <16 x i8> addrspace(1)* %ptr Index: llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll +++ llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll @@ -551,12 +551,12 @@ ; GFX6-NEXT: s_lshr_b32 s4, s2, 8 ; GFX6-NEXT: s_andn2_b32 s2, 7, s2 ; GFX6-NEXT: s_lshr_b32 s5, s5, 1 -; GFX6-NEXT: s_lshr_b32 s1, s1, 9 +; GFX6-NEXT: s_bfe_u32 s1, s1, 0x80008 ; GFX6-NEXT: s_lshr_b32 s2, s5, s2 ; GFX6-NEXT: s_or_b32 s0, s0, s2 ; GFX6-NEXT: s_and_b32 s2, s4, 7 ; GFX6-NEXT: s_andn2_b32 s4, 7, s4 -; GFX6-NEXT: s_and_b32 s1, s1, 0x7f +; GFX6-NEXT: s_lshr_b32 s1, s1, 1 ; GFX6-NEXT: s_lshl_b32 s2, s3, s2 ; GFX6-NEXT: s_lshr_b32 s1, s1, s4 ; GFX6-NEXT: s_or_b32 s1, s2, s1 @@ -673,12 +673,12 @@ ; GFX6-NEXT: v_lshrrev_b32_e32 v3, 8, v0 ; GFX6-NEXT: v_lshlrev_b32_e32 v0, v5, v0 ; GFX6-NEXT: 
v_lshrrev_b32_e32 v2, v2, v6 -; GFX6-NEXT: v_lshrrev_b32_e32 v1, 9, v1 +; GFX6-NEXT: v_bfe_u32 v1, v1, 8, 8 ; GFX6-NEXT: v_or_b32_e32 v0, v0, v2 ; GFX6-NEXT: v_and_b32_e32 v2, 7, v4 ; GFX6-NEXT: v_xor_b32_e32 v4, -1, v4 ; GFX6-NEXT: v_and_b32_e32 v4, 7, v4 -; GFX6-NEXT: v_and_b32_e32 v1, 0x7f, v1 +; GFX6-NEXT: v_lshrrev_b32_e32 v1, 1, v1 ; GFX6-NEXT: v_lshlrev_b32_e32 v2, v2, v3 ; GFX6-NEXT: v_lshrrev_b32_e32 v1, v4, v1 ; GFX6-NEXT: v_mov_b32_e32 v5, 0xff @@ -795,17 +795,16 @@ ; GFX6-NEXT: s_or_b32 s0, s0, s2 ; GFX6-NEXT: s_and_b32 s2, s6, 7 ; GFX6-NEXT: s_lshl_b32 s2, s3, s2 -; GFX6-NEXT: s_lshr_b32 s3, s1, 9 -; GFX6-NEXT: s_movk_i32 s9, 0x7f +; GFX6-NEXT: s_bfe_u32 s3, s1, 0x80008 ; GFX6-NEXT: s_andn2_b32 s6, 7, s6 -; GFX6-NEXT: s_and_b32 s3, s3, s9 +; GFX6-NEXT: s_lshr_b32 s3, s3, 1 ; GFX6-NEXT: s_lshr_b32 s3, s3, s6 ; GFX6-NEXT: s_or_b32 s2, s2, s3 ; GFX6-NEXT: s_and_b32 s3, s7, 7 ; GFX6-NEXT: s_lshl_b32 s3, s4, s3 -; GFX6-NEXT: s_lshr_b32 s4, s1, 17 +; GFX6-NEXT: s_bfe_u32 s4, s1, 0x80010 ; GFX6-NEXT: s_andn2_b32 s6, 7, s7 -; GFX6-NEXT: s_and_b32 s4, s4, s9 +; GFX6-NEXT: s_lshr_b32 s4, s4, 1 ; GFX6-NEXT: s_lshr_b32 s4, s4, s6 ; GFX6-NEXT: s_and_b32 s2, s2, s10 ; GFX6-NEXT: s_or_b32 s3, s3, s4 @@ -1016,18 +1015,17 @@ ; GFX6-NEXT: v_and_b32_e32 v2, 7, v6 ; GFX6-NEXT: v_xor_b32_e32 v6, -1, v6 ; GFX6-NEXT: v_lshlrev_b32_e32 v2, v2, v3 -; GFX6-NEXT: v_lshrrev_b32_e32 v3, 9, v1 -; GFX6-NEXT: s_movk_i32 s4, 0x7f +; GFX6-NEXT: v_bfe_u32 v3, v1, 8, 8 ; GFX6-NEXT: v_and_b32_e32 v6, 7, v6 -; GFX6-NEXT: v_and_b32_e32 v3, s4, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v3, 1, v3 ; GFX6-NEXT: v_lshrrev_b32_e32 v3, v6, v3 ; GFX6-NEXT: v_or_b32_e32 v2, v2, v3 ; GFX6-NEXT: v_and_b32_e32 v3, 7, v7 ; GFX6-NEXT: v_xor_b32_e32 v6, -1, v7 ; GFX6-NEXT: v_lshlrev_b32_e32 v3, v3, v4 -; GFX6-NEXT: v_lshrrev_b32_e32 v4, 17, v1 +; GFX6-NEXT: v_bfe_u32 v4, v1, 16, 8 ; GFX6-NEXT: v_and_b32_e32 v6, 7, v6 -; GFX6-NEXT: v_and_b32_e32 v4, s4, v4 +; GFX6-NEXT: v_lshrrev_b32_e32 v4, 1, v4 ; GFX6-NEXT: 
v_mov_b32_e32 v9, 0xff ; GFX6-NEXT: v_lshrrev_b32_e32 v4, v6, v4 ; GFX6-NEXT: v_xor_b32_e32 v6, -1, v8 @@ -1477,73 +1475,71 @@ define amdgpu_ps i48 @s_fshl_v2i24(i48 inreg %lhs.arg, i48 inreg %rhs.arg, i48 inreg %amt.arg) { ; GFX6-LABEL: s_fshl_v2i24: ; GFX6: ; %bb.0: -; GFX6-NEXT: s_lshr_b32 s6, s0, 8 -; GFX6-NEXT: s_movk_i32 s10, 0xff -; GFX6-NEXT: s_and_b32 s6, s6, s10 -; GFX6-NEXT: s_lshr_b32 s7, s0, 16 -; GFX6-NEXT: s_lshr_b32 s8, s0, 24 -; GFX6-NEXT: s_and_b32 s0, s0, s10 -; GFX6-NEXT: s_lshl_b32 s6, s6, 8 -; GFX6-NEXT: s_or_b32 s0, s0, s6 -; GFX6-NEXT: s_and_b32 s6, s7, s10 +; GFX6-NEXT: s_movk_i32 s9, 0xff +; GFX6-NEXT: s_mov_b32 s11, 0x80008 +; GFX6-NEXT: s_lshr_b32 s6, s0, 16 +; GFX6-NEXT: s_lshr_b32 s7, s0, 24 +; GFX6-NEXT: s_and_b32 s10, s0, s9 +; GFX6-NEXT: s_bfe_u32 s0, s0, s11 +; GFX6-NEXT: s_lshl_b32 s0, s0, 8 +; GFX6-NEXT: s_and_b32 s6, s6, s9 +; GFX6-NEXT: s_or_b32 s0, s10, s0 ; GFX6-NEXT: s_bfe_u32 s6, s6, 0x100000 -; GFX6-NEXT: s_lshr_b32 s9, s1, 8 -; GFX6-NEXT: s_and_b32 s1, s1, s10 +; GFX6-NEXT: s_lshr_b32 s8, s1, 8 +; GFX6-NEXT: s_and_b32 s1, s1, s9 ; GFX6-NEXT: s_bfe_u32 s0, s0, 0x100000 ; GFX6-NEXT: s_lshl_b32 s6, s6, 16 ; GFX6-NEXT: s_lshl_b32 s1, s1, 8 ; GFX6-NEXT: s_or_b32 s0, s0, s6 -; GFX6-NEXT: s_and_b32 s6, s9, s10 -; GFX6-NEXT: s_or_b32 s1, s8, s1 +; GFX6-NEXT: s_and_b32 s6, s8, s9 +; GFX6-NEXT: s_or_b32 s1, s7, s1 ; GFX6-NEXT: s_bfe_u32 s6, s6, 0x100000 ; GFX6-NEXT: s_bfe_u32 s1, s1, 0x100000 ; GFX6-NEXT: s_lshl_b32 s6, s6, 16 ; GFX6-NEXT: s_or_b32 s1, s1, s6 -; GFX6-NEXT: s_lshr_b32 s6, s2, 8 -; GFX6-NEXT: s_and_b32 s6, s6, s10 -; GFX6-NEXT: s_lshr_b32 s7, s2, 16 -; GFX6-NEXT: s_lshr_b32 s8, s2, 24 -; GFX6-NEXT: s_and_b32 s2, s2, s10 -; GFX6-NEXT: s_lshl_b32 s6, s6, 8 -; GFX6-NEXT: s_or_b32 s2, s2, s6 -; GFX6-NEXT: s_and_b32 s6, s7, s10 +; GFX6-NEXT: s_lshr_b32 s6, s2, 16 +; GFX6-NEXT: s_lshr_b32 s7, s2, 24 +; GFX6-NEXT: s_and_b32 s10, s2, s9 +; GFX6-NEXT: s_bfe_u32 s2, s2, s11 +; GFX6-NEXT: s_lshl_b32 s2, s2, 8 +; GFX6-NEXT: 
s_and_b32 s6, s6, s9 +; GFX6-NEXT: s_or_b32 s2, s10, s2 ; GFX6-NEXT: s_bfe_u32 s6, s6, 0x100000 -; GFX6-NEXT: s_lshr_b32 s9, s3, 8 -; GFX6-NEXT: s_and_b32 s3, s3, s10 +; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v0, 24 +; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0 +; GFX6-NEXT: s_lshr_b32 s8, s3, 8 +; GFX6-NEXT: s_and_b32 s3, s3, s9 ; GFX6-NEXT: s_bfe_u32 s2, s2, 0x100000 ; GFX6-NEXT: s_lshl_b32 s6, s6, 16 ; GFX6-NEXT: s_lshl_b32 s3, s3, 8 -; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v0, 24 ; GFX6-NEXT: s_or_b32 s2, s2, s6 -; GFX6-NEXT: s_and_b32 s6, s9, s10 -; GFX6-NEXT: s_or_b32 s3, s8, s3 +; GFX6-NEXT: s_and_b32 s6, s8, s9 +; GFX6-NEXT: s_or_b32 s3, s7, s3 ; GFX6-NEXT: s_bfe_u32 s6, s6, 0x100000 -; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX6-NEXT: s_bfe_u32 s3, s3, 0x100000 ; GFX6-NEXT: s_lshl_b32 s6, s6, 16 -; GFX6-NEXT: s_or_b32 s3, s3, s6 -; GFX6-NEXT: s_lshr_b32 s6, s4, 8 -; GFX6-NEXT: s_and_b32 s6, s6, s10 ; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 -; GFX6-NEXT: s_lshr_b32 s7, s4, 16 -; GFX6-NEXT: s_lshr_b32 s8, s4, 24 -; GFX6-NEXT: s_and_b32 s4, s4, s10 -; GFX6-NEXT: s_lshl_b32 s6, s6, 8 -; GFX6-NEXT: s_or_b32 s4, s4, s6 -; GFX6-NEXT: s_and_b32 s6, s7, s10 +; GFX6-NEXT: s_or_b32 s3, s3, s6 +; GFX6-NEXT: s_lshr_b32 s6, s4, 16 +; GFX6-NEXT: s_lshr_b32 s7, s4, 24 +; GFX6-NEXT: s_and_b32 s10, s4, s9 +; GFX6-NEXT: s_bfe_u32 s4, s4, s11 +; GFX6-NEXT: s_lshl_b32 s4, s4, 8 +; GFX6-NEXT: s_and_b32 s6, s6, s9 ; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GFX6-NEXT: s_or_b32 s4, s10, s4 ; GFX6-NEXT: s_bfe_u32 s6, s6, 0x100000 ; GFX6-NEXT: s_bfe_u32 s4, s4, 0x100000 ; GFX6-NEXT: s_lshl_b32 s6, s6, 16 ; GFX6-NEXT: s_or_b32 s4, s4, s6 ; GFX6-NEXT: s_sub_i32 s6, 0, 24 ; GFX6-NEXT: v_mul_lo_u32 v1, s6, v0 -; GFX6-NEXT: s_lshr_b32 s9, s5, 8 -; GFX6-NEXT: s_and_b32 s5, s5, s10 +; GFX6-NEXT: s_lshr_b32 s8, s5, 8 +; GFX6-NEXT: s_and_b32 s5, s5, s9 ; GFX6-NEXT: s_lshl_b32 s5, s5, 8 ; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1 -; GFX6-NEXT: s_and_b32 s7, s9, s10 -; GFX6-NEXT: s_or_b32 s5, s8, s5 +; GFX6-NEXT: 
s_or_b32 s5, s7, s5 +; GFX6-NEXT: s_and_b32 s7, s8, s9 ; GFX6-NEXT: s_bfe_u32 s7, s7, 0x100000 ; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1 ; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v1, 24 @@ -1587,27 +1583,23 @@ ; GFX6-NEXT: v_and_b32_e32 v1, v1, v4 ; GFX6-NEXT: s_lshr_b32 s0, s3, 1 ; GFX6-NEXT: v_and_b32_e32 v2, v2, v4 +; GFX6-NEXT: v_bfe_u32 v3, v0, 8, 8 ; GFX6-NEXT: v_lshl_b32_e32 v1, s1, v1 ; GFX6-NEXT: v_lshr_b32_e32 v2, s0, v2 ; GFX6-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX6-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX6-NEXT: v_and_b32_e32 v2, s10, v2 -; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX6-NEXT: v_and_b32_e32 v0, s10, v0 -; GFX6-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX6-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX6-NEXT: v_and_b32_e32 v2, s10, v3 -; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX6-NEXT: v_lshrrev_b32_e32 v4, 8, v1 -; GFX6-NEXT: v_lshrrev_b32_e32 v5, 16, v1 -; GFX6-NEXT: v_and_b32_e32 v1, s10, v1 +; GFX6-NEXT: v_and_b32_e32 v2, s9, v0 +; GFX6-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX6-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX6-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX6-NEXT: v_and_b32_e32 v2, s9, v1 +; GFX6-NEXT: v_lshlrev_b32_e32 v2, 24, v2 ; GFX6-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX6-NEXT: v_and_b32_e32 v2, s10, v5 -; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; GFX6-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX6-NEXT: v_and_b32_e32 v1, s10, v4 -; GFX6-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX6-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX6-NEXT: v_bfe_u32 v2, v1, 8, 8 +; GFX6-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1 +; GFX6-NEXT: v_or_b32_e32 v1, v2, v1 ; GFX6-NEXT: v_readfirstlane_b32 s0, v0 ; GFX6-NEXT: v_readfirstlane_b32 s1, v1 ; GFX6-NEXT: ; return to shader part epilog @@ -1728,20 +1720,17 @@ ; GFX8-NEXT: v_lshlrev_b32_e64 v1, v1, s1 ; GFX8-NEXT: v_lshrrev_b32_e64 v2, v2, s0 ; GFX8-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX8-NEXT: 
v_mov_b32_e32 v4, 8 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_mov_b32_e32 v4, s10 -; GFX8-NEXT: v_or_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, v2, v0 -; GFX8-NEXT: v_and_b32_e32 v2, s10, v1 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX8-NEXT: v_and_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v2 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 8 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_mov_b32_e32 v4, 16 +; GFX8-NEXT: v_or_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_e32 v0, v3, v0 +; GFX8-NEXT: v_and_b32_e32 v3, s10, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v3 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; GFX8-NEXT: v_readfirstlane_b32 s0, v0 ; GFX8-NEXT: v_readfirstlane_b32 s1, v1 ; GFX8-NEXT: ; return to shader part epilog @@ -1749,60 +1738,60 @@ ; GFX9-LABEL: s_fshl_v2i24: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_lshr_b32 s7, s0, 8 -; GFX9-NEXT: s_movk_i32 s11, 0xff -; GFX9-NEXT: s_and_b32 s7, s7, s11 -; GFX9-NEXT: s_bfe_u32 s12, 8, 0x100000 -; GFX9-NEXT: s_lshr_b32 s8, s0, 16 -; GFX9-NEXT: 
s_lshr_b32 s9, s0, 24 -; GFX9-NEXT: s_and_b32 s0, s0, s11 -; GFX9-NEXT: s_lshl_b32 s7, s7, s12 +; GFX9-NEXT: s_movk_i32 s12, 0xff +; GFX9-NEXT: s_and_b32 s7, s7, s12 +; GFX9-NEXT: s_bfe_u32 s13, 8, 0x100000 +; GFX9-NEXT: s_lshr_b32 s9, s0, 16 +; GFX9-NEXT: s_lshr_b32 s10, s0, 24 +; GFX9-NEXT: s_and_b32 s0, s0, s12 +; GFX9-NEXT: s_lshl_b32 s7, s7, s13 ; GFX9-NEXT: s_or_b32 s0, s0, s7 -; GFX9-NEXT: s_and_b32 s7, s8, s11 +; GFX9-NEXT: s_and_b32 s7, s9, s12 ; GFX9-NEXT: s_bfe_u32 s7, s7, 0x100000 -; GFX9-NEXT: s_lshr_b32 s10, s1, 8 -; GFX9-NEXT: s_and_b32 s1, s1, s11 +; GFX9-NEXT: s_lshr_b32 s11, s1, 8 +; GFX9-NEXT: s_and_b32 s1, s1, s12 ; GFX9-NEXT: s_bfe_u32 s0, s0, 0x100000 ; GFX9-NEXT: s_lshl_b32 s7, s7, 16 -; GFX9-NEXT: s_lshl_b32 s1, s1, s12 +; GFX9-NEXT: s_lshl_b32 s1, s1, s13 ; GFX9-NEXT: s_or_b32 s0, s0, s7 -; GFX9-NEXT: s_and_b32 s7, s10, s11 -; GFX9-NEXT: s_or_b32 s1, s9, s1 +; GFX9-NEXT: s_and_b32 s7, s11, s12 +; GFX9-NEXT: s_or_b32 s1, s10, s1 ; GFX9-NEXT: s_bfe_u32 s7, s7, 0x100000 ; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000 ; GFX9-NEXT: s_lshl_b32 s7, s7, 16 ; GFX9-NEXT: s_or_b32 s1, s1, s7 ; GFX9-NEXT: s_lshr_b32 s7, s2, 8 -; GFX9-NEXT: s_and_b32 s7, s7, s11 -; GFX9-NEXT: s_lshr_b32 s8, s2, 16 -; GFX9-NEXT: s_lshr_b32 s9, s2, 24 -; GFX9-NEXT: s_and_b32 s2, s2, s11 -; GFX9-NEXT: s_lshl_b32 s7, s7, s12 +; GFX9-NEXT: s_and_b32 s7, s7, s12 +; GFX9-NEXT: s_lshr_b32 s9, s2, 16 +; GFX9-NEXT: s_lshr_b32 s10, s2, 24 +; GFX9-NEXT: s_and_b32 s2, s2, s12 +; GFX9-NEXT: s_lshl_b32 s7, s7, s13 ; GFX9-NEXT: s_or_b32 s2, s2, s7 -; GFX9-NEXT: s_and_b32 s7, s8, s11 +; GFX9-NEXT: s_and_b32 s7, s9, s12 ; GFX9-NEXT: s_bfe_u32 s7, s7, 0x100000 -; GFX9-NEXT: s_lshr_b32 s10, s3, 8 -; GFX9-NEXT: s_and_b32 s3, s3, s11 +; GFX9-NEXT: s_lshr_b32 s11, s3, 8 +; GFX9-NEXT: s_and_b32 s3, s3, s12 ; GFX9-NEXT: s_bfe_u32 s2, s2, 0x100000 ; GFX9-NEXT: s_lshl_b32 s7, s7, 16 ; GFX9-NEXT: s_or_b32 s2, s2, s7 -; GFX9-NEXT: s_and_b32 s7, s10, s11 -; GFX9-NEXT: s_lshl_b32 s3, s3, s12 +; GFX9-NEXT: 
s_and_b32 s7, s11, s12 +; GFX9-NEXT: s_lshl_b32 s3, s3, s13 ; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v0, 24 -; GFX9-NEXT: s_or_b32 s3, s9, s3 +; GFX9-NEXT: s_or_b32 s3, s10, s3 ; GFX9-NEXT: s_bfe_u32 s7, s7, 0x100000 ; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX9-NEXT: s_bfe_u32 s3, s3, 0x100000 ; GFX9-NEXT: s_lshl_b32 s7, s7, 16 ; GFX9-NEXT: s_or_b32 s3, s3, s7 ; GFX9-NEXT: s_lshr_b32 s7, s4, 8 -; GFX9-NEXT: s_and_b32 s7, s7, s11 +; GFX9-NEXT: s_and_b32 s7, s7, s12 ; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 -; GFX9-NEXT: s_lshr_b32 s8, s4, 16 -; GFX9-NEXT: s_lshr_b32 s9, s4, 24 -; GFX9-NEXT: s_and_b32 s4, s4, s11 -; GFX9-NEXT: s_lshl_b32 s7, s7, s12 +; GFX9-NEXT: s_lshr_b32 s9, s4, 16 +; GFX9-NEXT: s_lshr_b32 s10, s4, 24 +; GFX9-NEXT: s_and_b32 s4, s4, s12 +; GFX9-NEXT: s_lshl_b32 s7, s7, s13 ; GFX9-NEXT: s_or_b32 s4, s4, s7 -; GFX9-NEXT: s_and_b32 s7, s8, s11 +; GFX9-NEXT: s_and_b32 s7, s9, s12 ; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0 ; GFX9-NEXT: s_bfe_u32 s7, s7, 0x100000 ; GFX9-NEXT: s_bfe_u32 s4, s4, 0x100000 @@ -1810,24 +1799,24 @@ ; GFX9-NEXT: s_or_b32 s4, s4, s7 ; GFX9-NEXT: s_sub_i32 s7, 0, 24 ; GFX9-NEXT: v_mul_lo_u32 v1, s7, v0 -; GFX9-NEXT: s_lshr_b32 s10, s5, 8 -; GFX9-NEXT: s_and_b32 s5, s5, s11 -; GFX9-NEXT: s_lshl_b32 s5, s5, s12 +; GFX9-NEXT: s_lshr_b32 s11, s5, 8 +; GFX9-NEXT: s_and_b32 s5, s5, s12 +; GFX9-NEXT: s_lshl_b32 s5, s5, s13 ; GFX9-NEXT: v_mul_hi_u32 v1, v0, v1 -; GFX9-NEXT: s_and_b32 s8, s10, s11 -; GFX9-NEXT: s_or_b32 s5, s9, s5 -; GFX9-NEXT: s_bfe_u32 s8, s8, 0x100000 +; GFX9-NEXT: s_and_b32 s9, s11, s12 +; GFX9-NEXT: s_or_b32 s5, s10, s5 +; GFX9-NEXT: s_bfe_u32 s9, s9, 0x100000 ; GFX9-NEXT: v_add_u32_e32 v0, v0, v1 ; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v1, 24 ; GFX9-NEXT: v_rcp_iflag_f32_e32 v1, v1 ; GFX9-NEXT: v_mul_hi_u32 v0, s4, v0 ; GFX9-NEXT: s_bfe_u32 s5, s5, 0x100000 -; GFX9-NEXT: s_lshl_b32 s8, s8, 16 +; GFX9-NEXT: s_lshl_b32 s9, s9, 16 ; GFX9-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1 ; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX9-NEXT: 
v_mul_lo_u32 v0, v0, 24 -; GFX9-NEXT: s_or_b32 s5, s5, s8 -; GFX9-NEXT: s_mov_b32 s8, 0xffffff +; GFX9-NEXT: s_or_b32 s5, s5, s9 +; GFX9-NEXT: s_mov_b32 s9, 0xffffff ; GFX9-NEXT: v_mul_lo_u32 v3, s7, v1 ; GFX9-NEXT: v_sub_u32_e32 v0, s4, v0 ; GFX9-NEXT: v_subrev_u32_e32 v2, 24, v0 @@ -1842,8 +1831,8 @@ ; GFX9-NEXT: v_sub_u32_e32 v2, 23, v0 ; GFX9-NEXT: s_lshr_b32 s2, s2, 1 ; GFX9-NEXT: v_mul_lo_u32 v1, v1, 24 -; GFX9-NEXT: v_and_b32_e32 v2, s8, v2 -; GFX9-NEXT: v_and_b32_e32 v0, s8, v0 +; GFX9-NEXT: v_and_b32_e32 v2, s9, v2 +; GFX9-NEXT: v_and_b32_e32 v0, s9, v0 ; GFX9-NEXT: v_lshrrev_b32_e64 v2, v2, s2 ; GFX9-NEXT: v_sub_u32_e32 v1, s5, v1 ; GFX9-NEXT: v_lshl_or_b32 v0, s0, v0, v2 @@ -1861,17 +1850,16 @@ ; GFX9-NEXT: v_lshrrev_b32_e64 v2, v2, s0 ; GFX9-NEXT: v_lshl_or_b32 v1, s1, v1, v2 ; GFX9-NEXT: s_mov_b32 s6, 8 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_e32 v4, s11, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: v_and_or_b32 v2, v0, s11, v2 -; GFX9-NEXT: v_and_b32_sdwa v0, v0, s11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX9-NEXT: v_or3_b32 v0, v2, v0, v4 -; GFX9-NEXT: v_and_or_b32 v1, v3, s11, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_b32_e32 v3, s12, v1 +; GFX9-NEXT: s_mov_b32 s8, 16 +; GFX9-NEXT: v_and_or_b32 v2, v0, s12, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX9-NEXT: v_or3_b32 v0, v2, v0, v3 +; GFX9-NEXT: v_bfe_u32 v2, v1, 8, 8 +; GFX9-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX9-NEXT: 
v_lshl_or_b32 v1, v1, 8, v2 ; GFX9-NEXT: v_readfirstlane_b32 s0, v0 ; GFX9-NEXT: v_readfirstlane_b32 s1, v1 ; GFX9-NEXT: ; return to shader part epilog @@ -1880,130 +1868,129 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v0, 24 ; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v1, 24 -; GFX10-NEXT: s_movk_i32 s8, 0xff -; GFX10-NEXT: s_lshr_b32 s11, s1, 8 -; GFX10-NEXT: s_bfe_u32 s10, 8, 0x100000 +; GFX10-NEXT: s_sub_i32 s14, 0, 24 +; GFX10-NEXT: s_movk_i32 s9, 0xff +; GFX10-NEXT: s_lshr_b32 s10, s1, 8 ; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX10-NEXT: v_rcp_iflag_f32_e32 v1, v1 -; GFX10-NEXT: s_and_b32 s1, s1, s8 -; GFX10-NEXT: s_lshr_b32 s9, s0, 24 -; GFX10-NEXT: s_lshl_b32 s1, s1, s10 +; GFX10-NEXT: s_bfe_u32 s11, 8, 0x100000 +; GFX10-NEXT: s_and_b32 s1, s1, s9 ; GFX10-NEXT: s_lshr_b32 s6, s0, 8 -; GFX10-NEXT: s_or_b32 s1, s9, s1 -; GFX10-NEXT: s_sub_i32 s9, 0, 24 -; GFX10-NEXT: s_and_b32 s6, s6, s8 -; GFX10-NEXT: s_lshr_b32 s7, s0, 16 +; GFX10-NEXT: s_lshr_b32 s8, s0, 24 +; GFX10-NEXT: s_lshl_b32 s1, s1, s11 +; GFX10-NEXT: s_and_b32 s6, s6, s9 +; GFX10-NEXT: s_or_b32 s1, s8, s1 +; GFX10-NEXT: s_lshr_b32 s8, s4, 8 ; GFX10-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX10-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1 -; GFX10-NEXT: s_and_b32 s0, s0, s8 -; GFX10-NEXT: s_lshl_b32 s6, s6, s10 -; GFX10-NEXT: s_lshr_b32 s12, s4, 24 +; GFX10-NEXT: s_and_b32 s8, s8, s9 +; GFX10-NEXT: s_lshr_b32 s7, s0, 16 +; GFX10-NEXT: s_and_b32 s0, s0, s9 ; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0 ; GFX10-NEXT: v_cvt_u32_f32_e32 v1, v1 +; GFX10-NEXT: s_lshl_b32 s6, s6, s11 +; GFX10-NEXT: s_lshr_b32 s12, s4, 24 ; GFX10-NEXT: s_or_b32 s0, s0, s6 -; GFX10-NEXT: s_and_b32 s6, s7, s8 -; GFX10-NEXT: s_lshr_b32 s7, s4, 8 -; GFX10-NEXT: v_mul_lo_u32 v2, s9, v0 -; GFX10-NEXT: v_mul_lo_u32 v3, s9, v1 -; GFX10-NEXT: s_bfe_u32 s6, s6, 0x100000 -; GFX10-NEXT: s_bfe_u32 s0, s0, 0x100000 -; GFX10-NEXT: s_lshl_b32 s6, s6, 16 -; GFX10-NEXT: s_and_b32 s7, s7, s8 -; GFX10-NEXT: s_or_b32 s0, s0, s6 -; GFX10-NEXT: 
s_and_b32 s6, s11, s8 -; GFX10-NEXT: v_mul_hi_u32 v2, v0, v2 -; GFX10-NEXT: s_lshr_b32 s11, s4, 16 -; GFX10-NEXT: s_and_b32 s4, s4, s8 -; GFX10-NEXT: s_lshl_b32 s7, s7, s10 -; GFX10-NEXT: s_and_b32 s9, s11, s8 -; GFX10-NEXT: s_or_b32 s4, s4, s7 -; GFX10-NEXT: s_bfe_u32 s7, s9, 0x100000 +; GFX10-NEXT: v_mul_lo_u32 v2, s14, v0 +; GFX10-NEXT: v_mul_lo_u32 v3, s14, v1 +; GFX10-NEXT: s_and_b32 s6, s7, s9 +; GFX10-NEXT: s_and_b32 s7, s10, s9 +; GFX10-NEXT: s_lshr_b32 s10, s4, 16 +; GFX10-NEXT: s_and_b32 s4, s4, s9 +; GFX10-NEXT: s_lshl_b32 s8, s8, s11 ; GFX10-NEXT: s_lshr_b32 s13, s5, 8 -; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v2 -; GFX10-NEXT: v_mul_hi_u32 v2, v1, v3 -; GFX10-NEXT: s_and_b32 s5, s5, s8 +; GFX10-NEXT: v_mul_hi_u32 v2, v0, v2 +; GFX10-NEXT: s_or_b32 s4, s4, s8 +; GFX10-NEXT: s_and_b32 s8, s10, s9 +; GFX10-NEXT: v_mul_hi_u32 v3, v1, v3 +; GFX10-NEXT: s_bfe_u32 s8, s8, 0x100000 +; GFX10-NEXT: s_and_b32 s5, s5, s9 ; GFX10-NEXT: s_bfe_u32 s4, s4, 0x100000 -; GFX10-NEXT: s_lshl_b32 s7, s7, 16 -; GFX10-NEXT: s_lshl_b32 s5, s5, s10 -; GFX10-NEXT: s_or_b32 s4, s4, s7 -; GFX10-NEXT: s_and_b32 s7, s13, s8 +; GFX10-NEXT: s_lshl_b32 s8, s8, 16 +; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v2 +; GFX10-NEXT: s_lshl_b32 s5, s5, s11 +; GFX10-NEXT: s_or_b32 s4, s4, s8 +; GFX10-NEXT: s_and_b32 s8, s13, s9 ; GFX10-NEXT: s_or_b32 s5, s12, s5 -; GFX10-NEXT: s_bfe_u32 s7, s7, 0x100000 +; GFX10-NEXT: s_bfe_u32 s8, s8, 0x100000 ; GFX10-NEXT: v_mul_hi_u32 v0, s4, v0 -; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v2 +; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v3 ; GFX10-NEXT: s_bfe_u32 s5, s5, 0x100000 -; GFX10-NEXT: s_lshl_b32 s7, s7, 16 -; GFX10-NEXT: s_lshr_b32 s9, s2, 16 -; GFX10-NEXT: s_or_b32 s5, s5, s7 -; GFX10-NEXT: s_lshr_b32 s7, s2, 8 +; GFX10-NEXT: s_lshl_b32 s8, s8, 16 +; GFX10-NEXT: s_lshr_b32 s10, s2, 16 +; GFX10-NEXT: s_or_b32 s5, s5, s8 +; GFX10-NEXT: s_lshr_b32 s8, s2, 8 ; GFX10-NEXT: v_mul_hi_u32 v1, s5, v1 ; GFX10-NEXT: v_mul_lo_u32 v0, v0, 24 -; GFX10-NEXT: s_lshr_b32 s11, s2, 
24 -; GFX10-NEXT: s_and_b32 s2, s2, s8 -; GFX10-NEXT: s_lshr_b32 s12, s3, 8 -; GFX10-NEXT: s_and_b32 s3, s3, s8 -; GFX10-NEXT: s_bfe_u32 s6, s6, 0x100000 -; GFX10-NEXT: s_lshl_b32 s3, s3, s10 +; GFX10-NEXT: s_and_b32 s8, s8, s9 +; GFX10-NEXT: s_and_b32 s12, s2, s9 +; GFX10-NEXT: s_lshl_b32 s8, s8, s11 +; GFX10-NEXT: s_and_b32 s10, s10, s9 +; GFX10-NEXT: s_or_b32 s8, s12, s8 +; GFX10-NEXT: s_lshr_b32 s2, s2, 24 ; GFX10-NEXT: v_mul_lo_u32 v1, v1, 24 ; GFX10-NEXT: v_sub_nc_u32_e32 v0, s4, v0 -; GFX10-NEXT: s_and_b32 s4, s7, s8 -; GFX10-NEXT: s_and_b32 s7, s9, s8 -; GFX10-NEXT: s_lshl_b32 s4, s4, s10 -; GFX10-NEXT: s_or_b32 s3, s11, s3 +; GFX10-NEXT: s_bfe_u32 s4, s8, 0x100000 +; GFX10-NEXT: s_bfe_u32 s8, s10, 0x100000 +; GFX10-NEXT: s_bfe_u32 s6, s6, 0x100000 +; GFX10-NEXT: s_bfe_u32 s7, s7, 0x100000 ; GFX10-NEXT: v_subrev_nc_u32_e32 v2, 24, v0 ; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v0 ; GFX10-NEXT: v_sub_nc_u32_e32 v1, s5, v1 -; GFX10-NEXT: s_or_b32 s2, s2, s4 -; GFX10-NEXT: s_bfe_u32 s4, s7, 0x100000 -; GFX10-NEXT: s_mov_b32 s5, 0xffffff +; GFX10-NEXT: s_lshl_b32 s5, s8, 16 +; GFX10-NEXT: s_lshr_b32 s8, s3, 8 +; GFX10-NEXT: s_and_b32 s3, s3, s9 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo -; GFX10-NEXT: v_subrev_nc_u32_e32 v2, 24, v1 +; GFX10-NEXT: v_subrev_nc_u32_e32 v3, 24, v1 ; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v1 -; GFX10-NEXT: s_bfe_u32 s2, s2, 0x100000 -; GFX10-NEXT: s_lshl_b32 s4, s4, 16 -; GFX10-NEXT: v_subrev_nc_u32_e32 v3, 24, v0 -; GFX10-NEXT: s_or_b32 s2, s2, s4 -; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo +; GFX10-NEXT: s_lshl_b32 s3, s3, s11 +; GFX10-NEXT: s_or_b32 s4, s4, s5 +; GFX10-NEXT: v_subrev_nc_u32_e32 v2, 24, v0 +; GFX10-NEXT: s_or_b32 s2, s2, s3 +; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo ; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v0 -; GFX10-NEXT: s_and_b32 s4, s12, s8 -; GFX10-NEXT: s_lshr_b32 s2, s2, 1 -; GFX10-NEXT: s_bfe_u32 s4, s4, 0x100000 -; GFX10-NEXT: v_subrev_nc_u32_e32 v2, 24, v1 -; GFX10-NEXT: 
v_cndmask_b32_e32 v0, v0, v3, vcc_lo +; GFX10-NEXT: s_and_b32 s3, s8, s9 +; GFX10-NEXT: s_mov_b32 s5, 0xffffff +; GFX10-NEXT: s_bfe_u32 s3, s3, 0x100000 +; GFX10-NEXT: v_subrev_nc_u32_e32 v3, 24, v1 +; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo ; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v1 -; GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000 -; GFX10-NEXT: s_lshl_b32 s6, s6, 16 -; GFX10-NEXT: v_sub_nc_u32_e32 v3, 23, v0 -; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo -; GFX10-NEXT: v_and_b32_e32 v0, s5, v0 -; GFX10-NEXT: v_and_b32_e32 v2, s5, v3 +; GFX10-NEXT: s_bfe_u32 s2, s2, 0x100000 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_bfe_u32 s0, s0, 0x100000 +; GFX10-NEXT: v_sub_nc_u32_e32 v2, 23, v0 +; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo ; GFX10-NEXT: v_mov_b32_e32 v3, 0xffffff +; GFX10-NEXT: s_or_b32 s2, s2, s3 +; GFX10-NEXT: s_lshr_b32 s3, s4, 1 +; GFX10-NEXT: v_and_b32_e32 v2, s5, v2 ; GFX10-NEXT: v_sub_nc_u32_e32 v4, 23, v1 -; GFX10-NEXT: v_lshrrev_b32_e64 v2, v2, s2 -; GFX10-NEXT: s_bfe_u32 s2, s3, 0x100000 -; GFX10-NEXT: s_lshl_b32 s3, s4, 16 +; GFX10-NEXT: s_lshr_b32 s2, s2, 1 +; GFX10-NEXT: v_and_b32_e32 v0, s5, v0 +; GFX10-NEXT: v_and_b32_e32 v1, v1, v3 +; GFX10-NEXT: v_lshrrev_b32_e64 v2, v2, s3 ; GFX10-NEXT: v_and_b32_e32 v4, v4, v3 -; GFX10-NEXT: s_or_b32 s2, s2, s3 +; GFX10-NEXT: s_lshl_b32 s6, s6, 16 +; GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000 +; GFX10-NEXT: s_lshl_b32 s7, s7, 16 +; GFX10-NEXT: s_or_b32 s0, s0, s6 +; GFX10-NEXT: v_lshrrev_b32_e64 v3, v4, s2 +; GFX10-NEXT: s_or_b32 s1, s1, s7 ; GFX10-NEXT: v_lshl_or_b32 v0, s0, v0, v2 -; GFX10-NEXT: s_lshr_b32 s0, s2, 1 -; GFX10-NEXT: v_and_b32_e32 v1, v1, v3 -; GFX10-NEXT: v_lshrrev_b32_e64 v2, v4, s0 -; GFX10-NEXT: s_or_b32 s0, s1, s6 -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX10-NEXT: v_lshl_or_b32 v1, s0, v1, v2 ; GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: 
v_and_b32_e32 v3, s8, v1 -; GFX10-NEXT: v_and_b32_sdwa v4, v1, s8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1 -; GFX10-NEXT: v_and_or_b32 v2, v0, s8, v2 -; GFX10-NEXT: v_and_b32_sdwa v0, v0, s8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX10-NEXT: v_lshl_or_b32 v1, s1, v1, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: s_mov_b32 s0, 16 +; GFX10-NEXT: v_and_b32_e32 v3, s9, v1 +; GFX10-NEXT: v_bfe_u32 v4, v1, 8, 8 +; GFX10-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX10-NEXT: v_and_or_b32 v2, v0, s9, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 8, v4 +; GFX10-NEXT: v_lshl_or_b32 v1, v1, 8, v4 ; GFX10-NEXT: v_or3_b32 v0, v2, v0, v3 -; GFX10-NEXT: v_and_or_b32 v1, v1, s8, v4 -; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: v_readfirstlane_b32 s1, v1 +; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: ; return to shader part epilog %lhs = bitcast i48 %lhs.arg to <2 x i24> %rhs = bitcast i48 %rhs.arg to <2 x i24> Index: llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll +++ llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll @@ -538,26 +538,25 @@ define amdgpu_ps i16 @s_fshr_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg, i16 inreg %amt.arg) { ; GFX6-LABEL: s_fshr_v2i8: ; GFX6: ; %bb.0: -; GFX6-NEXT: s_movk_i32 s7, 0xff ; GFX6-NEXT: s_lshr_b32 s3, s0, 8 -; GFX6-NEXT: s_lshr_b32 s4, s1, 8 -; GFX6-NEXT: s_lshr_b32 s5, s2, 8 -; GFX6-NEXT: s_and_b32 s6, s2, 7 -; GFX6-NEXT: s_and_b32 s1, s1, s7 +; GFX6-NEXT: s_lshr_b32 s4, s2, 8 +; GFX6-NEXT: s_and_b32 s5, s2, 7 ; GFX6-NEXT: s_andn2_b32 s2, 7, s2 ; GFX6-NEXT: s_lshl_b32 s0, s0, 1 +; GFX6-NEXT: s_movk_i32 
s6, 0xff ; GFX6-NEXT: s_lshl_b32 s0, s0, s2 -; GFX6-NEXT: s_lshr_b32 s1, s1, s6 -; GFX6-NEXT: s_andn2_b32 s2, 7, s5 +; GFX6-NEXT: s_and_b32 s2, s1, s6 +; GFX6-NEXT: s_lshr_b32 s2, s2, s5 +; GFX6-NEXT: s_or_b32 s0, s0, s2 +; GFX6-NEXT: s_and_b32 s2, s4, 7 +; GFX6-NEXT: s_bfe_u32 s1, s1, 0x80008 +; GFX6-NEXT: s_andn2_b32 s4, 7, s4 ; GFX6-NEXT: s_lshl_b32 s3, s3, 1 -; GFX6-NEXT: s_or_b32 s0, s0, s1 -; GFX6-NEXT: s_lshl_b32 s2, s3, s2 -; GFX6-NEXT: s_and_b32 s1, s5, 7 -; GFX6-NEXT: s_and_b32 s3, s4, s7 -; GFX6-NEXT: s_lshr_b32 s1, s3, s1 -; GFX6-NEXT: s_or_b32 s1, s2, s1 -; GFX6-NEXT: s_and_b32 s1, s1, s7 -; GFX6-NEXT: s_and_b32 s0, s0, s7 +; GFX6-NEXT: s_lshl_b32 s3, s3, s4 +; GFX6-NEXT: s_lshr_b32 s1, s1, s2 +; GFX6-NEXT: s_or_b32 s1, s3, s1 +; GFX6-NEXT: s_and_b32 s1, s1, s6 +; GFX6-NEXT: s_and_b32 s0, s0, s6 ; GFX6-NEXT: s_lshl_b32 s1, s1, 8 ; GFX6-NEXT: s_or_b32 s0, s0, s1 ; GFX6-NEXT: ; return to shader part epilog @@ -660,26 +659,25 @@ ; GFX6-LABEL: v_fshr_v2i8: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX6-NEXT: v_lshrrev_b32_e32 v5, 8, v2 -; GFX6-NEXT: v_and_b32_e32 v6, 7, v2 +; GFX6-NEXT: v_lshrrev_b32_e32 v4, 8, v2 +; GFX6-NEXT: v_and_b32_e32 v5, 7, v2 ; GFX6-NEXT: v_xor_b32_e32 v2, -1, v2 ; GFX6-NEXT: v_lshrrev_b32_e32 v3, 8, v0 ; GFX6-NEXT: v_and_b32_e32 v2, 7, v2 ; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0 ; GFX6-NEXT: s_movk_i32 s4, 0xff -; GFX6-NEXT: v_lshrrev_b32_e32 v4, 8, v1 -; GFX6-NEXT: v_and_b32_e32 v1, s4, v1 ; GFX6-NEXT: v_lshlrev_b32_e32 v0, v2, v0 -; GFX6-NEXT: v_xor_b32_e32 v2, -1, v5 -; GFX6-NEXT: v_lshrrev_b32_e32 v1, v6, v1 -; GFX6-NEXT: v_and_b32_e32 v2, 7, v2 +; GFX6-NEXT: v_and_b32_e32 v2, s4, v1 +; GFX6-NEXT: v_lshrrev_b32_e32 v2, v5, v2 +; GFX6-NEXT: v_or_b32_e32 v0, v0, v2 +; GFX6-NEXT: v_and_b32_e32 v2, 7, v4 +; GFX6-NEXT: v_xor_b32_e32 v4, -1, v4 +; GFX6-NEXT: v_bfe_u32 v1, v1, 8, 8 +; GFX6-NEXT: v_and_b32_e32 v4, 7, v4 ; GFX6-NEXT: v_lshlrev_b32_e32 v3, 1, v3 -; GFX6-NEXT: v_or_b32_e32 v0, v0, v1 -; 
GFX6-NEXT: v_lshlrev_b32_e32 v2, v2, v3 -; GFX6-NEXT: v_and_b32_e32 v1, 7, v5 -; GFX6-NEXT: v_and_b32_e32 v3, s4, v4 -; GFX6-NEXT: v_lshrrev_b32_e32 v1, v1, v3 -; GFX6-NEXT: v_or_b32_e32 v1, v2, v1 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, v4, v3 +; GFX6-NEXT: v_lshrrev_b32_e32 v1, v2, v1 +; GFX6-NEXT: v_or_b32_e32 v1, v3, v1 ; GFX6-NEXT: v_and_b32_e32 v1, s4, v1 ; GFX6-NEXT: v_and_b32_e32 v0, 0xff, v0 ; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1 @@ -774,51 +772,49 @@ define amdgpu_ps i32 @s_fshr_v4i8(i32 inreg %lhs.arg, i32 inreg %rhs.arg, i32 inreg %amt.arg) { ; GFX6-LABEL: s_fshr_v4i8: ; GFX6: ; %bb.0: -; GFX6-NEXT: s_movk_i32 s13, 0xff ; GFX6-NEXT: s_lshr_b32 s3, s0, 8 ; GFX6-NEXT: s_lshr_b32 s4, s0, 16 ; GFX6-NEXT: s_lshr_b32 s5, s0, 24 -; GFX6-NEXT: s_lshr_b32 s6, s1, 8 -; GFX6-NEXT: s_lshr_b32 s7, s1, 16 -; GFX6-NEXT: s_lshr_b32 s8, s1, 24 -; GFX6-NEXT: s_lshr_b32 s9, s2, 8 -; GFX6-NEXT: s_lshr_b32 s10, s2, 16 -; GFX6-NEXT: s_lshr_b32 s11, s2, 24 -; GFX6-NEXT: s_and_b32 s12, s2, 7 -; GFX6-NEXT: s_and_b32 s1, s1, s13 +; GFX6-NEXT: s_lshr_b32 s7, s2, 8 +; GFX6-NEXT: s_lshr_b32 s8, s2, 16 +; GFX6-NEXT: s_lshr_b32 s9, s2, 24 +; GFX6-NEXT: s_and_b32 s10, s2, 7 ; GFX6-NEXT: s_andn2_b32 s2, 7, s2 ; GFX6-NEXT: s_lshl_b32 s0, s0, 1 +; GFX6-NEXT: s_movk_i32 s11, 0xff ; GFX6-NEXT: s_lshl_b32 s0, s0, s2 -; GFX6-NEXT: s_lshr_b32 s1, s1, s12 -; GFX6-NEXT: s_andn2_b32 s2, 7, s9 +; GFX6-NEXT: s_and_b32 s2, s1, s11 +; GFX6-NEXT: s_lshr_b32 s2, s2, s10 +; GFX6-NEXT: s_or_b32 s0, s0, s2 +; GFX6-NEXT: s_and_b32 s2, s7, 7 +; GFX6-NEXT: s_andn2_b32 s7, 7, s7 ; GFX6-NEXT: s_lshl_b32 s3, s3, 1 -; GFX6-NEXT: s_or_b32 s0, s0, s1 -; GFX6-NEXT: s_lshl_b32 s2, s3, s2 -; GFX6-NEXT: s_and_b32 s1, s9, 7 -; GFX6-NEXT: s_and_b32 s3, s6, s13 -; GFX6-NEXT: s_lshr_b32 s1, s3, s1 -; GFX6-NEXT: s_andn2_b32 s3, 7, s10 -; GFX6-NEXT: s_lshl_b32 s4, s4, 1 -; GFX6-NEXT: s_or_b32 s1, s2, s1 -; GFX6-NEXT: s_lshl_b32 s3, s4, s3 -; GFX6-NEXT: s_and_b32 s2, s10, 7 -; GFX6-NEXT: s_and_b32 s4, s7, s13 -; GFX6-NEXT: 
s_lshr_b32 s2, s4, s2 -; GFX6-NEXT: s_and_b32 s1, s1, s13 +; GFX6-NEXT: s_lshl_b32 s3, s3, s7 +; GFX6-NEXT: s_bfe_u32 s7, s1, 0x80008 +; GFX6-NEXT: s_lshr_b32 s2, s7, s2 ; GFX6-NEXT: s_or_b32 s2, s3, s2 -; GFX6-NEXT: s_and_b32 s3, s11, 7 -; GFX6-NEXT: s_andn2_b32 s4, 7, s11 +; GFX6-NEXT: s_lshr_b32 s6, s1, 24 +; GFX6-NEXT: s_and_b32 s3, s8, 7 +; GFX6-NEXT: s_bfe_u32 s1, s1, 0x80010 +; GFX6-NEXT: s_andn2_b32 s7, 7, s8 +; GFX6-NEXT: s_lshl_b32 s4, s4, 1 +; GFX6-NEXT: s_lshr_b32 s1, s1, s3 +; GFX6-NEXT: s_lshl_b32 s4, s4, s7 +; GFX6-NEXT: s_or_b32 s1, s4, s1 +; GFX6-NEXT: s_and_b32 s3, s9, 7 +; GFX6-NEXT: s_and_b32 s2, s2, s11 +; GFX6-NEXT: s_andn2_b32 s4, 7, s9 ; GFX6-NEXT: s_lshl_b32 s5, s5, 1 -; GFX6-NEXT: s_and_b32 s0, s0, s13 -; GFX6-NEXT: s_lshl_b32 s1, s1, 8 -; GFX6-NEXT: s_or_b32 s0, s0, s1 -; GFX6-NEXT: s_and_b32 s1, s2, s13 +; GFX6-NEXT: s_and_b32 s1, s1, s11 ; GFX6-NEXT: s_lshl_b32 s4, s5, s4 -; GFX6-NEXT: s_lshr_b32 s3, s8, s3 -; GFX6-NEXT: s_lshl_b32 s1, s1, 16 +; GFX6-NEXT: s_lshr_b32 s3, s6, s3 +; GFX6-NEXT: s_and_b32 s0, s0, s11 +; GFX6-NEXT: s_lshl_b32 s2, s2, 8 ; GFX6-NEXT: s_or_b32 s3, s4, s3 +; GFX6-NEXT: s_or_b32 s0, s0, s2 +; GFX6-NEXT: s_lshl_b32 s1, s1, 16 ; GFX6-NEXT: s_or_b32 s0, s0, s1 -; GFX6-NEXT: s_and_b32 s1, s3, s13 +; GFX6-NEXT: s_and_b32 s1, s3, s11 ; GFX6-NEXT: s_lshl_b32 s1, s1, 24 ; GFX6-NEXT: s_or_b32 s0, s0, s1 ; GFX6-NEXT: ; return to shader part epilog @@ -996,54 +992,51 @@ ; GFX6-LABEL: v_fshr_v4i8: ; GFX6: ; %bb.0: ; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX6-NEXT: v_lshrrev_b32_e32 v9, 8, v2 -; GFX6-NEXT: v_lshrrev_b32_e32 v10, 16, v2 -; GFX6-NEXT: v_lshrrev_b32_e32 v11, 24, v2 -; GFX6-NEXT: v_and_b32_e32 v12, 7, v2 +; GFX6-NEXT: v_lshrrev_b32_e32 v7, 8, v2 +; GFX6-NEXT: v_lshrrev_b32_e32 v8, 16, v2 +; GFX6-NEXT: v_lshrrev_b32_e32 v9, 24, v2 +; GFX6-NEXT: v_and_b32_e32 v10, 7, v2 ; GFX6-NEXT: v_xor_b32_e32 v2, -1, v2 -; GFX6-NEXT: s_movk_i32 s4, 0xff ; GFX6-NEXT: v_lshrrev_b32_e32 v3, 8, v0 ; GFX6-NEXT: 
v_lshrrev_b32_e32 v4, 16, v0 ; GFX6-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX6-NEXT: v_lshrrev_b32_e32 v6, 8, v1 -; GFX6-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; GFX6-NEXT: v_lshrrev_b32_e32 v8, 24, v1 -; GFX6-NEXT: v_and_b32_e32 v1, s4, v1 +; GFX6-NEXT: v_and_b32_e32 v11, 0xff, v1 ; GFX6-NEXT: v_and_b32_e32 v2, 7, v2 ; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0 ; GFX6-NEXT: v_lshlrev_b32_e32 v0, v2, v0 -; GFX6-NEXT: v_lshrrev_b32_e32 v1, v12, v1 -; GFX6-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX6-NEXT: v_and_b32_e32 v1, 7, v9 -; GFX6-NEXT: v_xor_b32_e32 v9, -1, v9 -; GFX6-NEXT: v_and_b32_e32 v6, s4, v6 -; GFX6-NEXT: v_lshrrev_b32_e32 v1, v1, v6 -; GFX6-NEXT: v_xor_b32_e32 v6, -1, v10 -; GFX6-NEXT: v_and_b32_e32 v9, 7, v9 +; GFX6-NEXT: v_lshrrev_b32_e32 v10, v10, v11 +; GFX6-NEXT: v_or_b32_e32 v0, v0, v10 +; GFX6-NEXT: v_and_b32_e32 v10, 7, v7 +; GFX6-NEXT: v_xor_b32_e32 v7, -1, v7 +; GFX6-NEXT: v_and_b32_e32 v7, 7, v7 ; GFX6-NEXT: v_lshlrev_b32_e32 v3, 1, v3 -; GFX6-NEXT: v_mov_b32_e32 v2, 0xff -; GFX6-NEXT: v_lshlrev_b32_e32 v3, v9, v3 -; GFX6-NEXT: v_and_b32_e32 v6, 7, v6 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, v7, v3 +; GFX6-NEXT: v_bfe_u32 v7, v1, 8, 8 +; GFX6-NEXT: v_lshrrev_b32_e32 v7, v10, v7 +; GFX6-NEXT: v_or_b32_e32 v3, v3, v7 +; GFX6-NEXT: v_and_b32_e32 v7, 7, v8 +; GFX6-NEXT: v_xor_b32_e32 v8, -1, v8 +; GFX6-NEXT: v_lshrrev_b32_e32 v6, 24, v1 +; GFX6-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX6-NEXT: v_and_b32_e32 v8, 7, v8 ; GFX6-NEXT: v_lshlrev_b32_e32 v4, 1, v4 -; GFX6-NEXT: v_or_b32_e32 v1, v3, v1 -; GFX6-NEXT: v_lshlrev_b32_e32 v4, v6, v4 -; GFX6-NEXT: v_and_b32_e32 v3, 7, v10 -; GFX6-NEXT: v_and_b32_e32 v6, v7, v2 -; GFX6-NEXT: v_lshrrev_b32_e32 v3, v3, v6 -; GFX6-NEXT: v_xor_b32_e32 v6, -1, v11 -; GFX6-NEXT: v_and_b32_e32 v1, v1, v2 -; GFX6-NEXT: v_or_b32_e32 v3, v4, v3 -; GFX6-NEXT: v_and_b32_e32 v4, 7, v11 -; GFX6-NEXT: v_and_b32_e32 v6, 7, v6 +; GFX6-NEXT: v_mov_b32_e32 v2, 0xff +; GFX6-NEXT: v_lshrrev_b32_e32 v1, v7, v1 +; GFX6-NEXT: v_xor_b32_e32 v7, -1, v9 +; 
GFX6-NEXT: v_lshlrev_b32_e32 v4, v8, v4 +; GFX6-NEXT: v_or_b32_e32 v1, v4, v1 +; GFX6-NEXT: v_and_b32_e32 v4, 7, v9 +; GFX6-NEXT: v_and_b32_e32 v3, v3, v2 +; GFX6-NEXT: v_and_b32_e32 v7, 7, v7 ; GFX6-NEXT: v_lshlrev_b32_e32 v5, 1, v5 +; GFX6-NEXT: v_and_b32_e32 v1, v1, v2 +; GFX6-NEXT: v_lshlrev_b32_e32 v5, v7, v5 +; GFX6-NEXT: v_lshrrev_b32_e32 v4, v4, v6 ; GFX6-NEXT: v_and_b32_e32 v0, v0, v2 -; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX6-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX6-NEXT: v_and_b32_e32 v1, v3, v2 -; GFX6-NEXT: v_lshlrev_b32_e32 v5, v6, v5 -; GFX6-NEXT: v_lshrrev_b32_e32 v4, v4, v8 -; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, 8, v3 ; GFX6-NEXT: v_or_b32_e32 v4, v5, v4 +; GFX6-NEXT: v_or_b32_e32 v0, v0, v3 +; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1 ; GFX6-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX6-NEXT: v_and_b32_e32 v1, v4, v2 ; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1 @@ -1477,69 +1470,64 @@ define amdgpu_ps i48 @s_fshr_v2i24(i48 inreg %lhs.arg, i48 inreg %rhs.arg, i48 inreg %amt.arg) { ; GFX6-LABEL: s_fshr_v2i24: ; GFX6: ; %bb.0: -; GFX6-NEXT: s_movk_i32 s10, 0xff -; GFX6-NEXT: s_lshr_b32 s9, s1, 8 -; GFX6-NEXT: s_and_b32 s1, s1, s10 -; GFX6-NEXT: s_lshr_b32 s6, s0, 8 -; GFX6-NEXT: s_lshr_b32 s8, s0, 24 -; GFX6-NEXT: s_lshl_b32 s1, s1, 8 -; GFX6-NEXT: s_or_b32 s1, s8, s1 -; GFX6-NEXT: s_and_b32 s6, s6, s10 -; GFX6-NEXT: s_lshr_b32 s8, s2, 8 -; GFX6-NEXT: s_and_b32 s8, s8, s10 -; GFX6-NEXT: s_lshr_b32 s7, s0, 16 -; GFX6-NEXT: s_and_b32 s0, s0, s10 -; GFX6-NEXT: s_lshl_b32 s6, s6, 8 -; GFX6-NEXT: s_or_b32 s0, s0, s6 -; GFX6-NEXT: s_and_b32 s6, s7, s10 -; GFX6-NEXT: s_and_b32 s7, s9, s10 -; GFX6-NEXT: s_lshr_b32 s9, s2, 16 -; GFX6-NEXT: s_lshr_b32 s11, s2, 24 -; GFX6-NEXT: s_and_b32 s2, s2, s10 -; GFX6-NEXT: s_lshl_b32 s8, s8, 8 -; GFX6-NEXT: s_or_b32 s2, s2, s8 -; GFX6-NEXT: s_and_b32 s8, s9, s10 +; GFX6-NEXT: s_movk_i32 s9, 0xff +; GFX6-NEXT: s_mov_b32 s11, 0x80008 +; GFX6-NEXT: s_lshr_b32 s6, s0, 16 ; GFX6-NEXT: 
v_cvt_f32_ubyte0_e32 v0, 24 -; GFX6-NEXT: s_bfe_u32 s8, s8, 0x100000 +; GFX6-NEXT: s_lshr_b32 s8, s1, 8 +; GFX6-NEXT: s_and_b32 s1, s1, s9 +; GFX6-NEXT: s_lshr_b32 s7, s0, 24 +; GFX6-NEXT: s_and_b32 s10, s0, s9 +; GFX6-NEXT: s_bfe_u32 s0, s0, s11 +; GFX6-NEXT: s_lshl_b32 s0, s0, 8 +; GFX6-NEXT: s_lshl_b32 s1, s1, 8 ; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0 +; GFX6-NEXT: s_or_b32 s0, s10, s0 +; GFX6-NEXT: s_or_b32 s1, s7, s1 +; GFX6-NEXT: s_and_b32 s7, s8, s9 +; GFX6-NEXT: s_lshr_b32 s8, s2, 16 +; GFX6-NEXT: s_lshr_b32 s10, s2, 24 +; GFX6-NEXT: s_and_b32 s13, s2, s9 +; GFX6-NEXT: s_bfe_u32 s2, s2, s11 +; GFX6-NEXT: s_lshl_b32 s2, s2, 8 +; GFX6-NEXT: s_and_b32 s8, s8, s9 +; GFX6-NEXT: s_or_b32 s2, s13, s2 +; GFX6-NEXT: s_bfe_u32 s8, s8, 0x100000 +; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX6-NEXT: s_lshr_b32 s12, s3, 8 -; GFX6-NEXT: s_and_b32 s3, s3, s10 +; GFX6-NEXT: s_and_b32 s3, s3, s9 ; GFX6-NEXT: s_bfe_u32 s2, s2, 0x100000 ; GFX6-NEXT: s_lshl_b32 s8, s8, 16 ; GFX6-NEXT: s_lshl_b32 s3, s3, 8 ; GFX6-NEXT: s_or_b32 s2, s2, s8 -; GFX6-NEXT: s_and_b32 s8, s12, s10 -; GFX6-NEXT: s_or_b32 s3, s11, s3 +; GFX6-NEXT: s_and_b32 s8, s12, s9 +; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 +; GFX6-NEXT: s_or_b32 s3, s10, s3 ; GFX6-NEXT: s_bfe_u32 s8, s8, 0x100000 -; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX6-NEXT: s_bfe_u32 s3, s3, 0x100000 ; GFX6-NEXT: s_lshl_b32 s8, s8, 16 ; GFX6-NEXT: s_or_b32 s3, s3, s8 -; GFX6-NEXT: s_lshr_b32 s8, s4, 8 -; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX6-NEXT: s_and_b32 s8, s8, s10 -; GFX6-NEXT: s_lshr_b32 s9, s4, 16 -; GFX6-NEXT: s_lshr_b32 s11, s4, 24 -; GFX6-NEXT: s_and_b32 s4, s4, s10 -; GFX6-NEXT: s_lshl_b32 s8, s8, 8 -; GFX6-NEXT: s_or_b32 s4, s4, s8 -; GFX6-NEXT: s_and_b32 s8, s9, s10 -; GFX6-NEXT: s_sub_i32 s9, 0, 24 -; GFX6-NEXT: v_mul_lo_u32 v1, s9, v0 +; GFX6-NEXT: s_lshr_b32 s8, s4, 16 +; GFX6-NEXT: s_lshr_b32 s10, s4, 24 +; GFX6-NEXT: s_and_b32 s13, s4, s9 +; GFX6-NEXT: s_bfe_u32 s4, s4, s11 +; GFX6-NEXT: s_sub_i32 s11, 0, 24 
+; GFX6-NEXT: v_mul_lo_u32 v1, s11, v0 +; GFX6-NEXT: s_lshl_b32 s4, s4, 8 +; GFX6-NEXT: s_and_b32 s8, s8, s9 +; GFX6-NEXT: s_or_b32 s4, s13, s4 +; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1 ; GFX6-NEXT: s_bfe_u32 s8, s8, 0x100000 ; GFX6-NEXT: s_bfe_u32 s4, s4, 0x100000 ; GFX6-NEXT: s_lshl_b32 s8, s8, 16 -; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1 -; GFX6-NEXT: s_or_b32 s4, s4, s8 -; GFX6-NEXT: s_lshr_b32 s12, s5, 8 -; GFX6-NEXT: s_and_b32 s5, s5, s10 ; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1 +; GFX6-NEXT: s_or_b32 s4, s4, s8 ; GFX6-NEXT: v_mul_hi_u32 v0, s4, v0 ; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v1, 24 ; GFX6-NEXT: v_rcp_iflag_f32_e32 v1, v1 -; GFX6-NEXT: s_lshl_b32 s5, s5, 8 +; GFX6-NEXT: s_lshr_b32 s12, s5, 8 ; GFX6-NEXT: v_mul_lo_u32 v0, v0, 24 -; GFX6-NEXT: s_and_b32 s8, s12, s10 +; GFX6-NEXT: s_and_b32 s5, s5, s9 ; GFX6-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1 ; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s4, v0 @@ -1549,20 +1537,23 @@ ; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, 24, v0 ; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 24, v0 ; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc -; GFX6-NEXT: v_mul_lo_u32 v2, s9, v1 -; GFX6-NEXT: s_or_b32 s5, s11, s5 +; GFX6-NEXT: v_mul_lo_u32 v2, s11, v1 +; GFX6-NEXT: s_lshl_b32 s5, s5, 8 +; GFX6-NEXT: s_and_b32 s8, s12, s9 +; GFX6-NEXT: s_or_b32 s5, s10, s5 +; GFX6-NEXT: v_mul_hi_u32 v2, v1, v2 ; GFX6-NEXT: s_bfe_u32 s8, s8, 0x100000 ; GFX6-NEXT: s_bfe_u32 s5, s5, 0x100000 -; GFX6-NEXT: v_mul_hi_u32 v2, v1, v2 ; GFX6-NEXT: s_lshl_b32 s8, s8, 16 ; GFX6-NEXT: s_or_b32 s5, s5, s8 -; GFX6-NEXT: s_bfe_u32 s0, s0, 0x100000 ; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2 ; GFX6-NEXT: v_mul_hi_u32 v1, s5, v1 +; GFX6-NEXT: s_and_b32 s6, s6, s9 +; GFX6-NEXT: s_bfe_u32 s0, s0, 0x100000 ; GFX6-NEXT: s_bfe_u32 s6, s6, 0x100000 +; GFX6-NEXT: v_mul_lo_u32 v1, v1, 24 ; GFX6-NEXT: s_mov_b32 s8, 0xffffff ; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 23, v0 -; GFX6-NEXT: v_mul_lo_u32 v1, v1, 24 ; GFX6-NEXT: s_lshl_b32 s4, s6, 17 ; GFX6-NEXT: s_lshl_b32 s0, 
s0, 1 ; GFX6-NEXT: v_and_b32_e32 v0, s8, v0 @@ -1587,27 +1578,23 @@ ; GFX6-NEXT: v_and_b32_e32 v1, v1, v4 ; GFX6-NEXT: s_or_b32 s0, s0, s1 ; GFX6-NEXT: v_and_b32_e32 v2, v2, v4 +; GFX6-NEXT: v_bfe_u32 v3, v0, 8, 8 ; GFX6-NEXT: v_lshl_b32_e32 v2, s0, v2 ; GFX6-NEXT: v_lshr_b32_e32 v1, s3, v1 ; GFX6-NEXT: v_or_b32_e32 v1, v2, v1 -; GFX6-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX6-NEXT: v_and_b32_e32 v2, s10, v2 -; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX6-NEXT: v_and_b32_e32 v0, s10, v0 -; GFX6-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX6-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX6-NEXT: v_and_b32_e32 v2, s10, v3 -; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX6-NEXT: v_lshrrev_b32_e32 v4, 8, v1 -; GFX6-NEXT: v_lshrrev_b32_e32 v5, 16, v1 -; GFX6-NEXT: v_and_b32_e32 v1, s10, v1 +; GFX6-NEXT: v_and_b32_e32 v2, s9, v0 +; GFX6-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX6-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX6-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX6-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX6-NEXT: v_and_b32_e32 v2, s9, v1 +; GFX6-NEXT: v_lshlrev_b32_e32 v2, 24, v2 ; GFX6-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX6-NEXT: v_and_b32_e32 v2, s10, v5 -; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1 -; GFX6-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX6-NEXT: v_and_b32_e32 v1, s10, v4 -; GFX6-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX6-NEXT: v_or_b32_e32 v1, v1, v2 +; GFX6-NEXT: v_bfe_u32 v2, v1, 8, 8 +; GFX6-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1 +; GFX6-NEXT: v_or_b32_e32 v1, v2, v1 ; GFX6-NEXT: v_readfirstlane_b32 s0, v0 ; GFX6-NEXT: v_readfirstlane_b32 s1, v1 ; GFX6-NEXT: ; return to shader part epilog @@ -1728,103 +1715,100 @@ ; GFX8-NEXT: v_lshlrev_b32_e64 v2, v2, s0 ; GFX8-NEXT: v_lshrrev_b32_e64 v1, v1, s3 ; GFX8-NEXT: v_or_b32_e32 v1, v2, v1 -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX8-NEXT: v_mov_b32_e32 v4, 8 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; 
GFX8-NEXT: v_mov_b32_e32 v4, s10 -; GFX8-NEXT: v_or_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, v2, v0 -; GFX8-NEXT: v_and_b32_e32 v2, s10, v1 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX8-NEXT: v_and_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v2 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_mov_b32_e32 v2, 8 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_mov_b32_e32 v4, 16 +; GFX8-NEXT: v_or_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_e32 v0, v3, v0 +; GFX8-NEXT: v_and_b32_e32 v3, s10, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v3 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD ; GFX8-NEXT: v_readfirstlane_b32 s0, v0 ; GFX8-NEXT: v_readfirstlane_b32 s1, v1 ; GFX8-NEXT: ; return to shader part epilog ; ; GFX9-LABEL: s_fshr_v2i24: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_movk_i32 s11, 0xff -; GFX9-NEXT: s_lshr_b32 s10, s1, 8 -; GFX9-NEXT: s_bfe_u32 s12, 8, 0x100000 -; GFX9-NEXT: s_and_b32 s1, s1, s11 +; GFX9-NEXT: s_movk_i32 s12, 0xff +; GFX9-NEXT: s_lshr_b32 s11, s1, 8 +; GFX9-NEXT: s_bfe_u32 s13, 8, 0x100000 +; GFX9-NEXT: s_and_b32 s1, s1, s12 ; GFX9-NEXT: s_lshr_b32 s7, s0, 8 -; GFX9-NEXT: 
s_lshr_b32 s9, s0, 24 -; GFX9-NEXT: s_lshl_b32 s1, s1, s12 -; GFX9-NEXT: s_or_b32 s1, s9, s1 -; GFX9-NEXT: s_and_b32 s7, s7, s11 -; GFX9-NEXT: s_lshr_b32 s9, s2, 8 -; GFX9-NEXT: s_and_b32 s9, s9, s11 -; GFX9-NEXT: s_lshr_b32 s8, s0, 16 -; GFX9-NEXT: s_and_b32 s0, s0, s11 -; GFX9-NEXT: s_lshl_b32 s7, s7, s12 +; GFX9-NEXT: s_lshr_b32 s10, s0, 24 +; GFX9-NEXT: s_lshl_b32 s1, s1, s13 +; GFX9-NEXT: s_or_b32 s1, s10, s1 +; GFX9-NEXT: s_and_b32 s7, s7, s12 +; GFX9-NEXT: s_lshr_b32 s10, s2, 8 +; GFX9-NEXT: s_and_b32 s10, s10, s12 +; GFX9-NEXT: s_lshr_b32 s9, s0, 16 +; GFX9-NEXT: s_and_b32 s0, s0, s12 +; GFX9-NEXT: s_lshl_b32 s7, s7, s13 ; GFX9-NEXT: s_or_b32 s0, s0, s7 -; GFX9-NEXT: s_and_b32 s7, s8, s11 -; GFX9-NEXT: s_and_b32 s8, s10, s11 -; GFX9-NEXT: s_lshr_b32 s10, s2, 16 -; GFX9-NEXT: s_lshr_b32 s13, s2, 24 -; GFX9-NEXT: s_and_b32 s2, s2, s11 -; GFX9-NEXT: s_lshl_b32 s9, s9, s12 -; GFX9-NEXT: s_or_b32 s2, s2, s9 -; GFX9-NEXT: s_and_b32 s9, s10, s11 +; GFX9-NEXT: s_and_b32 s7, s9, s12 +; GFX9-NEXT: s_and_b32 s9, s11, s12 +; GFX9-NEXT: s_lshr_b32 s11, s2, 16 +; GFX9-NEXT: s_lshr_b32 s14, s2, 24 +; GFX9-NEXT: s_and_b32 s2, s2, s12 +; GFX9-NEXT: s_lshl_b32 s10, s10, s13 +; GFX9-NEXT: s_or_b32 s2, s2, s10 +; GFX9-NEXT: s_and_b32 s10, s11, s12 ; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v0, 24 -; GFX9-NEXT: s_bfe_u32 s9, s9, 0x100000 +; GFX9-NEXT: s_bfe_u32 s10, s10, 0x100000 ; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0 -; GFX9-NEXT: s_lshr_b32 s14, s3, 8 -; GFX9-NEXT: s_and_b32 s3, s3, s11 +; GFX9-NEXT: s_lshr_b32 s15, s3, 8 +; GFX9-NEXT: s_and_b32 s3, s3, s12 ; GFX9-NEXT: s_bfe_u32 s2, s2, 0x100000 -; GFX9-NEXT: s_lshl_b32 s9, s9, 16 -; GFX9-NEXT: s_lshl_b32 s3, s3, s12 -; GFX9-NEXT: s_or_b32 s2, s2, s9 -; GFX9-NEXT: s_and_b32 s9, s14, s11 -; GFX9-NEXT: s_or_b32 s3, s13, s3 -; GFX9-NEXT: s_bfe_u32 s9, s9, 0x100000 +; GFX9-NEXT: s_lshl_b32 s10, s10, 16 +; GFX9-NEXT: s_lshl_b32 s3, s3, s13 +; GFX9-NEXT: s_or_b32 s2, s2, s10 +; GFX9-NEXT: s_and_b32 s10, s15, s12 +; GFX9-NEXT: s_or_b32 s3, 
s14, s3 +; GFX9-NEXT: s_bfe_u32 s10, s10, 0x100000 ; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX9-NEXT: s_bfe_u32 s3, s3, 0x100000 -; GFX9-NEXT: s_lshl_b32 s9, s9, 16 -; GFX9-NEXT: s_or_b32 s3, s3, s9 -; GFX9-NEXT: s_lshr_b32 s9, s4, 8 +; GFX9-NEXT: s_lshl_b32 s10, s10, 16 +; GFX9-NEXT: s_or_b32 s3, s3, s10 +; GFX9-NEXT: s_lshr_b32 s10, s4, 8 ; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0 -; GFX9-NEXT: s_and_b32 s9, s9, s11 -; GFX9-NEXT: s_lshr_b32 s10, s4, 16 -; GFX9-NEXT: s_lshr_b32 s13, s4, 24 -; GFX9-NEXT: s_and_b32 s4, s4, s11 -; GFX9-NEXT: s_lshl_b32 s9, s9, s12 -; GFX9-NEXT: s_or_b32 s4, s4, s9 -; GFX9-NEXT: s_and_b32 s9, s10, s11 -; GFX9-NEXT: s_sub_i32 s10, 0, 24 -; GFX9-NEXT: v_mul_lo_u32 v1, s10, v0 -; GFX9-NEXT: s_bfe_u32 s9, s9, 0x100000 +; GFX9-NEXT: s_and_b32 s10, s10, s12 +; GFX9-NEXT: s_lshr_b32 s11, s4, 16 +; GFX9-NEXT: s_lshr_b32 s14, s4, 24 +; GFX9-NEXT: s_and_b32 s4, s4, s12 +; GFX9-NEXT: s_lshl_b32 s10, s10, s13 +; GFX9-NEXT: s_or_b32 s4, s4, s10 +; GFX9-NEXT: s_and_b32 s10, s11, s12 +; GFX9-NEXT: s_sub_i32 s11, 0, 24 +; GFX9-NEXT: v_mul_lo_u32 v1, s11, v0 +; GFX9-NEXT: s_bfe_u32 s10, s10, 0x100000 ; GFX9-NEXT: s_bfe_u32 s4, s4, 0x100000 -; GFX9-NEXT: s_lshl_b32 s9, s9, 16 +; GFX9-NEXT: s_lshl_b32 s10, s10, 16 ; GFX9-NEXT: v_mul_hi_u32 v1, v0, v1 -; GFX9-NEXT: s_or_b32 s4, s4, s9 -; GFX9-NEXT: s_lshr_b32 s14, s5, 8 -; GFX9-NEXT: s_and_b32 s5, s5, s11 +; GFX9-NEXT: s_or_b32 s4, s4, s10 +; GFX9-NEXT: s_lshr_b32 s15, s5, 8 +; GFX9-NEXT: s_and_b32 s5, s5, s12 ; GFX9-NEXT: v_add_u32_e32 v0, v0, v1 ; GFX9-NEXT: v_mul_hi_u32 v0, s4, v0 ; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v1, 24 ; GFX9-NEXT: v_rcp_iflag_f32_e32 v1, v1 -; GFX9-NEXT: s_lshl_b32 s5, s5, s12 +; GFX9-NEXT: s_lshl_b32 s5, s5, s13 ; GFX9-NEXT: v_mul_lo_u32 v0, v0, 24 -; GFX9-NEXT: s_and_b32 s9, s14, s11 +; GFX9-NEXT: s_and_b32 s10, s15, s12 ; GFX9-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1 ; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1 ; GFX9-NEXT: v_sub_u32_e32 v0, s4, v0 ; GFX9-NEXT: v_subrev_u32_e32 v2, 
24, v0 ; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 24, v0 ; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc -; GFX9-NEXT: v_mul_lo_u32 v2, s10, v1 -; GFX9-NEXT: s_or_b32 s5, s13, s5 -; GFX9-NEXT: s_bfe_u32 s9, s9, 0x100000 +; GFX9-NEXT: v_mul_lo_u32 v2, s11, v1 +; GFX9-NEXT: s_or_b32 s5, s14, s5 +; GFX9-NEXT: s_bfe_u32 s10, s10, 0x100000 ; GFX9-NEXT: s_bfe_u32 s5, s5, 0x100000 ; GFX9-NEXT: v_mul_hi_u32 v2, v1, v2 -; GFX9-NEXT: s_lshl_b32 s9, s9, 16 -; GFX9-NEXT: s_or_b32 s5, s5, s9 +; GFX9-NEXT: s_lshl_b32 s10, s10, 16 +; GFX9-NEXT: s_or_b32 s5, s5, s10 ; GFX9-NEXT: v_subrev_u32_e32 v3, 24, v0 ; GFX9-NEXT: v_add_u32_e32 v1, v1, v2 ; GFX9-NEXT: v_mul_hi_u32 v1, s5, v1 @@ -1833,14 +1817,14 @@ ; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc ; GFX9-NEXT: v_mul_lo_u32 v1, v1, 24 ; GFX9-NEXT: s_bfe_u32 s7, s7, 0x100000 -; GFX9-NEXT: s_mov_b32 s9, 0xffffff +; GFX9-NEXT: s_mov_b32 s10, 0xffffff ; GFX9-NEXT: v_sub_u32_e32 v3, 23, v0 -; GFX9-NEXT: v_and_b32_e32 v0, s9, v0 +; GFX9-NEXT: v_and_b32_e32 v0, s10, v0 ; GFX9-NEXT: s_lshl_b32 s4, s7, 17 ; GFX9-NEXT: s_lshl_b32 s0, s0, 1 ; GFX9-NEXT: v_sub_u32_e32 v1, s5, v1 ; GFX9-NEXT: s_or_b32 s0, s4, s0 -; GFX9-NEXT: v_and_b32_e32 v3, s9, v3 +; GFX9-NEXT: v_and_b32_e32 v3, s10, v3 ; GFX9-NEXT: v_lshrrev_b32_e64 v0, v0, s2 ; GFX9-NEXT: v_lshl_or_b32 v0, s0, v3, v0 ; GFX9-NEXT: v_subrev_u32_e32 v3, 24, v1 @@ -1850,28 +1834,27 @@ ; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 24, v1 ; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000 ; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; GFX9-NEXT: s_bfe_u32 s8, s8, 0x100000 +; GFX9-NEXT: s_bfe_u32 s9, s9, 0x100000 ; GFX9-NEXT: v_mov_b32_e32 v2, 0xffffff ; GFX9-NEXT: v_sub_u32_e32 v3, 23, v1 ; GFX9-NEXT: v_and_b32_e32 v1, v1, v2 -; GFX9-NEXT: s_lshl_b32 s0, s8, 17 +; GFX9-NEXT: s_lshl_b32 s0, s9, 17 ; GFX9-NEXT: s_lshl_b32 s1, s1, 1 -; GFX9-NEXT: v_and_b32_e32 v3, v3, v2 ; GFX9-NEXT: s_or_b32 s0, s0, s1 +; GFX9-NEXT: v_and_b32_e32 v3, v3, v2 ; GFX9-NEXT: v_lshrrev_b32_e64 v1, v1, s3 ; GFX9-NEXT: v_lshl_or_b32 v1, s0, v3, v1 
; GFX9-NEXT: s_mov_b32 s6, 8 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_e32 v4, s11, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX9-NEXT: v_and_or_b32 v2, v0, s11, v2 -; GFX9-NEXT: v_and_b32_sdwa v0, v0, s11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX9-NEXT: v_or3_b32 v0, v2, v0, v4 -; GFX9-NEXT: v_and_or_b32 v1, v3, s11, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_b32_e32 v3, s12, v1 +; GFX9-NEXT: s_mov_b32 s8, 16 +; GFX9-NEXT: v_and_or_b32 v2, v0, s12, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX9-NEXT: v_or3_b32 v0, v2, v0, v3 +; GFX9-NEXT: v_bfe_u32 v2, v1, 8, 8 +; GFX9-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX9-NEXT: v_lshl_or_b32 v1, v1, 8, v2 ; GFX9-NEXT: v_readfirstlane_b32 s0, v0 ; GFX9-NEXT: v_readfirstlane_b32 s1, v1 ; GFX9-NEXT: ; return to shader part epilog @@ -1880,130 +1863,129 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v0, 24 ; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v1, 24 -; GFX10-NEXT: s_sub_i32 s12, 0, 24 -; GFX10-NEXT: s_movk_i32 s9, 0xff -; GFX10-NEXT: s_lshr_b32 s14, s4, 8 +; GFX10-NEXT: s_sub_i32 s13, 0, 24 +; GFX10-NEXT: s_movk_i32 s10, 0xff +; GFX10-NEXT: s_lshr_b32 s12, s4, 8 ; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0 ; GFX10-NEXT: v_rcp_iflag_f32_e32 v1, v1 -; GFX10-NEXT: s_lshr_b32 s15, s4, 16 -; GFX10-NEXT: s_bfe_u32 s10, 8, 0x100000 -; GFX10-NEXT: s_and_b32 s14, s14, s9 -; GFX10-NEXT: s_and_b32 s16, s4, s9 -; GFX10-NEXT: s_lshl_b32 s14, s14, s10 -; GFX10-NEXT: s_and_b32 
s15, s15, s9 -; GFX10-NEXT: s_or_b32 s14, s16, s14 -; GFX10-NEXT: s_lshr_b32 s4, s4, 24 +; GFX10-NEXT: s_bfe_u32 s11, 8, 0x100000 +; GFX10-NEXT: s_and_b32 s12, s12, s10 +; GFX10-NEXT: s_lshr_b32 s14, s4, 16 +; GFX10-NEXT: s_lshr_b32 s15, s4, 24 +; GFX10-NEXT: s_and_b32 s4, s4, s10 +; GFX10-NEXT: s_lshl_b32 s12, s12, s11 +; GFX10-NEXT: s_lshr_b32 s16, s5, 8 +; GFX10-NEXT: s_or_b32 s4, s4, s12 ; GFX10-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0 ; GFX10-NEXT: v_mul_f32_e32 v1, 0x4f7ffffe, v1 -; GFX10-NEXT: s_bfe_u32 s14, s14, 0x100000 -; GFX10-NEXT: s_lshr_b32 s6, s0, 8 -; GFX10-NEXT: s_lshr_b32 s11, s1, 8 +; GFX10-NEXT: s_and_b32 s5, s5, s10 +; GFX10-NEXT: s_bfe_u32 s4, s4, 0x100000 +; GFX10-NEXT: s_lshl_b32 s5, s5, s11 ; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0 ; GFX10-NEXT: v_cvt_u32_f32_e32 v1, v1 -; GFX10-NEXT: s_and_b32 s1, s1, s9 -; GFX10-NEXT: s_and_b32 s6, s6, s9 -; GFX10-NEXT: s_lshr_b32 s8, s0, 24 -; GFX10-NEXT: v_mul_lo_u32 v2, s12, v0 -; GFX10-NEXT: v_mul_lo_u32 v3, s12, v1 -; GFX10-NEXT: s_bfe_u32 s12, s15, 0x100000 -; GFX10-NEXT: s_lshr_b32 s15, s5, 8 +; GFX10-NEXT: s_or_b32 s5, s15, s5 +; GFX10-NEXT: s_lshr_b32 s9, s1, 8 +; GFX10-NEXT: s_bfe_u32 s5, s5, 0x100000 +; GFX10-NEXT: v_mul_lo_u32 v2, s13, v0 +; GFX10-NEXT: v_mul_lo_u32 v3, s13, v1 +; GFX10-NEXT: s_and_b32 s13, s14, s10 +; GFX10-NEXT: s_and_b32 s1, s1, s10 +; GFX10-NEXT: s_bfe_u32 s12, s13, 0x100000 +; GFX10-NEXT: s_lshr_b32 s6, s0, 8 ; GFX10-NEXT: s_lshl_b32 s12, s12, 16 -; GFX10-NEXT: s_and_b32 s5, s5, s9 -; GFX10-NEXT: s_or_b32 s12, s14, s12 -; GFX10-NEXT: s_lshl_b32 s5, s5, s10 +; GFX10-NEXT: s_lshr_b32 s8, s0, 24 ; GFX10-NEXT: v_mul_hi_u32 v2, v0, v2 -; GFX10-NEXT: s_and_b32 s14, s15, s9 -; GFX10-NEXT: s_or_b32 s4, s4, s5 -; GFX10-NEXT: s_bfe_u32 s5, s14, 0x100000 -; GFX10-NEXT: s_bfe_u32 s4, s4, 0x100000 -; GFX10-NEXT: s_lshl_b32 s5, s5, 16 -; GFX10-NEXT: s_lshl_b32 s1, s1, s10 -; GFX10-NEXT: s_or_b32 s4, s4, s5 +; GFX10-NEXT: s_or_b32 s4, s4, s12 +; GFX10-NEXT: s_and_b32 s12, s16, s10 +; 
GFX10-NEXT: s_lshl_b32 s1, s1, s11 +; GFX10-NEXT: s_bfe_u32 s12, s12, 0x100000 +; GFX10-NEXT: s_or_b32 s1, s8, s1 +; GFX10-NEXT: s_lshl_b32 s12, s12, 16 +; GFX10-NEXT: s_and_b32 s6, s6, s10 ; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v2 ; GFX10-NEXT: v_mul_hi_u32 v2, v1, v3 -; GFX10-NEXT: s_or_b32 s1, s8, s1 +; GFX10-NEXT: s_or_b32 s5, s5, s12 ; GFX10-NEXT: s_lshr_b32 s8, s2, 8 ; GFX10-NEXT: s_lshr_b32 s7, s0, 16 -; GFX10-NEXT: v_mul_hi_u32 v0, s12, v0 -; GFX10-NEXT: s_and_b32 s0, s0, s9 -; GFX10-NEXT: s_lshl_b32 s6, s6, s10 -; GFX10-NEXT: s_and_b32 s8, s8, s9 +; GFX10-NEXT: v_mul_hi_u32 v0, s4, v0 +; GFX10-NEXT: s_and_b32 s8, s8, s10 +; GFX10-NEXT: s_and_b32 s0, s0, s10 +; GFX10-NEXT: s_lshl_b32 s6, s6, s11 ; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v2 ; GFX10-NEXT: s_or_b32 s0, s0, s6 -; GFX10-NEXT: s_and_b32 s6, s7, s9 -; GFX10-NEXT: s_and_b32 s7, s11, s9 +; GFX10-NEXT: s_and_b32 s6, s7, s10 +; GFX10-NEXT: s_and_b32 s7, s9, s10 ; GFX10-NEXT: v_mul_lo_u32 v0, v0, 24 -; GFX10-NEXT: v_mul_hi_u32 v1, s4, v1 -; GFX10-NEXT: s_lshr_b32 s11, s2, 16 -; GFX10-NEXT: s_and_b32 s13, s2, s9 -; GFX10-NEXT: s_lshl_b32 s5, s8, s10 -; GFX10-NEXT: s_and_b32 s8, s11, s9 -; GFX10-NEXT: s_lshr_b32 s11, s3, 8 -; GFX10-NEXT: s_and_b32 s3, s3, s9 -; GFX10-NEXT: v_sub_nc_u32_e32 v0, s12, v0 +; GFX10-NEXT: v_mul_hi_u32 v1, s5, v1 +; GFX10-NEXT: s_lshr_b32 s9, s2, 16 +; GFX10-NEXT: s_lshr_b32 s13, s2, 24 +; GFX10-NEXT: s_and_b32 s2, s2, s10 +; GFX10-NEXT: s_lshl_b32 s8, s8, s11 +; GFX10-NEXT: s_lshr_b32 s12, s3, 8 +; GFX10-NEXT: s_or_b32 s2, s2, s8 +; GFX10-NEXT: v_sub_nc_u32_e32 v0, s4, v0 ; GFX10-NEXT: v_mul_lo_u32 v1, v1, 24 -; GFX10-NEXT: s_or_b32 s5, s13, s5 -; GFX10-NEXT: s_bfe_u32 s8, s8, 0x100000 -; GFX10-NEXT: s_lshr_b32 s2, s2, 24 +; GFX10-NEXT: s_and_b32 s8, s9, s10 +; GFX10-NEXT: s_bfe_u32 s2, s2, 0x100000 +; GFX10-NEXT: s_bfe_u32 s4, s8, 0x100000 ; GFX10-NEXT: v_subrev_nc_u32_e32 v2, 24, v0 ; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v0 -; GFX10-NEXT: s_lshl_b32 s3, s3, s10 -; 
GFX10-NEXT: v_mov_b32_e32 v4, 0xffffff -; GFX10-NEXT: v_sub_nc_u32_e32 v1, s4, v1 -; GFX10-NEXT: s_mov_b32 s4, 0xffffff +; GFX10-NEXT: s_lshl_b32 s4, s4, 16 +; GFX10-NEXT: s_and_b32 s3, s3, s10 +; GFX10-NEXT: v_sub_nc_u32_e32 v1, s5, v1 +; GFX10-NEXT: s_or_b32 s2, s2, s4 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo +; GFX10-NEXT: s_mov_b32 s4, 0xffffff +; GFX10-NEXT: s_lshl_b32 s3, s3, s11 +; GFX10-NEXT: v_subrev_nc_u32_e32 v2, 24, v1 +; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v1 +; GFX10-NEXT: v_subrev_nc_u32_e32 v3, 24, v0 +; GFX10-NEXT: s_and_b32 s5, s12, s10 +; GFX10-NEXT: s_or_b32 s3, s13, s3 ; GFX10-NEXT: s_bfe_u32 s5, s5, 0x100000 -; GFX10-NEXT: s_lshl_b32 s8, s8, 16 -; GFX10-NEXT: v_subrev_nc_u32_e32 v3, 24, v1 -; GFX10-NEXT: s_or_b32 s2, s2, s3 -; GFX10-NEXT: v_subrev_nc_u32_e32 v2, 24, v0 +; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo ; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v0 -; GFX10-NEXT: s_and_b32 s3, s11, s9 -; GFX10-NEXT: s_or_b32 s5, s5, s8 -; GFX10-NEXT: s_bfe_u32 s0, s0, 0x100000 -; GFX10-NEXT: s_bfe_u32 s6, s6, 0x100000 -; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo -; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v1 ; GFX10-NEXT: s_bfe_u32 s3, s3, 0x100000 -; GFX10-NEXT: s_bfe_u32 s2, s2, 0x100000 -; GFX10-NEXT: s_lshl_b32 s3, s3, 16 -; GFX10-NEXT: v_sub_nc_u32_e32 v2, 23, v0 -; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo -; GFX10-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX10-NEXT: s_lshl_b32 s6, s6, 17 -; GFX10-NEXT: s_lshl_b32 s0, s0, 1 -; GFX10-NEXT: v_and_b32_e32 v2, s4, v2 -; GFX10-NEXT: v_subrev_nc_u32_e32 v3, 24, v1 +; GFX10-NEXT: s_lshl_b32 s5, s5, 16 +; GFX10-NEXT: s_bfe_u32 s0, s0, 0x100000 +; GFX10-NEXT: v_subrev_nc_u32_e32 v2, 24, v1 +; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc_lo ; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v1 -; GFX10-NEXT: v_lshrrev_b32_e64 v0, v0, s5 -; GFX10-NEXT: s_or_b32 s0, s6, s0 -; GFX10-NEXT: s_or_b32 s2, s2, s3 +; GFX10-NEXT: s_bfe_u32 s6, s6, 0x100000 +; GFX10-NEXT: s_or_b32 s3, s3, s5 ; 
GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000 -; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo +; GFX10-NEXT: v_sub_nc_u32_e32 v3, 23, v0 +; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo +; GFX10-NEXT: v_mov_b32_e32 v2, 0xffffff +; GFX10-NEXT: v_and_b32_e32 v0, s4, v0 ; GFX10-NEXT: s_bfe_u32 s7, s7, 0x100000 -; GFX10-NEXT: v_lshl_or_b32 v0, s0, v2, v0 -; GFX10-NEXT: s_lshl_b32 s0, s7, 17 +; GFX10-NEXT: v_and_b32_e32 v3, s4, v3 +; GFX10-NEXT: v_sub_nc_u32_e32 v4, 23, v1 +; GFX10-NEXT: v_and_b32_e32 v1, v1, v2 +; GFX10-NEXT: v_lshrrev_b32_e64 v0, v0, s2 +; GFX10-NEXT: s_lshl_b32 s5, s6, 17 +; GFX10-NEXT: s_lshl_b32 s0, s0, 1 +; GFX10-NEXT: v_and_b32_e32 v2, v4, v2 +; GFX10-NEXT: v_lshrrev_b32_e64 v1, v1, s3 +; GFX10-NEXT: s_or_b32 s0, s5, s0 +; GFX10-NEXT: s_lshl_b32 s2, s7, 17 ; GFX10-NEXT: s_lshl_b32 s1, s1, 1 -; GFX10-NEXT: v_sub_nc_u32_e32 v3, 23, v1 -; GFX10-NEXT: v_and_b32_e32 v1, v1, v4 -; GFX10-NEXT: s_or_b32 s0, s0, s1 -; GFX10-NEXT: v_and_b32_e32 v2, v3, v4 -; GFX10-NEXT: v_lshrrev_b32_e64 v1, v1, s2 -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 8, v0 +; GFX10-NEXT: v_lshl_or_b32 v0, s0, v3, v0 +; GFX10-NEXT: s_or_b32 s0, s2, s1 ; GFX10-NEXT: v_lshl_or_b32 v1, s0, v2, v1 ; GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_e32 v3, s9, v1 -; GFX10-NEXT: v_and_b32_sdwa v4, v1, s9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v1 -; GFX10-NEXT: v_and_or_b32 v2, v0, s9, v2 -; GFX10-NEXT: v_and_b32_sdwa v0, v0, s9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: s_mov_b32 s0, 16 +; GFX10-NEXT: v_and_b32_e32 v3, s10, v1 +; GFX10-NEXT: v_bfe_u32 v4, v1, 8, 8 +; GFX10-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX10-NEXT: v_and_or_b32 v2, v0, s10, v2 +; GFX10-NEXT: 
v_lshlrev_b32_sdwa v0, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 8, v4 +; GFX10-NEXT: v_lshl_or_b32 v1, v1, 8, v4 ; GFX10-NEXT: v_or3_b32 v0, v2, v0, v3 -; GFX10-NEXT: v_and_or_b32 v1, v1, s9, v4 -; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: v_readfirstlane_b32 s1, v1 +; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: ; return to shader part epilog %lhs = bitcast i48 %lhs.arg to <2 x i24> %rhs = bitcast i48 %rhs.arg to <2 x i24> Index: llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i8.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i8.ll +++ llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i8.ll @@ -725,32 +725,32 @@ ; GFX9-LABEL: insertelement_v_v4i8_s_s: ; GFX9: ; %bb.0: ; GFX9-NEXT: global_load_dword v0, v[0:1], off -; GFX9-NEXT: s_movk_i32 s1, 0xff ; GFX9-NEXT: s_and_b32 s3, s3, 3 ; GFX9-NEXT: s_mov_b32 s0, 8 -; GFX9-NEXT: s_and_b32 s2, s2, s1 +; GFX9-NEXT: s_movk_i32 s4, 0xff +; GFX9-NEXT: s_mov_b32 s1, 16 +; GFX9-NEXT: s_and_b32 s2, s2, s4 ; GFX9-NEXT: s_lshl_b32 s3, s3, 3 ; GFX9-NEXT: s_lshl_b32 s2, s2, s3 -; GFX9-NEXT: s_lshl_b32 s3, s1, s3 +; GFX9-NEXT: s_lshl_b32 s3, s4, s3 ; GFX9-NEXT: s_not_b32 s3, s3 -; GFX9-NEXT: v_mov_b32_e32 v2, s2 +; GFX9-NEXT: v_mov_b32_e32 v3, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, 8 +; GFX9-NEXT: v_mov_b32_e32 v2, 16 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v3, 8, v0 ; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v3, s0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v5, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b32_sdwa v5, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v6, s1, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v5 ; GFX9-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX9-NEXT: v_and_or_b32 v0, v0, s1, v3 -; GFX9-NEXT: v_or3_b32 v0, v0, v5, v4 -; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v0 +; GFX9-NEXT: v_or3_b32 v0, v0, v6, v4 +; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v3 ; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v4, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v3 -; GFX9-NEXT: v_and_or_b32 v0, v0, s1, v1 -; GFX9-NEXT: v_or3_b32 v2, v0, v4, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX9-NEXT: v_or3_b32 v2, v0, v2, v1 ; GFX9-NEXT: v_mov_b32_e32 v0, 0 ; GFX9-NEXT: v_mov_b32_e32 v1, 0 ; GFX9-NEXT: global_store_dword v[0:1], v2, off @@ -759,35 +759,34 @@ ; GFX8-LABEL: insertelement_v_v4i8_s_s: ; GFX8: ; %bb.0: ; GFX8-NEXT: flat_load_dword v0, v[0:1] -; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: v_mov_b32_e32 v1, 8 +; GFX8-NEXT: v_mov_b32_e32 v2, 16 ; GFX8-NEXT: s_and_b32 s1, s3, 3 -; GFX8-NEXT: v_mov_b32_e32 v3, s0 +; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: s_lshl_b32 s1, s1, 3 ; GFX8-NEXT: s_and_b32 s2, s2, s0 ; GFX8-NEXT: s_lshl_b32 s0, s0, s1 ; GFX8-NEXT: s_not_b32 s0, s0 ; GFX8-NEXT: s_lshl_b32 s2, s2, s1 -; GFX8-NEXT: v_mov_b32_e32 v2, 8 +; GFX8-NEXT: v_mov_b32_e32 v3, 8 +; GFX8-NEXT: v_mov_b32_e32 v4, 16 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v6, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v5 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v6 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v5 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_and_b32_e32 v0, s0, v0 ; GFX8-NEXT: v_or_b32_e32 v0, s2, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v3, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v3 -; GFX8-NEXT: v_or_b32_e32 v2, v0, v2 +; GFX8-NEXT: v_or_b32_e32 v2, v0, v1 ; GFX8-NEXT: v_mov_b32_e32 v0, 0 ; GFX8-NEXT: v_mov_b32_e32 v1, 0 ; GFX8-NEXT: flat_store_dword v[0:1], v2 @@ -808,32 +807,28 @@ ; GFX7-NEXT: s_not_b32 s1, s1 ; GFX7-NEXT: s_mov_b32 s6, -1 ; GFX7-NEXT: s_waitcnt 
vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s0, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s0, v2 -; GFX7-NEXT: v_and_b32_e32 v0, s0, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, s0, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 ; GFX7-NEXT: v_and_b32_e32 v0, s1, v0 ; GFX7-NEXT: v_or_b32_e32 v0, s2, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s0, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s0, v2 -; GFX7-NEXT: v_and_b32_e32 v0, s0, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, s0, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 ; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; GFX7-NEXT: s_endpgm ; @@ -841,30 +836,30 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: global_load_dword v0, v[0:1], off ; GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: s_and_b32 s1, 
s3, 3 -; GFX10-NEXT: s_lshl_b32 s1, s1, 3 +; GFX10-NEXT: s_movk_i32 s1, 0xff +; GFX10-NEXT: s_and_b32 s2, s2, s1 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_movk_i32 s0, 0xff -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v2 -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: s_and_b32 s2, s2, s0 -; GFX10-NEXT: v_and_or_b32 v0, v0, s0, v1 -; GFX10-NEXT: s_lshl_b32 s3, s0, s1 -; GFX10-NEXT: s_lshl_b32 s1, s2, s1 +; GFX10-NEXT: s_mov_b32 s0, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX10-NEXT: s_and_b32 s0, s3, 3 +; GFX10-NEXT: v_mov_b32_e32 v2, 16 +; GFX10-NEXT: s_lshl_b32 s0, s0, 3 +; GFX10-NEXT: v_or3_b32 v0, v0, v3, v1 +; GFX10-NEXT: s_lshl_b32 s3, s1, s0 +; GFX10-NEXT: s_lshl_b32 s0, s2, s0 ; GFX10-NEXT: s_not_b32 s2, s3 ; GFX10-NEXT: v_mov_b32_e32 v1, 8 -; GFX10-NEXT: v_or3_b32 v0, v0, v3, v2 -; GFX10-NEXT: v_and_or_b32 v0, v0, s2, s1 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 8, v0 +; GFX10-NEXT: v_and_or_b32 v0, v0, s2, s0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v2, v0, s0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX10-NEXT: v_and_or_b32 v0, v0, s0, v1 -; GFX10-NEXT: v_or3_b32 v2, v0, v2, v3 +; 
GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v1 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX10-NEXT: v_or3_b32 v2, v0, v2, v1 ; GFX10-NEXT: v_mov_b32_e32 v0, 0 ; GFX10-NEXT: v_mov_b32_e32 v1, 0 ; GFX10-NEXT: global_store_dword v[0:1], v2, off @@ -878,36 +873,34 @@ define amdgpu_ps void @insertelement_s_v4i8_v_s(<4 x i8> addrspace(4)* inreg %ptr, i8 %val, i32 inreg %idx) { ; GFX9-LABEL: insertelement_s_v4i8_v_s: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_load_dword s1, s[2:3], 0x0 -; GFX9-NEXT: s_movk_i32 s6, 0xff -; GFX9-NEXT: v_and_b32_e32 v0, s6, v0 -; GFX9-NEXT: s_mov_b32 s0, 8 +; GFX9-NEXT: s_load_dword s0, s[2:3], 0x0 +; GFX9-NEXT: s_movk_i32 s5, 0xff +; GFX9-NEXT: v_and_b32_e32 v0, s5, v0 +; GFX9-NEXT: s_mov_b32 s1, 8 +; GFX9-NEXT: s_mov_b32 s2, 16 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_lshr_b32 s2, s1, 8 -; GFX9-NEXT: s_and_b32 s2, s2, s6 -; GFX9-NEXT: s_lshr_b32 s3, s1, 16 -; GFX9-NEXT: s_lshr_b32 s5, s1, 24 -; GFX9-NEXT: s_and_b32 s1, s1, s6 -; GFX9-NEXT: s_lshl_b32 s2, s2, 8 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_and_b32 s2, s3, s6 -; GFX9-NEXT: s_lshl_b32 s2, s2, 16 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_lshl_b32 s2, s5, 24 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_and_b32 s2, s4, 3 -; GFX9-NEXT: s_lshl_b32 s2, s2, 3 -; GFX9-NEXT: s_lshl_b32 s3, s6, s2 -; GFX9-NEXT: s_andn2_b32 s1, s1, s3 -; GFX9-NEXT: v_mov_b32_e32 v1, s1 -; GFX9-NEXT: v_lshl_or_b32 v0, v0, s2, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_or_b32 v1, v0, s6, v1 -; GFX9-NEXT: v_and_b32_sdwa v0, v0, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2 -; GFX9-NEXT: v_or3_b32 v2, v1, v0, v2 +; GFX9-NEXT: s_bfe_u32 s7, s0, 
0x80008 +; GFX9-NEXT: s_lshr_b32 s3, s0, 24 +; GFX9-NEXT: s_and_b32 s6, s0, s5 +; GFX9-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GFX9-NEXT: s_lshl_b32 s7, s7, 8 +; GFX9-NEXT: s_or_b32 s6, s6, s7 +; GFX9-NEXT: s_lshl_b32 s0, s0, 16 +; GFX9-NEXT: s_or_b32 s0, s6, s0 +; GFX9-NEXT: s_lshl_b32 s3, s3, 24 +; GFX9-NEXT: s_or_b32 s0, s0, s3 +; GFX9-NEXT: s_and_b32 s3, s4, 3 +; GFX9-NEXT: s_lshl_b32 s3, s3, 3 +; GFX9-NEXT: s_lshl_b32 s4, s5, s3 +; GFX9-NEXT: s_andn2_b32 s0, s0, s4 +; GFX9-NEXT: v_mov_b32_e32 v1, s0 +; GFX9-NEXT: v_lshl_or_b32 v0, v0, s3, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v2, v0, s5, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX9-NEXT: v_or3_b32 v2, v2, v0, v1 ; GFX9-NEXT: v_mov_b32_e32 v0, 0 ; GFX9-NEXT: v_mov_b32_e32 v1, 0 ; GFX9-NEXT: global_store_dword v[0:1], v2, off @@ -915,37 +908,34 @@ ; ; GFX8-LABEL: insertelement_s_v4i8_v_s: ; GFX8: ; %bb.0: -; GFX8-NEXT: s_load_dword s0, s[2:3], 0x0 -; GFX8-NEXT: s_movk_i32 s5, 0xff -; GFX8-NEXT: v_mov_b32_e32 v3, 8 +; GFX8-NEXT: s_load_dword s1, s[2:3], 0x0 +; GFX8-NEXT: s_movk_i32 s0, 0xff +; GFX8-NEXT: v_mov_b32_e32 v2, 8 +; GFX8-NEXT: v_mov_b32_e32 v3, 16 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_lshr_b32 s1, s0, 8 -; GFX8-NEXT: s_and_b32 s1, s1, s5 -; GFX8-NEXT: s_lshr_b32 s2, s0, 16 -; GFX8-NEXT: s_lshr_b32 s3, s0, 24 -; GFX8-NEXT: s_and_b32 s0, s0, s5 -; GFX8-NEXT: s_lshl_b32 s1, s1, 8 -; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s2, s5 +; GFX8-NEXT: s_bfe_u32 s5, s1, 0x80008 +; GFX8-NEXT: s_lshr_b32 s2, s1, 24 +; GFX8-NEXT: s_and_b32 s3, s1, s0 +; GFX8-NEXT: s_bfe_u32 s1, s1, 0x80010 +; GFX8-NEXT: s_lshl_b32 s5, s5, 8 +; GFX8-NEXT: s_or_b32 s3, s3, s5 ; GFX8-NEXT: s_lshl_b32 s1, s1, 16 -; GFX8-NEXT: s_or_b32 s0, s0, s1 -; 
GFX8-NEXT: s_lshl_b32 s1, s3, 24 -; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s4, 3 -; GFX8-NEXT: s_lshl_b32 s1, s1, 3 -; GFX8-NEXT: v_mov_b32_e32 v1, s1 -; GFX8-NEXT: s_lshl_b32 s1, s5, s1 +; GFX8-NEXT: s_or_b32 s1, s3, s1 +; GFX8-NEXT: s_lshl_b32 s2, s2, 24 +; GFX8-NEXT: s_or_b32 s1, s1, s2 +; GFX8-NEXT: s_and_b32 s2, s4, 3 +; GFX8-NEXT: s_lshl_b32 s2, s2, 3 +; GFX8-NEXT: s_lshl_b32 s0, s0, s2 +; GFX8-NEXT: v_mov_b32_e32 v1, s2 ; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: s_andn2_b32 s0, s0, s1 +; GFX8-NEXT: s_andn2_b32 s0, s1, s0 ; GFX8-NEXT: v_or_b32_e32 v0, s0, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_mov_b32_e32 v3, s5 -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v0, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, v1, v0 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX8-NEXT: v_or_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX8-NEXT: v_or_b32_e32 v2, v0, v1 ; GFX8-NEXT: v_mov_b32_e32 v0, 0 ; GFX8-NEXT: v_mov_b32_e32 v1, 0 @@ -958,17 +948,15 @@ ; GFX7-NEXT: s_movk_i32 s5, 0xff ; GFX7-NEXT: v_and_b32_e32 v0, s5, v0 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_lshr_b32 s1, s0, 8 -; GFX7-NEXT: s_and_b32 s1, s1, s5 -; GFX7-NEXT: s_lshr_b32 s2, s0, 16 -; GFX7-NEXT: 
s_lshr_b32 s3, s0, 24 -; GFX7-NEXT: s_and_b32 s0, s0, s5 -; GFX7-NEXT: s_lshl_b32 s1, s1, 8 -; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_and_b32 s1, s2, s5 -; GFX7-NEXT: s_lshl_b32 s1, s1, 16 -; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_lshl_b32 s1, s3, 24 +; GFX7-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GFX7-NEXT: s_lshr_b32 s1, s0, 24 +; GFX7-NEXT: s_and_b32 s2, s0, s5 +; GFX7-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GFX7-NEXT: s_lshl_b32 s3, s3, 8 +; GFX7-NEXT: s_or_b32 s2, s2, s3 +; GFX7-NEXT: s_lshl_b32 s0, s0, 16 +; GFX7-NEXT: s_or_b32 s0, s2, s0 +; GFX7-NEXT: s_lshl_b32 s1, s1, 24 ; GFX7-NEXT: s_or_b32 s0, s0, s1 ; GFX7-NEXT: s_and_b32 s1, s4, 3 ; GFX7-NEXT: s_lshl_b32 s1, s1, 3 @@ -976,17 +964,15 @@ ; GFX7-NEXT: s_lshl_b32 s1, s5, s1 ; GFX7-NEXT: s_andn2_b32 s0, s0, s1 ; GFX7-NEXT: v_or_b32_e32 v0, s0, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s5, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s5, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s5, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, s5, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX7-NEXT: s_mov_b64 s[0:1], 0 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: s_mov_b32 s2, -1 @@ -1002,29 +988,27 @@ ; GFX10-NEXT: v_and_b32_e32 v0, s2, v0 ; GFX10-NEXT: s_lshl_b32 s1, s1, 3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s3, s0, 8 -; GFX10-NEXT: s_lshr_b32 s4, s0, 16 -; GFX10-NEXT: s_and_b32 s3, s3, s2 -; GFX10-NEXT: s_and_b32 
s4, s4, s2 -; GFX10-NEXT: s_lshr_b32 s5, s0, 24 -; GFX10-NEXT: s_and_b32 s0, s0, s2 -; GFX10-NEXT: s_lshl_b32 s3, s3, 8 -; GFX10-NEXT: s_lshl_b32 s4, s4, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s3 -; GFX10-NEXT: s_lshl_b32 s3, s5, 24 -; GFX10-NEXT: s_or_b32 s0, s0, s4 +; GFX10-NEXT: s_bfe_u32 s5, s0, 0x80008 +; GFX10-NEXT: s_lshr_b32 s3, s0, 24 +; GFX10-NEXT: s_and_b32 s4, s0, s2 +; GFX10-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GFX10-NEXT: s_lshl_b32 s5, s5, 8 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_or_b32 s4, s4, s5 +; GFX10-NEXT: s_lshl_b32 s3, s3, 24 +; GFX10-NEXT: s_or_b32 s0, s4, s0 ; GFX10-NEXT: s_lshl_b32 s4, s2, s1 ; GFX10-NEXT: s_or_b32 s0, s0, s3 ; GFX10-NEXT: s_andn2_b32 s0, s0, s4 ; GFX10-NEXT: v_lshl_or_b32 v0, v0, s1, s0 ; GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX10-NEXT: s_mov_b32 s0, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_and_or_b32 v0, v0, s2, v1 -; GFX10-NEXT: v_or3_b32 v2, v0, v3, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX10-NEXT: v_or3_b32 v2, v0, v3, v1 ; GFX10-NEXT: v_mov_b32_e32 v0, 0 ; GFX10-NEXT: v_mov_b32_e32 v1, 0 ; GFX10-NEXT: global_store_dword v[0:1], v2, off @@ -1038,36 +1022,34 @@ define amdgpu_ps void @insertelement_s_v4i8_s_v(<4 x i8> addrspace(4)* inreg %ptr, i8 inreg %val, i32 %idx) { ; GFX9-LABEL: insertelement_s_v4i8_s_v: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_load_dword s1, s[2:3], 0x0 -; GFX9-NEXT: s_movk_i32 s6, 0xff +; GFX9-NEXT: s_load_dword s0, s[2:3], 0x0 +; GFX9-NEXT: 
s_movk_i32 s5, 0xff ; GFX9-NEXT: v_and_b32_e32 v0, 3, v0 ; GFX9-NEXT: v_lshlrev_b32_e32 v0, 3, v0 -; GFX9-NEXT: s_mov_b32 s0, 8 +; GFX9-NEXT: s_mov_b32 s1, 8 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_lshr_b32 s2, s1, 8 -; GFX9-NEXT: s_and_b32 s2, s2, s6 -; GFX9-NEXT: s_lshr_b32 s3, s1, 16 -; GFX9-NEXT: s_lshr_b32 s5, s1, 24 -; GFX9-NEXT: s_and_b32 s1, s1, s6 -; GFX9-NEXT: s_lshl_b32 s2, s2, 8 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_and_b32 s2, s3, s6 -; GFX9-NEXT: s_lshl_b32 s2, s2, 16 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_lshl_b32 s2, s5, 24 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_and_b32 s2, s4, s6 -; GFX9-NEXT: v_lshlrev_b32_e64 v1, v0, s2 -; GFX9-NEXT: v_lshlrev_b32_e64 v0, v0, s6 +; GFX9-NEXT: s_bfe_u32 s7, s0, 0x80008 +; GFX9-NEXT: s_lshr_b32 s3, s0, 24 +; GFX9-NEXT: s_and_b32 s6, s0, s5 +; GFX9-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GFX9-NEXT: s_lshl_b32 s7, s7, 8 +; GFX9-NEXT: s_or_b32 s6, s6, s7 +; GFX9-NEXT: s_lshl_b32 s0, s0, 16 +; GFX9-NEXT: s_or_b32 s0, s6, s0 +; GFX9-NEXT: s_lshl_b32 s3, s3, 24 +; GFX9-NEXT: s_or_b32 s0, s0, s3 +; GFX9-NEXT: s_and_b32 s3, s4, s5 +; GFX9-NEXT: v_lshlrev_b32_e64 v1, v0, s3 +; GFX9-NEXT: v_lshlrev_b32_e64 v0, v0, s5 ; GFX9-NEXT: v_xor_b32_e32 v0, -1, v0 -; GFX9-NEXT: v_and_or_b32 v0, s1, v0, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_or_b32 v1, v0, s6, v1 -; GFX9-NEXT: v_and_b32_sdwa v0, v0, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2 -; GFX9-NEXT: v_or3_b32 v2, v1, v0, v2 +; GFX9-NEXT: v_and_or_b32 v0, s0, v0, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: s_mov_b32 s2, 16 +; GFX9-NEXT: v_and_or_b32 v2, v0, s5, v2 +; 
GFX9-NEXT: v_lshlrev_b32_sdwa v0, s2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX9-NEXT: v_or3_b32 v2, v2, v0, v1 ; GFX9-NEXT: v_mov_b32_e32 v0, 0 ; GFX9-NEXT: v_mov_b32_e32 v1, 0 ; GFX9-NEXT: global_store_dword v[0:1], v2, off @@ -1075,38 +1057,35 @@ ; ; GFX8-LABEL: insertelement_s_v4i8_s_v: ; GFX8: ; %bb.0: -; GFX8-NEXT: s_load_dword s0, s[2:3], 0x0 -; GFX8-NEXT: s_movk_i32 s5, 0xff +; GFX8-NEXT: s_load_dword s1, s[2:3], 0x0 +; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: v_and_b32_e32 v0, 3, v0 ; GFX8-NEXT: v_lshlrev_b32_e32 v0, 3, v0 -; GFX8-NEXT: v_mov_b32_e32 v3, 8 +; GFX8-NEXT: v_mov_b32_e32 v2, 8 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_lshr_b32 s1, s0, 8 -; GFX8-NEXT: s_and_b32 s1, s1, s5 -; GFX8-NEXT: s_lshr_b32 s2, s0, 16 -; GFX8-NEXT: s_lshr_b32 s3, s0, 24 -; GFX8-NEXT: s_and_b32 s0, s0, s5 -; GFX8-NEXT: s_lshl_b32 s1, s1, 8 -; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s2, s5 +; GFX8-NEXT: s_bfe_u32 s5, s1, 0x80008 +; GFX8-NEXT: s_lshr_b32 s2, s1, 24 +; GFX8-NEXT: s_and_b32 s3, s1, s0 +; GFX8-NEXT: s_bfe_u32 s1, s1, 0x80010 +; GFX8-NEXT: s_lshl_b32 s5, s5, 8 +; GFX8-NEXT: s_or_b32 s3, s3, s5 ; GFX8-NEXT: s_lshl_b32 s1, s1, 16 -; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_lshl_b32 s1, s3, 24 -; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s4, s5 -; GFX8-NEXT: v_lshlrev_b32_e64 v1, v0, s1 -; GFX8-NEXT: v_lshlrev_b32_e64 v0, v0, s5 +; GFX8-NEXT: s_or_b32 s1, s3, s1 +; GFX8-NEXT: s_lshl_b32 s2, s2, 24 +; GFX8-NEXT: s_or_b32 s1, s1, s2 +; GFX8-NEXT: s_and_b32 s2, s4, s0 +; GFX8-NEXT: v_lshlrev_b32_e64 v1, v0, s2 +; GFX8-NEXT: v_lshlrev_b32_e64 v0, v0, s0 ; GFX8-NEXT: v_xor_b32_e32 v0, -1, v0 -; GFX8-NEXT: v_and_b32_e32 v0, s0, v0 +; GFX8-NEXT: v_and_b32_e32 v0, s1, v0 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_0 -; GFX8-NEXT: v_mov_b32_e32 v3, s5 -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v0, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, v1, v0 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_mov_b32_e32 v3, 16 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX8-NEXT: v_or_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX8-NEXT: v_or_b32_e32 v2, v0, v1 ; GFX8-NEXT: v_mov_b32_e32 v0, 0 ; GFX8-NEXT: v_mov_b32_e32 v1, 0 @@ -1120,17 +1099,15 @@ ; GFX7-NEXT: v_and_b32_e32 v0, 3, v0 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_lshr_b32 s1, s0, 8 -; GFX7-NEXT: s_and_b32 s1, s1, s5 -; GFX7-NEXT: s_lshr_b32 s2, s0, 16 -; GFX7-NEXT: s_lshr_b32 s3, s0, 24 -; GFX7-NEXT: s_and_b32 s0, s0, s5 -; GFX7-NEXT: s_lshl_b32 s1, s1, 8 -; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_and_b32 s1, s2, s5 -; GFX7-NEXT: s_lshl_b32 s1, s1, 16 -; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_lshl_b32 s1, s3, 24 +; GFX7-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GFX7-NEXT: s_lshr_b32 s1, s0, 24 +; GFX7-NEXT: s_and_b32 s2, s0, s5 +; GFX7-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GFX7-NEXT: s_lshl_b32 s3, s3, 8 +; GFX7-NEXT: s_or_b32 s2, s2, s3 +; GFX7-NEXT: s_lshl_b32 s0, s0, 16 +; GFX7-NEXT: s_or_b32 s0, s2, s0 +; GFX7-NEXT: s_lshl_b32 s1, s1, 24 ; GFX7-NEXT: s_or_b32 s0, s0, s1 ; GFX7-NEXT: s_and_b32 s1, s4, s5 ; GFX7-NEXT: v_lshl_b32_e32 v1, s1, v0 @@ -1138,17 +1115,15 @@ ; GFX7-NEXT: v_xor_b32_e32 v0, -1, v0 ; 
GFX7-NEXT: v_and_b32_e32 v0, s0, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s5, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s5, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s5, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, s5, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX7-NEXT: s_mov_b64 s[0:1], 0 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: s_mov_b32 s2, -1 @@ -1167,27 +1142,25 @@ ; GFX10-NEXT: v_lshlrev_b32_e64 v0, v0, s2 ; GFX10-NEXT: v_xor_b32_e32 v1, -1, v1 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s3, s0, 16 -; GFX10-NEXT: s_and_b32 s2, s2, s1 -; GFX10-NEXT: s_and_b32 s3, s3, s1 -; GFX10-NEXT: s_lshr_b32 s4, s0, 24 -; GFX10-NEXT: s_and_b32 s0, s0, s1 -; GFX10-NEXT: s_lshl_b32 s2, s2, 8 -; GFX10-NEXT: s_lshl_b32 s3, s3, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s2 -; GFX10-NEXT: s_lshl_b32 s2, s4, 24 -; GFX10-NEXT: s_or_b32 s0, s0, s3 +; GFX10-NEXT: s_bfe_u32 s4, s0, 0x80008 +; GFX10-NEXT: s_lshr_b32 s2, s0, 24 +; GFX10-NEXT: s_and_b32 s3, s0, s1 +; GFX10-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GFX10-NEXT: s_lshl_b32 s4, s4, 8 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_or_b32 s3, s3, s4 +; GFX10-NEXT: s_lshl_b32 s2, s2, 24 +; GFX10-NEXT: s_or_b32 s0, s3, s0 ; GFX10-NEXT: s_or_b32 s0, s0, s2 ; GFX10-NEXT: v_and_or_b32 v0, s0, v1, v0 ; GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 
8, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX10-NEXT: s_mov_b32 s0, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v1 -; GFX10-NEXT: v_or3_b32 v2, v0, v3, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX10-NEXT: v_or3_b32 v2, v0, v3, v1 ; GFX10-NEXT: v_mov_b32_e32 v0, 0 ; GFX10-NEXT: v_mov_b32_e32 v1, 0 ; GFX10-NEXT: global_store_dword v[0:1], v2, off @@ -1201,35 +1174,33 @@ define amdgpu_ps void @insertelement_s_v4i8_v_v(<4 x i8> addrspace(4)* inreg %ptr, i8 %val, i32 %idx) { ; GFX9-LABEL: insertelement_s_v4i8_v_v: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_load_dword s1, s[2:3], 0x0 -; GFX9-NEXT: s_movk_i32 s5, 0xff +; GFX9-NEXT: s_load_dword s0, s[2:3], 0x0 +; GFX9-NEXT: s_movk_i32 s4, 0xff ; GFX9-NEXT: v_and_b32_e32 v1, 3, v1 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 3, v1 ; GFX9-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_lshr_b32 s2, s1, 8 -; GFX9-NEXT: s_and_b32 s2, s2, s5 -; GFX9-NEXT: s_lshr_b32 s3, s1, 16 -; GFX9-NEXT: s_lshr_b32 s4, s1, 24 -; GFX9-NEXT: s_and_b32 s1, s1, s5 -; GFX9-NEXT: s_lshl_b32 s2, s2, 8 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_and_b32 s2, s3, s5 -; GFX9-NEXT: s_lshl_b32 s2, s2, 16 -; GFX9-NEXT: v_lshlrev_b32_e64 v1, v1, s5 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_lshl_b32 s2, s4, 24 -; GFX9-NEXT: s_or_b32 s1, s1, s2 +; GFX9-NEXT: s_bfe_u32 s6, s0, 0x80008 +; GFX9-NEXT: s_lshr_b32 s3, s0, 24 +; GFX9-NEXT: s_and_b32 s5, s0, s4 +; 
GFX9-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GFX9-NEXT: s_lshl_b32 s6, s6, 8 +; GFX9-NEXT: s_or_b32 s5, s5, s6 +; GFX9-NEXT: s_lshl_b32 s0, s0, 16 +; GFX9-NEXT: v_lshlrev_b32_e64 v1, v1, s4 +; GFX9-NEXT: s_or_b32 s0, s5, s0 +; GFX9-NEXT: s_lshl_b32 s3, s3, 24 +; GFX9-NEXT: s_or_b32 s0, s0, s3 ; GFX9-NEXT: v_xor_b32_e32 v1, -1, v1 -; GFX9-NEXT: v_and_or_b32 v0, s1, v1, v0 -; GFX9-NEXT: s_mov_b32 s0, 8 -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_or_b32 v1, v0, s5, v1 -; GFX9-NEXT: v_and_b32_sdwa v0, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2 -; GFX9-NEXT: v_or3_b32 v2, v1, v0, v2 +; GFX9-NEXT: v_and_or_b32 v0, s0, v1, v0 +; GFX9-NEXT: s_mov_b32 s1, 8 +; GFX9-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: s_mov_b32 s2, 16 +; GFX9-NEXT: v_and_or_b32 v2, v0, s4, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX9-NEXT: v_or3_b32 v2, v2, v0, v1 ; GFX9-NEXT: v_mov_b32_e32 v0, 0 ; GFX9-NEXT: v_mov_b32_e32 v1, 0 ; GFX9-NEXT: global_store_dword v[0:1], v2, off @@ -1237,37 +1208,34 @@ ; ; GFX8-LABEL: insertelement_s_v4i8_v_v: ; GFX8: ; %bb.0: -; GFX8-NEXT: s_load_dword s0, s[2:3], 0x0 -; GFX8-NEXT: s_movk_i32 s4, 0xff +; GFX8-NEXT: s_load_dword s1, s[2:3], 0x0 +; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: v_and_b32_e32 v1, 3, v1 ; GFX8-NEXT: v_lshlrev_b32_e32 v1, 3, v1 ; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_lshr_b32 s1, s0, 8 -; GFX8-NEXT: s_and_b32 s1, s1, s4 -; GFX8-NEXT: s_lshr_b32 s2, s0, 16 
-; GFX8-NEXT: s_lshr_b32 s3, s0, 24 -; GFX8-NEXT: s_and_b32 s0, s0, s4 -; GFX8-NEXT: s_lshl_b32 s1, s1, 8 -; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s2, s4 +; GFX8-NEXT: s_bfe_u32 s4, s1, 0x80008 +; GFX8-NEXT: s_lshr_b32 s2, s1, 24 +; GFX8-NEXT: s_and_b32 s3, s1, s0 +; GFX8-NEXT: s_bfe_u32 s1, s1, 0x80010 +; GFX8-NEXT: s_lshl_b32 s4, s4, 8 +; GFX8-NEXT: s_or_b32 s3, s3, s4 ; GFX8-NEXT: s_lshl_b32 s1, s1, 16 -; GFX8-NEXT: v_lshlrev_b32_e64 v1, v1, s4 -; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_lshl_b32 s1, s3, 24 -; GFX8-NEXT: s_or_b32 s0, s0, s1 +; GFX8-NEXT: v_lshlrev_b32_e64 v1, v1, s0 +; GFX8-NEXT: s_or_b32 s1, s3, s1 +; GFX8-NEXT: s_lshl_b32 s2, s2, 24 +; GFX8-NEXT: s_or_b32 s1, s1, s2 ; GFX8-NEXT: v_xor_b32_e32 v1, -1, v1 -; GFX8-NEXT: v_and_b32_e32 v1, s0, v1 +; GFX8-NEXT: v_and_b32_e32 v1, s1, v1 ; GFX8-NEXT: v_or_b32_e32 v0, v1, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX8-NEXT: v_mov_b32_e32 v3, 8 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_mov_b32_e32 v3, s4 -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX8-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v0, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, v1, v0 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX8-NEXT: v_mov_b32_e32 v2, 8 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_mov_b32_e32 v3, 16 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX8-NEXT: v_or_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX8-NEXT: v_or_b32_e32 v2, v0, v1 ; 
GFX8-NEXT: v_mov_b32_e32 v0, 0 ; GFX8-NEXT: v_mov_b32_e32 v1, 0 @@ -1282,34 +1250,30 @@ ; GFX7-NEXT: v_lshlrev_b32_e32 v1, 3, v1 ; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_lshr_b32 s1, s0, 8 -; GFX7-NEXT: s_and_b32 s1, s1, s4 -; GFX7-NEXT: s_lshr_b32 s2, s0, 16 -; GFX7-NEXT: s_lshr_b32 s3, s0, 24 -; GFX7-NEXT: s_and_b32 s0, s0, s4 -; GFX7-NEXT: s_lshl_b32 s1, s1, 8 -; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_and_b32 s1, s2, s4 -; GFX7-NEXT: s_lshl_b32 s1, s1, 16 -; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_lshl_b32 s1, s3, 24 +; GFX7-NEXT: s_bfe_u32 s3, s0, 0x80008 +; GFX7-NEXT: s_lshr_b32 s1, s0, 24 +; GFX7-NEXT: s_and_b32 s2, s0, s4 +; GFX7-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GFX7-NEXT: s_lshl_b32 s3, s3, 8 +; GFX7-NEXT: s_or_b32 s2, s2, s3 +; GFX7-NEXT: s_lshl_b32 s0, s0, 16 +; GFX7-NEXT: s_or_b32 s0, s2, s0 +; GFX7-NEXT: s_lshl_b32 s1, s1, 24 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, v1, v0 ; GFX7-NEXT: v_lshl_b32_e32 v1, s4, v1 ; GFX7-NEXT: s_or_b32 s0, s0, s1 ; GFX7-NEXT: v_xor_b32_e32 v1, -1, v1 ; GFX7-NEXT: v_and_b32_e32 v1, s0, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s4, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, s4, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 ; GFX7-NEXT: s_mov_b64 s[0:1], 0 ; GFX7-NEXT: 
v_or_b32_e32 v0, v0, v1 ; GFX7-NEXT: s_mov_b32 s2, -1 @@ -1327,27 +1291,25 @@ ; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX10-NEXT: v_xor_b32_e32 v1, -1, v2 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s3, s0, 16 -; GFX10-NEXT: s_and_b32 s2, s2, s1 -; GFX10-NEXT: s_and_b32 s3, s3, s1 -; GFX10-NEXT: s_lshr_b32 s4, s0, 24 -; GFX10-NEXT: s_and_b32 s0, s0, s1 -; GFX10-NEXT: s_lshl_b32 s2, s2, 8 -; GFX10-NEXT: s_lshl_b32 s3, s3, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s2 -; GFX10-NEXT: s_lshl_b32 s2, s4, 24 -; GFX10-NEXT: s_or_b32 s0, s0, s3 +; GFX10-NEXT: s_bfe_u32 s4, s0, 0x80008 +; GFX10-NEXT: s_lshr_b32 s2, s0, 24 +; GFX10-NEXT: s_and_b32 s3, s0, s1 +; GFX10-NEXT: s_bfe_u32 s0, s0, 0x80010 +; GFX10-NEXT: s_lshl_b32 s4, s4, 8 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_or_b32 s3, s3, s4 +; GFX10-NEXT: s_lshl_b32 s2, s2, 24 +; GFX10-NEXT: s_or_b32 s0, s3, s0 ; GFX10-NEXT: s_or_b32 s0, s0, s2 ; GFX10-NEXT: v_and_or_b32 v0, s0, v1, v0 ; GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX10-NEXT: v_and_b32_sdwa v3, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX10-NEXT: s_mov_b32 s0, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v1 -; GFX10-NEXT: v_or3_b32 v2, v0, v3, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX10-NEXT: v_or3_b32 v2, v0, v3, v1 ; GFX10-NEXT: v_mov_b32_e32 v0, 0 ; GFX10-NEXT: v_mov_b32_e32 v1, 0 ; GFX10-NEXT: global_store_dword v[0:1], v2, off @@ 
-1362,31 +1324,31 @@ ; GFX9-LABEL: insertelement_v_v4i8_s_v: ; GFX9: ; %bb.0: ; GFX9-NEXT: global_load_dword v0, v[0:1], off -; GFX9-NEXT: s_movk_i32 s1, 0xff ; GFX9-NEXT: v_and_b32_e32 v2, 3, v2 ; GFX9-NEXT: s_mov_b32 s0, 8 -; GFX9-NEXT: s_and_b32 s2, s2, s1 +; GFX9-NEXT: s_movk_i32 s3, 0xff +; GFX9-NEXT: s_mov_b32 s1, 16 +; GFX9-NEXT: s_and_b32 s2, s2, s3 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 3, v2 -; GFX9-NEXT: v_lshlrev_b32_e64 v3, v2, s2 -; GFX9-NEXT: v_lshlrev_b32_e64 v2, v2, s1 +; GFX9-NEXT: v_lshlrev_b32_e64 v4, v2, s2 +; GFX9-NEXT: v_lshlrev_b32_e64 v2, v2, s3 ; GFX9-NEXT: v_xor_b32_e32 v2, -1, v2 ; GFX9-NEXT: v_mov_b32_e32 v1, 8 +; GFX9-NEXT: v_mov_b32_e32 v3, 16 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, v0 ; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v6, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b32_sdwa v6, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v7, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v6 ; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX9-NEXT: v_and_or_b32 v0, v0, s1, v4 -; GFX9-NEXT: v_or3_b32 v0, v0, v6, v5 -; GFX9-NEXT: v_and_or_b32 v0, v0, v2, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v4, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v3 -; GFX9-NEXT: v_and_or_b32 v0, v0, s1, v1 -; GFX9-NEXT: v_or3_b32 v2, v0, v4, v2 +; GFX9-NEXT: v_or3_b32 v0, v0, v7, v5 +; GFX9-NEXT: v_and_or_b32 v0, v0, v2, v4 +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; 
GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX9-NEXT: v_or3_b32 v2, v0, v3, v1 ; GFX9-NEXT: v_mov_b32_e32 v0, 0 ; GFX9-NEXT: v_mov_b32_e32 v1, 0 ; GFX9-NEXT: global_store_dword v[0:1], v2, off @@ -1395,35 +1357,34 @@ ; GFX8-LABEL: insertelement_v_v4i8_s_v: ; GFX8: ; %bb.0: ; GFX8-NEXT: flat_load_dword v0, v[0:1] -; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: v_mov_b32_e32 v1, 8 +; GFX8-NEXT: v_mov_b32_e32 v3, 16 ; GFX8-NEXT: v_and_b32_e32 v2, 3, v2 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 +; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: s_and_b32 s1, s2, s0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 3, v2 -; GFX8-NEXT: v_lshlrev_b32_e64 v5, v2, s1 +; GFX8-NEXT: v_lshlrev_b32_e64 v6, v2, s1 ; GFX8-NEXT: v_lshlrev_b32_e64 v2, v2, s0 ; GFX8-NEXT: v_xor_b32_e32 v2, -1, v2 -; GFX8-NEXT: v_mov_b32_e32 v3, 8 +; GFX8-NEXT: v_mov_b32_e32 v4, 8 +; GFX8-NEXT: v_mov_b32_e32 v5, 16 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v6, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v8, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v7 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v8 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v6 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v7 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v3 +; 
GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_and_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v4, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v2 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX8-NEXT: v_or_b32_e32 v2, v0, v2 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v6 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v3 +; GFX8-NEXT: v_or_b32_e32 v2, v0, v1 ; GFX8-NEXT: v_mov_b32_e32 v0, 0 ; GFX8-NEXT: v_mov_b32_e32 v1, 0 ; GFX8-NEXT: flat_store_dword v[0:1], v2 @@ -1444,32 +1405,28 @@ ; GFX7-NEXT: v_xor_b32_e32 v1, -1, v1 ; GFX7-NEXT: s_mov_b32 s6, -1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v3, s0, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v4, s0, v4 -; GFX7-NEXT: v_and_b32_e32 v0, s0, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX7-NEXT: v_bfe_u32 v5, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v4, s0, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 
v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX7-NEXT: v_or_b32_e32 v0, v4, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v5 ; GFX7-NEXT: v_and_b32_e32 v0, v0, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s0, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s0, v2 -; GFX7-NEXT: v_and_b32_e32 v0, s0, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, s0, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 ; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; GFX7-NEXT: s_endpgm ; @@ -1482,25 +1439,25 @@ ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 3, v1 ; GFX10-NEXT: v_lshlrev_b32_e64 v4, v1, s1 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 8, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX10-NEXT: v_and_b32_sdwa v5, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX10-NEXT: s_mov_b32 s0, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v5, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; 
GFX10-NEXT: v_and_or_b32 v0, v0, s1, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v3 ; GFX10-NEXT: s_and_b32 s0, s2, s1 +; GFX10-NEXT: v_xor_b32_e32 v3, -1, v4 ; GFX10-NEXT: v_lshlrev_b32_e64 v1, v1, s0 -; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v2 -; GFX10-NEXT: v_xor_b32_e32 v2, -1, v4 -; GFX10-NEXT: v_or3_b32 v0, v0, v5, v3 -; GFX10-NEXT: v_and_or_b32 v0, v0, v2, v1 -; GFX10-NEXT: v_mov_b32_e32 v1, 8 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 8, v0 +; GFX10-NEXT: v_or3_b32 v0, v0, v5, v2 +; GFX10-NEXT: v_mov_b32_e32 v2, 8 +; GFX10-NEXT: v_and_or_b32 v0, v0, v3, v1 +; GFX10-NEXT: v_mov_b32_e32 v1, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v2, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v1 -; GFX10-NEXT: v_or3_b32 v2, v0, v2, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v3 +; GFX10-NEXT: v_or3_b32 v2, v0, v1, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, 0 ; GFX10-NEXT: v_mov_b32_e32 v1, 0 ; GFX10-NEXT: global_store_dword v[0:1], v2, off @@ -1517,28 +1474,28 @@ ; GFX9-NEXT: global_load_dword v0, v[0:1], off ; GFX9-NEXT: s_and_b32 s2, s2, 3 ; GFX9-NEXT: s_mov_b32 s0, 8 -; GFX9-NEXT: s_movk_i32 s1, 0xff +; GFX9-NEXT: s_mov_b32 s1, 16 ; GFX9-NEXT: s_lshl_b32 s2, s2, 3 +; GFX9-NEXT: s_movk_i32 s3, 0xff ; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: s_lshl_b32 s2, s1, s2 +; GFX9-NEXT: s_lshl_b32 s2, s3, s2 ; GFX9-NEXT: s_not_b32 s2, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, 8 +; GFX9-NEXT: v_mov_b32_e32 
v3, 16 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v3, 8, v0 ; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v3, s0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v5, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b32_sdwa v5, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v6, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v5 ; GFX9-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX9-NEXT: v_and_or_b32 v0, v0, s1, v3 -; GFX9-NEXT: v_or3_b32 v0, v0, v5, v4 +; GFX9-NEXT: v_or3_b32 v0, v0, v6, v4 ; GFX9-NEXT: v_and_or_b32 v0, v0, s2, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v4, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v3 -; GFX9-NEXT: v_and_or_b32 v0, v0, s1, v1 -; GFX9-NEXT: v_or3_b32 v2, v0, v4, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v1 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX9-NEXT: v_or3_b32 v2, v0, v3, v1 ; GFX9-NEXT: v_mov_b32_e32 v0, 0 ; GFX9-NEXT: v_mov_b32_e32 v1, 0 ; GFX9-NEXT: global_store_dword v[0:1], v2, off @@ -1548,34 +1505,33 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: flat_load_dword v0, v[0:1] ; GFX8-NEXT: s_and_b32 s1, s2, 3 +; GFX8-NEXT: v_mov_b32_e32 v1, 8 ; GFX8-NEXT: s_lshl_b32 s1, s1, 3 -; GFX8-NEXT: v_mov_b32_e32 v5, s1 +; GFX8-NEXT: v_mov_b32_e32 v3, 16 +; 
GFX8-NEXT: v_mov_b32_e32 v6, s1 ; GFX8-NEXT: s_movk_i32 s0, 0xff -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX8-NEXT: s_lshl_b32 s0, s0, s1 ; GFX8-NEXT: s_not_b32 s0, s0 -; GFX8-NEXT: v_mov_b32_e32 v3, 8 +; GFX8-NEXT: v_mov_b32_e32 v4, 8 +; GFX8-NEXT: v_mov_b32_e32 v5, 16 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX8-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v7, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v6 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v7 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v6 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v3 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_and_b32_e32 v0, s0, v0 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v4, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v2 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v4 -; 
GFX8-NEXT: v_or_b32_e32 v2, v0, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v3 +; GFX8-NEXT: v_or_b32_e32 v2, v0, v1 ; GFX8-NEXT: v_mov_b32_e32 v0, 0 ; GFX8-NEXT: v_mov_b32_e32 v1, 0 ; GFX8-NEXT: flat_store_dword v[0:1], v2 @@ -1588,40 +1544,36 @@ ; GFX7-NEXT: s_mov_b64 s[4:5], 0 ; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 ; GFX7-NEXT: s_movk_i32 s0, 0xff -; GFX7-NEXT: v_and_b32_e32 v1, s0, v2 ; GFX7-NEXT: s_and_b32 s1, s2, 3 +; GFX7-NEXT: v_and_b32_e32 v1, s0, v2 ; GFX7-NEXT: s_lshl_b32 s1, s1, 3 ; GFX7-NEXT: v_lshlrev_b32_e32 v1, s1, v1 ; GFX7-NEXT: s_lshl_b32 s1, s0, s1 ; GFX7-NEXT: s_not_b32 s1, s1 ; GFX7-NEXT: s_mov_b32 s6, -1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s0, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v3, s0, v3 -; GFX7-NEXT: v_and_b32_e32 v0, s0, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_bfe_u32 v4, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v3, s0, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX7-NEXT: v_or_b32_e32 v0, v3, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 ; GFX7-NEXT: v_and_b32_e32 v0, s1, v0 ; GFX7-NEXT: 
v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s0, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s0, v2 -; GFX7-NEXT: v_and_b32_e32 v0, s0, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v2, s0, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX7-NEXT: v_or_b32_e32 v0, v2, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 ; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; GFX7-NEXT: s_endpgm ; @@ -1629,29 +1581,29 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: global_load_dword v0, v[0:1], off ; GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: s_and_b32 s1, s2, 3 -; GFX10-NEXT: s_lshl_b32 s1, s1, 3 +; GFX10-NEXT: s_mov_b32 s1, 16 +; GFX10-NEXT: s_and_b32 s2, s2, 3 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX10-NEXT: s_movk_i32 s0, 0xff -; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX10-NEXT: v_and_b32_sdwa v4, v0, s0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX10-NEXT: v_lshlrev_b32_sdwa v4, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: s_lshl_b32 s1, s2, 3 ; GFX10-NEXT: v_and_or_b32 v0, v0, s0, v1 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s1, v2 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX10-NEXT: s_lshl_b32 s1, s0, s1 ; GFX10-NEXT: s_not_b32 s1, s1 -; GFX10-NEXT: v_or3_b32 v0, v0, v4, v3 -; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v1 +; GFX10-NEXT: v_or3_b32 v0, v0, v4, v1 ; GFX10-NEXT: v_mov_b32_e32 v1, 8 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 8, v0 +; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v2 +; GFX10-NEXT: v_mov_b32_e32 v2, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v2, v0, s0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_and_or_b32 v0, v0, s0, v1 -; GFX10-NEXT: v_or3_b32 v2, v0, v2, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v1, 24, v3 +; GFX10-NEXT: v_or3_b32 v2, v0, v2, v1 ; GFX10-NEXT: v_mov_b32_e32 v0, 0 ; GFX10-NEXT: v_mov_b32_e32 v1, 0 ; GFX10-NEXT: global_store_dword v[0:1], v2, off @@ -1668,29 +1620,29 @@ ; GFX9-NEXT: global_load_dword v0, v[0:1], off ; GFX9-NEXT: v_and_b32_e32 v3, 3, v3 ; GFX9-NEXT: s_mov_b32 s0, 8 -; GFX9-NEXT: s_movk_i32 s1, 0xff +; GFX9-NEXT: s_mov_b32 s1, 16 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX9-NEXT: s_movk_i32 s2, 0xff ; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_e64 v3, v3, s1 +; GFX9-NEXT: v_lshlrev_b32_e64 v3, v3, s2 ; GFX9-NEXT: v_xor_b32_e32 v3, -1, v3 ; GFX9-NEXT: v_mov_b32_e32 v4, 8 ; GFX9-NEXT: v_mov_b32_e32 v1, 0xff +; GFX9-NEXT: v_mov_b32_e32 v5, 16 ; GFX9-NEXT: 
s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v0 ; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v5, s0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v7, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b32_sdwa v7, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v8, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, s2, v7 ; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX9-NEXT: v_and_or_b32 v0, v0, s1, v5 -; GFX9-NEXT: v_or3_b32 v0, v0, v7, v6 +; GFX9-NEXT: v_or3_b32 v0, v0, v8, v6 ; GFX9-NEXT: v_and_or_b32 v0, v0, v3, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v5, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v2 -; GFX9-NEXT: v_or3_b32 v2, v0, v5, v3 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v1, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 24, v2 +; GFX9-NEXT: v_or3_b32 v2, v0, v4, v1 ; GFX9-NEXT: v_mov_b32_e32 v0, 0 ; GFX9-NEXT: v_mov_b32_e32 v1, 0 ; GFX9-NEXT: global_store_dword v[0:1], v2, off @@ -1699,35 +1651,33 @@ ; GFX8-LABEL: insertelement_v_v4i8_v_v: ; GFX8: ; %bb.0: ; GFX8-NEXT: flat_load_dword v0, v[0:1] -; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: v_mov_b32_e32 v4, 8 -; GFX8-NEXT: v_mov_b32_e32 v6, s0 ; GFX8-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX8-NEXT: 
v_mov_b32_e32 v5, 16 ; GFX8-NEXT: v_lshlrev_b32_e32 v3, 3, v3 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_e64 v3, v3, s0 -; GFX8-NEXT: v_xor_b32_e32 v3, -1, v3 -; GFX8-NEXT: v_mov_b32_e32 v5, 8 ; GFX8-NEXT: v_mov_b32_e32 v1, 0xff +; GFX8-NEXT: v_lshlrev_b32_e32 v1, v3, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX8-NEXT: v_xor_b32_e32 v1, -1, v1 +; GFX8-NEXT: v_mov_b32_e32 v6, 8 +; GFX8-NEXT: v_mov_b32_e32 v7, 16 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v7, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v6, v0, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v7, 24, v8 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v6 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v7 -; GFX8-NEXT: v_and_b32_e32 v0, v0, v3 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v5 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v3 +; GFX8-NEXT: v_and_b32_e32 v0, v0, v1 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v6, v0 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v1, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX8-NEXT: v_or_b32_e32 v2, v0, v3 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v3 +; GFX8-NEXT: v_or_b32_e32 v2, v0, v1 ; GFX8-NEXT: v_mov_b32_e32 v0, 0 ; GFX8-NEXT: v_mov_b32_e32 v1, 0 ; GFX8-NEXT: flat_store_dword v[0:1], v2 @@ -1741,39 +1691,35 @@ ; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[0:3], 0 addr64 ; GFX7-NEXT: s_movk_i32 s2, 0xff ; GFX7-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX7-NEXT: v_mov_b32_e32 v1, 0xff ; GFX7-NEXT: v_and_b32_e32 v2, s2, v2 ; GFX7-NEXT: v_lshlrev_b32_e32 v3, 3, v3 ; GFX7-NEXT: v_lshlrev_b32_e32 v2, v3, v2 -; GFX7-NEXT: v_lshl_b32_e32 v3, s2, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, v3, v1 ; GFX7-NEXT: v_xor_b32_e32 v3, -1, v3 -; GFX7-NEXT: v_mov_b32_e32 v1, 0xff ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v4, s2, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v5, s2, v5 -; GFX7-NEXT: v_and_b32_e32 v0, s2, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX7-NEXT: v_bfe_u32 v6, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v5, s2, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v6, 8, v6 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v5, v5, v6 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX7-NEXT: v_or_b32_e32 v0, v5, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX7-NEXT: 
v_or_b32_e32 v0, v0, v6 ; GFX7-NEXT: v_and_b32_e32 v0, v0, v3 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX7-NEXT: v_and_b32_e32 v2, v2, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_and_b32_e32 v1, v3, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_bfe_u32 v3, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v1, v0, v1 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v4 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 ; GFX7-NEXT: s_mov_b32 s2, -1 ; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; GFX7-NEXT: s_endpgm @@ -1789,23 +1735,22 @@ ; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX10-NEXT: v_xor_b32_e32 v2, -1, v5 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 8, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX10-NEXT: v_and_b32_sdwa v6, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX10-NEXT: s_mov_b32 s0, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v6, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v3 -; GFX10-NEXT: v_mov_b32_e32 v3, 0xff -; GFX10-NEXT: v_or3_b32 v0, v0, v6, v4 +; GFX10-NEXT: 
v_lshlrev_b32_e32 v3, 24, v4 +; GFX10-NEXT: v_or3_b32 v0, v0, v6, v3 +; GFX10-NEXT: v_mov_b32_e32 v3, 8 ; GFX10-NEXT: v_and_or_b32 v0, v0, v2, v1 -; GFX10-NEXT: v_mov_b32_e32 v1, 8 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v2, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX10-NEXT: v_and_or_b32 v0, v0, v3, v1 -; GFX10-NEXT: v_or3_b32 v2, v0, v2, v4 +; GFX10-NEXT: v_mov_b32_e32 v1, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v3 +; GFX10-NEXT: v_or3_b32 v2, v0, v1, v2 ; GFX10-NEXT: v_mov_b32_e32 v0, 0 ; GFX10-NEXT: v_mov_b32_e32 v1, 0 ; GFX10-NEXT: global_store_dword v[0:1], v2, off @@ -1820,71 +1765,65 @@ ; GFX9-LABEL: insertelement_s_v8i8_s_s: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GFX9-NEXT: s_movk_i32 s10, 0xff +; GFX9-NEXT: s_mov_b32 s8, 0x80008 +; GFX9-NEXT: s_movk_i32 s6, 0xff ; GFX9-NEXT: v_mov_b32_e32 v2, 0 ; GFX9-NEXT: v_mov_b32_e32 v3, 0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_lshr_b32 s2, s0, 8 -; GFX9-NEXT: s_and_b32 s2, s2, s10 -; GFX9-NEXT: s_lshr_b32 s3, s0, 16 -; GFX9-NEXT: s_lshr_b32 s6, s0, 24 -; GFX9-NEXT: s_and_b32 s0, s0, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s2 -; GFX9-NEXT: s_and_b32 s2, s3, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 16 -; GFX9-NEXT: s_or_b32 s0, s0, s2 -; GFX9-NEXT: s_lshl_b32 s2, s6, 24 -; GFX9-NEXT: s_lshr_b32 s7, s1, 8 +; GFX9-NEXT: s_bfe_u32 s9, s0, s8 +; GFX9-NEXT: s_and_b32 s7, s0, s6 +; 
GFX9-NEXT: s_lshl_b32 s9, s9, 8 +; GFX9-NEXT: s_or_b32 s7, s7, s9 +; GFX9-NEXT: s_mov_b32 s9, 0x80010 +; GFX9-NEXT: s_lshr_b32 s2, s0, 24 +; GFX9-NEXT: s_bfe_u32 s0, s0, s9 +; GFX9-NEXT: s_lshl_b32 s0, s0, 16 +; GFX9-NEXT: s_or_b32 s0, s7, s0 +; GFX9-NEXT: s_bfe_u32 s7, s1, s8 +; GFX9-NEXT: s_lshl_b32 s2, s2, 24 ; GFX9-NEXT: s_or_b32 s0, s0, s2 -; GFX9-NEXT: s_and_b32 s2, s7, s10 -; GFX9-NEXT: s_lshr_b32 s8, s1, 16 -; GFX9-NEXT: s_lshr_b32 s9, s1, 24 -; GFX9-NEXT: s_and_b32 s1, s1, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 8 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_and_b32 s2, s8, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 16 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_lshl_b32 s2, s9, 24 +; GFX9-NEXT: s_lshr_b32 s3, s1, 24 +; GFX9-NEXT: s_and_b32 s2, s1, s6 +; GFX9-NEXT: s_bfe_u32 s1, s1, s9 +; GFX9-NEXT: s_lshl_b32 s7, s7, 8 +; GFX9-NEXT: s_or_b32 s2, s2, s7 +; GFX9-NEXT: s_lshl_b32 s1, s1, 16 +; GFX9-NEXT: s_or_b32 s1, s2, s1 +; GFX9-NEXT: s_lshl_b32 s2, s3, 24 ; GFX9-NEXT: s_or_b32 s1, s1, s2 ; GFX9-NEXT: s_lshr_b32 s2, s5, 2 ; GFX9-NEXT: s_cmp_eq_u32 s2, 1 ; GFX9-NEXT: s_cselect_b32 s3, s1, s0 ; GFX9-NEXT: s_and_b32 s5, s5, 3 ; GFX9-NEXT: s_lshl_b32 s5, s5, 3 -; GFX9-NEXT: s_and_b32 s4, s4, s10 +; GFX9-NEXT: s_and_b32 s4, s4, s6 ; GFX9-NEXT: s_lshl_b32 s4, s4, s5 -; GFX9-NEXT: s_lshl_b32 s5, s10, s5 +; GFX9-NEXT: s_lshl_b32 s5, s6, s5 ; GFX9-NEXT: s_andn2_b32 s3, s3, s5 ; GFX9-NEXT: s_or_b32 s3, s3, s4 ; GFX9-NEXT: s_cmp_eq_u32 s2, 0 ; GFX9-NEXT: s_cselect_b32 s0, s3, s0 ; GFX9-NEXT: s_cmp_eq_u32 s2, 1 ; GFX9-NEXT: s_cselect_b32 s1, s3, s1 -; GFX9-NEXT: s_lshr_b32 s2, s0, 8 -; GFX9-NEXT: s_and_b32 s2, s2, s10 -; GFX9-NEXT: s_lshr_b32 s3, s0, 16 -; GFX9-NEXT: s_lshr_b32 s4, s0, 24 -; GFX9-NEXT: s_and_b32 s0, s0, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s2 -; GFX9-NEXT: s_and_b32 s2, s3, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 16 -; GFX9-NEXT: s_or_b32 s0, s0, s2 -; GFX9-NEXT: s_lshl_b32 s2, s4, 24 -; GFX9-NEXT: s_lshr_b32 s5, s1, 8 
+; GFX9-NEXT: s_bfe_u32 s5, s0, s8 +; GFX9-NEXT: s_lshr_b32 s2, s0, 24 +; GFX9-NEXT: s_and_b32 s4, s0, s6 +; GFX9-NEXT: s_bfe_u32 s0, s0, s9 +; GFX9-NEXT: s_lshl_b32 s5, s5, 8 +; GFX9-NEXT: s_or_b32 s4, s4, s5 +; GFX9-NEXT: s_lshl_b32 s0, s0, 16 +; GFX9-NEXT: s_or_b32 s0, s4, s0 +; GFX9-NEXT: s_bfe_u32 s4, s1, s8 +; GFX9-NEXT: s_lshl_b32 s2, s2, 24 ; GFX9-NEXT: s_or_b32 s0, s0, s2 -; GFX9-NEXT: s_and_b32 s2, s5, s10 -; GFX9-NEXT: s_lshr_b32 s6, s1, 16 -; GFX9-NEXT: s_lshr_b32 s7, s1, 24 -; GFX9-NEXT: s_and_b32 s1, s1, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 8 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_and_b32 s2, s6, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 16 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_lshl_b32 s2, s7, 24 +; GFX9-NEXT: s_lshr_b32 s3, s1, 24 +; GFX9-NEXT: s_and_b32 s2, s1, s6 +; GFX9-NEXT: s_bfe_u32 s1, s1, s9 +; GFX9-NEXT: s_lshl_b32 s4, s4, 8 +; GFX9-NEXT: s_or_b32 s2, s2, s4 +; GFX9-NEXT: s_lshl_b32 s1, s1, 16 +; GFX9-NEXT: s_or_b32 s1, s2, s1 +; GFX9-NEXT: s_lshl_b32 s2, s3, 24 ; GFX9-NEXT: s_or_b32 s1, s1, s2 ; GFX9-NEXT: v_mov_b32_e32 v0, s0 ; GFX9-NEXT: v_mov_b32_e32 v1, s1 @@ -1894,71 +1833,65 @@ ; GFX8-LABEL: insertelement_s_v8i8_s_s: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GFX8-NEXT: s_movk_i32 s10, 0xff +; GFX8-NEXT: s_mov_b32 s8, 0x80008 +; GFX8-NEXT: s_movk_i32 s6, 0xff ; GFX8-NEXT: v_mov_b32_e32 v2, 0 ; GFX8-NEXT: v_mov_b32_e32 v3, 0 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_lshr_b32 s2, s0, 8 -; GFX8-NEXT: s_and_b32 s2, s2, s10 -; GFX8-NEXT: s_lshr_b32 s3, s0, 16 -; GFX8-NEXT: s_lshr_b32 s6, s0, 24 -; GFX8-NEXT: s_and_b32 s0, s0, s10 -; GFX8-NEXT: s_lshl_b32 s2, s2, 8 -; GFX8-NEXT: s_or_b32 s0, s0, s2 -; GFX8-NEXT: s_and_b32 s2, s3, s10 -; GFX8-NEXT: s_lshl_b32 s2, s2, 16 -; GFX8-NEXT: s_or_b32 s0, s0, s2 -; GFX8-NEXT: s_lshl_b32 s2, s6, 24 -; GFX8-NEXT: s_lshr_b32 s7, s1, 8 +; GFX8-NEXT: s_bfe_u32 s9, s0, s8 +; GFX8-NEXT: s_and_b32 s7, s0, s6 +; GFX8-NEXT: s_lshl_b32 s9, s9, 8 +; GFX8-NEXT: 
s_or_b32 s7, s7, s9 +; GFX8-NEXT: s_mov_b32 s9, 0x80010 +; GFX8-NEXT: s_lshr_b32 s2, s0, 24 +; GFX8-NEXT: s_bfe_u32 s0, s0, s9 +; GFX8-NEXT: s_lshl_b32 s0, s0, 16 +; GFX8-NEXT: s_or_b32 s0, s7, s0 +; GFX8-NEXT: s_bfe_u32 s7, s1, s8 +; GFX8-NEXT: s_lshl_b32 s2, s2, 24 ; GFX8-NEXT: s_or_b32 s0, s0, s2 -; GFX8-NEXT: s_and_b32 s2, s7, s10 -; GFX8-NEXT: s_lshr_b32 s8, s1, 16 -; GFX8-NEXT: s_lshr_b32 s9, s1, 24 -; GFX8-NEXT: s_and_b32 s1, s1, s10 -; GFX8-NEXT: s_lshl_b32 s2, s2, 8 -; GFX8-NEXT: s_or_b32 s1, s1, s2 -; GFX8-NEXT: s_and_b32 s2, s8, s10 -; GFX8-NEXT: s_lshl_b32 s2, s2, 16 -; GFX8-NEXT: s_or_b32 s1, s1, s2 -; GFX8-NEXT: s_lshl_b32 s2, s9, 24 +; GFX8-NEXT: s_lshr_b32 s3, s1, 24 +; GFX8-NEXT: s_and_b32 s2, s1, s6 +; GFX8-NEXT: s_bfe_u32 s1, s1, s9 +; GFX8-NEXT: s_lshl_b32 s7, s7, 8 +; GFX8-NEXT: s_or_b32 s2, s2, s7 +; GFX8-NEXT: s_lshl_b32 s1, s1, 16 +; GFX8-NEXT: s_or_b32 s1, s2, s1 +; GFX8-NEXT: s_lshl_b32 s2, s3, 24 ; GFX8-NEXT: s_or_b32 s1, s1, s2 ; GFX8-NEXT: s_lshr_b32 s2, s5, 2 ; GFX8-NEXT: s_cmp_eq_u32 s2, 1 ; GFX8-NEXT: s_cselect_b32 s3, s1, s0 ; GFX8-NEXT: s_and_b32 s5, s5, 3 ; GFX8-NEXT: s_lshl_b32 s5, s5, 3 -; GFX8-NEXT: s_and_b32 s4, s4, s10 +; GFX8-NEXT: s_and_b32 s4, s4, s6 ; GFX8-NEXT: s_lshl_b32 s4, s4, s5 -; GFX8-NEXT: s_lshl_b32 s5, s10, s5 +; GFX8-NEXT: s_lshl_b32 s5, s6, s5 ; GFX8-NEXT: s_andn2_b32 s3, s3, s5 ; GFX8-NEXT: s_or_b32 s3, s3, s4 ; GFX8-NEXT: s_cmp_eq_u32 s2, 0 ; GFX8-NEXT: s_cselect_b32 s0, s3, s0 ; GFX8-NEXT: s_cmp_eq_u32 s2, 1 ; GFX8-NEXT: s_cselect_b32 s1, s3, s1 -; GFX8-NEXT: s_lshr_b32 s2, s0, 8 -; GFX8-NEXT: s_and_b32 s2, s2, s10 -; GFX8-NEXT: s_lshr_b32 s3, s0, 16 -; GFX8-NEXT: s_lshr_b32 s4, s0, 24 -; GFX8-NEXT: s_and_b32 s0, s0, s10 -; GFX8-NEXT: s_lshl_b32 s2, s2, 8 -; GFX8-NEXT: s_or_b32 s0, s0, s2 -; GFX8-NEXT: s_and_b32 s2, s3, s10 -; GFX8-NEXT: s_lshl_b32 s2, s2, 16 -; GFX8-NEXT: s_or_b32 s0, s0, s2 -; GFX8-NEXT: s_lshl_b32 s2, s4, 24 -; GFX8-NEXT: s_lshr_b32 s5, s1, 8 +; GFX8-NEXT: s_bfe_u32 s5, s0, s8 +; 
GFX8-NEXT: s_lshr_b32 s2, s0, 24 +; GFX8-NEXT: s_and_b32 s4, s0, s6 +; GFX8-NEXT: s_bfe_u32 s0, s0, s9 +; GFX8-NEXT: s_lshl_b32 s5, s5, 8 +; GFX8-NEXT: s_or_b32 s4, s4, s5 +; GFX8-NEXT: s_lshl_b32 s0, s0, 16 +; GFX8-NEXT: s_or_b32 s0, s4, s0 +; GFX8-NEXT: s_bfe_u32 s4, s1, s8 +; GFX8-NEXT: s_lshl_b32 s2, s2, 24 ; GFX8-NEXT: s_or_b32 s0, s0, s2 -; GFX8-NEXT: s_and_b32 s2, s5, s10 -; GFX8-NEXT: s_lshr_b32 s6, s1, 16 -; GFX8-NEXT: s_lshr_b32 s7, s1, 24 -; GFX8-NEXT: s_and_b32 s1, s1, s10 -; GFX8-NEXT: s_lshl_b32 s2, s2, 8 -; GFX8-NEXT: s_or_b32 s1, s1, s2 -; GFX8-NEXT: s_and_b32 s2, s6, s10 -; GFX8-NEXT: s_lshl_b32 s2, s2, 16 -; GFX8-NEXT: s_or_b32 s1, s1, s2 -; GFX8-NEXT: s_lshl_b32 s2, s7, 24 +; GFX8-NEXT: s_lshr_b32 s3, s1, 24 +; GFX8-NEXT: s_and_b32 s2, s1, s6 +; GFX8-NEXT: s_bfe_u32 s1, s1, s9 +; GFX8-NEXT: s_lshl_b32 s4, s4, 8 +; GFX8-NEXT: s_or_b32 s2, s2, s4 +; GFX8-NEXT: s_lshl_b32 s1, s1, 16 +; GFX8-NEXT: s_or_b32 s1, s2, s1 +; GFX8-NEXT: s_lshl_b32 s2, s3, 24 ; GFX8-NEXT: s_or_b32 s1, s1, s2 ; GFX8-NEXT: v_mov_b32_e32 v0, s0 ; GFX8-NEXT: v_mov_b32_e32 v1, s1 @@ -1968,69 +1901,63 @@ ; GFX7-LABEL: insertelement_s_v8i8_s_s: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GFX7-NEXT: s_movk_i32 s10, 0xff +; GFX7-NEXT: s_mov_b32 s8, 0x80008 +; GFX7-NEXT: s_movk_i32 s6, 0xff ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_lshr_b32 s2, s0, 8 -; GFX7-NEXT: s_and_b32 s2, s2, s10 -; GFX7-NEXT: s_lshr_b32 s3, s0, 16 -; GFX7-NEXT: s_lshr_b32 s6, s0, 24 -; GFX7-NEXT: s_and_b32 s0, s0, s10 -; GFX7-NEXT: s_lshl_b32 s2, s2, 8 -; GFX7-NEXT: s_or_b32 s0, s0, s2 -; GFX7-NEXT: s_and_b32 s2, s3, s10 -; GFX7-NEXT: s_lshl_b32 s2, s2, 16 +; GFX7-NEXT: s_bfe_u32 s9, s0, s8 +; GFX7-NEXT: s_and_b32 s7, s0, s6 +; GFX7-NEXT: s_lshl_b32 s9, s9, 8 +; GFX7-NEXT: s_or_b32 s7, s7, s9 +; GFX7-NEXT: s_mov_b32 s9, 0x80010 +; GFX7-NEXT: s_lshr_b32 s2, s0, 24 +; GFX7-NEXT: s_bfe_u32 s0, s0, s9 +; GFX7-NEXT: s_lshl_b32 s0, s0, 16 +; GFX7-NEXT: s_or_b32 s0, s7, s0 +; 
GFX7-NEXT: s_bfe_u32 s7, s1, s8 +; GFX7-NEXT: s_lshl_b32 s2, s2, 24 ; GFX7-NEXT: s_or_b32 s0, s0, s2 -; GFX7-NEXT: s_lshl_b32 s2, s6, 24 -; GFX7-NEXT: s_lshr_b32 s7, s1, 8 -; GFX7-NEXT: s_or_b32 s0, s0, s2 -; GFX7-NEXT: s_and_b32 s2, s7, s10 -; GFX7-NEXT: s_lshr_b32 s8, s1, 16 -; GFX7-NEXT: s_lshr_b32 s9, s1, 24 -; GFX7-NEXT: s_and_b32 s1, s1, s10 -; GFX7-NEXT: s_lshl_b32 s2, s2, 8 -; GFX7-NEXT: s_or_b32 s1, s1, s2 -; GFX7-NEXT: s_and_b32 s2, s8, s10 -; GFX7-NEXT: s_lshl_b32 s2, s2, 16 -; GFX7-NEXT: s_or_b32 s1, s1, s2 -; GFX7-NEXT: s_lshl_b32 s2, s9, 24 +; GFX7-NEXT: s_lshr_b32 s3, s1, 24 +; GFX7-NEXT: s_and_b32 s2, s1, s6 +; GFX7-NEXT: s_bfe_u32 s1, s1, s9 +; GFX7-NEXT: s_lshl_b32 s7, s7, 8 +; GFX7-NEXT: s_or_b32 s2, s2, s7 +; GFX7-NEXT: s_lshl_b32 s1, s1, 16 +; GFX7-NEXT: s_or_b32 s1, s2, s1 +; GFX7-NEXT: s_lshl_b32 s2, s3, 24 ; GFX7-NEXT: s_or_b32 s1, s1, s2 ; GFX7-NEXT: s_lshr_b32 s2, s5, 2 ; GFX7-NEXT: s_cmp_eq_u32 s2, 1 ; GFX7-NEXT: s_cselect_b32 s3, s1, s0 ; GFX7-NEXT: s_and_b32 s5, s5, 3 ; GFX7-NEXT: s_lshl_b32 s5, s5, 3 -; GFX7-NEXT: s_and_b32 s4, s4, s10 +; GFX7-NEXT: s_and_b32 s4, s4, s6 ; GFX7-NEXT: s_lshl_b32 s4, s4, s5 -; GFX7-NEXT: s_lshl_b32 s5, s10, s5 +; GFX7-NEXT: s_lshl_b32 s5, s6, s5 ; GFX7-NEXT: s_andn2_b32 s3, s3, s5 ; GFX7-NEXT: s_or_b32 s3, s3, s4 ; GFX7-NEXT: s_cmp_eq_u32 s2, 0 ; GFX7-NEXT: s_cselect_b32 s4, s3, s0 ; GFX7-NEXT: s_cmp_eq_u32 s2, 1 ; GFX7-NEXT: s_cselect_b32 s3, s3, s1 -; GFX7-NEXT: s_lshr_b32 s2, s4, 8 -; GFX7-NEXT: s_and_b32 s2, s2, s10 -; GFX7-NEXT: s_lshr_b32 s5, s4, 16 -; GFX7-NEXT: s_lshr_b32 s6, s4, 24 -; GFX7-NEXT: s_and_b32 s4, s4, s10 -; GFX7-NEXT: s_lshl_b32 s2, s2, 8 -; GFX7-NEXT: s_or_b32 s2, s4, s2 -; GFX7-NEXT: s_and_b32 s4, s5, s10 -; GFX7-NEXT: s_lshl_b32 s4, s4, 16 -; GFX7-NEXT: s_or_b32 s2, s2, s4 -; GFX7-NEXT: s_lshl_b32 s4, s6, 24 -; GFX7-NEXT: s_lshr_b32 s7, s3, 8 -; GFX7-NEXT: s_or_b32 s2, s2, s4 -; GFX7-NEXT: s_and_b32 s4, s7, s10 -; GFX7-NEXT: s_lshr_b32 s8, s3, 16 -; GFX7-NEXT: s_lshr_b32 s9, s3, 
24 -; GFX7-NEXT: s_and_b32 s3, s3, s10 -; GFX7-NEXT: s_lshl_b32 s4, s4, 8 -; GFX7-NEXT: s_or_b32 s3, s3, s4 -; GFX7-NEXT: s_and_b32 s4, s8, s10 +; GFX7-NEXT: s_bfe_u32 s10, s4, s8 +; GFX7-NEXT: s_lshr_b32 s2, s4, 24 +; GFX7-NEXT: s_and_b32 s7, s4, s6 +; GFX7-NEXT: s_bfe_u32 s4, s4, s9 +; GFX7-NEXT: s_lshl_b32 s10, s10, 8 +; GFX7-NEXT: s_or_b32 s7, s7, s10 ; GFX7-NEXT: s_lshl_b32 s4, s4, 16 -; GFX7-NEXT: s_or_b32 s3, s3, s4 -; GFX7-NEXT: s_lshl_b32 s4, s9, 24 +; GFX7-NEXT: s_or_b32 s4, s7, s4 +; GFX7-NEXT: s_lshl_b32 s2, s2, 24 +; GFX7-NEXT: s_or_b32 s2, s4, s2 +; GFX7-NEXT: s_and_b32 s4, s3, s6 +; GFX7-NEXT: s_bfe_u32 s6, s3, s8 +; GFX7-NEXT: s_lshr_b32 s5, s3, 24 +; GFX7-NEXT: s_bfe_u32 s3, s3, s9 +; GFX7-NEXT: s_lshl_b32 s6, s6, 8 +; GFX7-NEXT: s_or_b32 s4, s4, s6 +; GFX7-NEXT: s_lshl_b32 s3, s3, 16 +; GFX7-NEXT: s_or_b32 s3, s4, s3 +; GFX7-NEXT: s_lshl_b32 s4, s5, 24 ; GFX7-NEXT: s_or_b32 s3, s3, s4 ; GFX7-NEXT: v_mov_b32_e32 v0, s2 ; GFX7-NEXT: v_mov_b32_e32 v1, s3 @@ -2043,71 +1970,65 @@ ; GFX10-LABEL: insertelement_s_v8i8_s_s: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 +; GFX10-NEXT: s_mov_b32 s3, 0x80008 ; GFX10-NEXT: s_movk_i32 s2, 0xff -; GFX10-NEXT: s_lshr_b32 s3, s5, 2 +; GFX10-NEXT: s_mov_b32 s6, 0x80010 +; GFX10-NEXT: s_lshr_b32 s7, s5, 2 ; GFX10-NEXT: v_mov_b32_e32 v2, 0 ; GFX10-NEXT: v_mov_b32_e32 v3, 0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s6, s0, 8 -; GFX10-NEXT: s_lshr_b32 s9, s1, 8 -; GFX10-NEXT: s_lshr_b32 s7, s0, 16 -; GFX10-NEXT: s_lshr_b32 s10, s1, 16 -; GFX10-NEXT: s_and_b32 s6, s6, s2 -; GFX10-NEXT: s_and_b32 s9, s9, s2 +; GFX10-NEXT: s_bfe_u32 s11, s0, s3 +; GFX10-NEXT: s_bfe_u32 s13, s1, s3 ; GFX10-NEXT: s_lshr_b32 s8, s0, 24 -; GFX10-NEXT: s_lshr_b32 s11, s1, 24 -; GFX10-NEXT: s_and_b32 s7, s7, s2 -; GFX10-NEXT: s_and_b32 s10, s10, s2 -; GFX10-NEXT: s_and_b32 s0, s0, s2 -; GFX10-NEXT: s_lshl_b32 s6, s6, 8 -; GFX10-NEXT: s_and_b32 s1, s1, s2 -; GFX10-NEXT: s_lshl_b32 s9, s9, 8 -; GFX10-NEXT: 
s_lshl_b32 s7, s7, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s6 -; GFX10-NEXT: s_lshl_b32 s10, s10, 16 -; GFX10-NEXT: s_or_b32 s1, s1, s9 +; GFX10-NEXT: s_lshr_b32 s9, s1, 24 +; GFX10-NEXT: s_and_b32 s10, s0, s2 +; GFX10-NEXT: s_bfe_u32 s0, s0, s6 +; GFX10-NEXT: s_and_b32 s12, s1, s2 +; GFX10-NEXT: s_bfe_u32 s1, s1, s6 +; GFX10-NEXT: s_lshl_b32 s11, s11, 8 +; GFX10-NEXT: s_lshl_b32 s13, s13, 8 +; GFX10-NEXT: s_or_b32 s10, s10, s11 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_lshl_b32 s1, s1, 16 +; GFX10-NEXT: s_or_b32 s11, s12, s13 ; GFX10-NEXT: s_lshl_b32 s8, s8, 24 -; GFX10-NEXT: s_or_b32 s0, s0, s7 -; GFX10-NEXT: s_lshl_b32 s11, s11, 24 -; GFX10-NEXT: s_or_b32 s1, s1, s10 +; GFX10-NEXT: s_or_b32 s0, s10, s0 +; GFX10-NEXT: s_lshl_b32 s9, s9, 24 +; GFX10-NEXT: s_or_b32 s1, s11, s1 ; GFX10-NEXT: s_or_b32 s0, s0, s8 -; GFX10-NEXT: s_or_b32 s1, s1, s11 -; GFX10-NEXT: s_cmp_eq_u32 s3, 1 -; GFX10-NEXT: s_cselect_b32 s6, s1, s0 +; GFX10-NEXT: s_or_b32 s1, s1, s9 +; GFX10-NEXT: s_cmp_eq_u32 s7, 1 +; GFX10-NEXT: s_cselect_b32 s8, s1, s0 ; GFX10-NEXT: s_and_b32 s5, s5, 3 ; GFX10-NEXT: s_and_b32 s4, s4, s2 ; GFX10-NEXT: s_lshl_b32 s5, s5, 3 -; GFX10-NEXT: s_lshl_b32 s7, s2, s5 +; GFX10-NEXT: s_lshl_b32 s9, s2, s5 ; GFX10-NEXT: s_lshl_b32 s4, s4, s5 -; GFX10-NEXT: s_andn2_b32 s5, s6, s7 +; GFX10-NEXT: s_andn2_b32 s5, s8, s9 ; GFX10-NEXT: s_or_b32 s4, s5, s4 -; GFX10-NEXT: s_cmp_eq_u32 s3, 0 +; GFX10-NEXT: s_cmp_eq_u32 s7, 0 ; GFX10-NEXT: s_cselect_b32 s0, s4, s0 -; GFX10-NEXT: s_cmp_eq_u32 s3, 1 +; GFX10-NEXT: s_cmp_eq_u32 s7, 1 ; GFX10-NEXT: s_cselect_b32 s1, s4, s1 -; GFX10-NEXT: s_lshr_b32 s3, s0, 8 -; GFX10-NEXT: s_lshr_b32 s4, s0, 16 -; GFX10-NEXT: s_and_b32 s3, s3, s2 -; GFX10-NEXT: s_and_b32 s4, s4, s2 -; GFX10-NEXT: s_lshr_b32 s5, s0, 24 -; GFX10-NEXT: s_and_b32 s0, s0, s2 +; GFX10-NEXT: s_bfe_u32 s7, s0, s3 +; GFX10-NEXT: s_bfe_u32 s3, s1, s3 +; GFX10-NEXT: s_and_b32 s5, s0, s2 +; GFX10-NEXT: s_lshr_b32 s4, s0, 24 +; GFX10-NEXT: s_bfe_u32 s0, s0, s6 +; GFX10-NEXT: 
s_lshl_b32 s7, s7, 8 +; GFX10-NEXT: s_lshr_b32 s8, s1, 24 +; GFX10-NEXT: s_and_b32 s2, s1, s2 +; GFX10-NEXT: s_bfe_u32 s1, s1, s6 ; GFX10-NEXT: s_lshl_b32 s3, s3, 8 -; GFX10-NEXT: s_lshr_b32 s6, s1, 8 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_or_b32 s5, s5, s7 +; GFX10-NEXT: s_or_b32 s2, s2, s3 +; GFX10-NEXT: s_lshl_b32 s1, s1, 16 +; GFX10-NEXT: s_or_b32 s0, s5, s0 +; GFX10-NEXT: s_lshl_b32 s3, s4, 24 +; GFX10-NEXT: s_or_b32 s1, s2, s1 +; GFX10-NEXT: s_lshl_b32 s2, s8, 24 ; GFX10-NEXT: s_or_b32 s0, s0, s3 -; GFX10-NEXT: s_lshl_b32 s4, s4, 16 -; GFX10-NEXT: s_lshr_b32 s7, s1, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s4 -; GFX10-NEXT: s_and_b32 s4, s6, s2 -; GFX10-NEXT: s_lshr_b32 s3, s1, 24 -; GFX10-NEXT: s_and_b32 s1, s1, s2 -; GFX10-NEXT: s_and_b32 s2, s7, s2 -; GFX10-NEXT: s_lshl_b32 s4, s4, 8 -; GFX10-NEXT: s_lshl_b32 s2, s2, 16 -; GFX10-NEXT: s_or_b32 s1, s1, s4 -; GFX10-NEXT: s_lshl_b32 s4, s5, 24 -; GFX10-NEXT: s_or_b32 s1, s1, s2 -; GFX10-NEXT: s_lshl_b32 s2, s3, 24 -; GFX10-NEXT: s_or_b32 s0, s0, s4 ; GFX10-NEXT: s_or_b32 s1, s1, s2 ; GFX10-NEXT: v_mov_b32_e32 v0, s0 ; GFX10-NEXT: v_mov_b32_e32 v1, s1 @@ -2124,52 +2045,50 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; GFX9-NEXT: s_mov_b32 s0, 8 -; GFX9-NEXT: s_movk_i32 s4, 0xff -; GFX9-NEXT: s_lshr_b32 s1, s3, 2 +; GFX9-NEXT: s_mov_b32 s1, 16 +; GFX9-NEXT: s_lshr_b32 s5, s3, 2 ; GFX9-NEXT: s_and_b32 s3, s3, 3 +; GFX9-NEXT: s_movk_i32 s4, 0xff ; GFX9-NEXT: s_and_b32 s2, s2, s4 ; GFX9-NEXT: s_lshl_b32 s3, s3, 3 ; GFX9-NEXT: s_lshl_b32 s2, s2, s3 ; GFX9-NEXT: s_lshl_b32 s3, s4, s3 -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s1, 1 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s5, 1 ; GFX9-NEXT: s_not_b32 s3, s3 -; GFX9-NEXT: v_mov_b32_e32 v3, s2 +; GFX9-NEXT: v_mov_b32_e32 v4, s2 ; GFX9-NEXT: v_mov_b32_e32 v2, 8 +; GFX9-NEXT: v_mov_b32_e32 v3, 16 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 8, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v5, 
24, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v6, s0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v8, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v9, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v7, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v9, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v8, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v10, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v7 ; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v4 -; GFX9-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX9-NEXT: v_and_or_b32 v1, v1, s4, v6 +; GFX9-NEXT: v_and_or_b32 v1, v1, s4, v9 +; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6 ; GFX9-NEXT: v_or3_b32 v0, v0, v8, v5 -; GFX9-NEXT: v_or3_b32 v1, v1, v9, v7 -; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc -; GFX9-NEXT: v_and_or_b32 v3, v4, s3, v3 -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s1, 0 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v3, s[0:1] -; GFX9-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v1 +; GFX9-NEXT: v_or3_b32 v1, v1, v10, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v5, v0, v1, vcc +; GFX9-NEXT: v_and_or_b32 v4, v5, s3, v4 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s5, 0 +; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v4, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc +; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v2, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v7, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v8, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v3 +; GFX9-NEXT: v_lshlrev_b32_sdwa v7, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_and_or_b32 v1, v1, s4, v2 -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v6 +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v5 +; GFX9-NEXT: v_or3_b32 v1, v1, v3, v2 ; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v6 +; GFX9-NEXT: v_lshlrev_b32_e32 v4, 24, v4 ; GFX9-NEXT: v_or3_b32 v0, v0, v7, v4 -; GFX9-NEXT: v_or3_b32 v1, v1, v8, v5 ; GFX9-NEXT: v_mov_b32_e32 v3, 0 ; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off ; GFX9-NEXT: s_endpgm @@ -2177,58 +2096,55 @@ ; GFX8-LABEL: insertelement_v_v8i8_s_s: ; GFX8: ; %bb.0: ; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1] -; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: v_mov_b32_e32 v2, 8 -; GFX8-NEXT: v_mov_b32_e32 v3, 8 -; GFX8-NEXT: v_mov_b32_e32 v4, s0 +; GFX8-NEXT: v_mov_b32_e32 v3, 16 ; GFX8-NEXT: s_lshr_b32 s1, s3, 2 ; GFX8-NEXT: s_and_b32 s3, s3, 3 +; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: s_lshl_b32 s3, s3, 3 ; GFX8-NEXT: s_and_b32 s2, s2, s0 ; GFX8-NEXT: s_lshl_b32 
s0, s0, s3 ; GFX8-NEXT: v_cmp_eq_u32_e64 vcc, s1, 1 ; GFX8-NEXT: s_not_b32 s0, s0 ; GFX8-NEXT: s_lshl_b32 s2, s2, s3 +; GFX8-NEXT: v_mov_b32_e32 v4, 8 +; GFX8-NEXT: v_mov_b32_e32 v5, 16 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v8, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX8-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v7, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v6 -; GFX8-NEXT: v_lshlrev_b32_sdwa v6, v3, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v9, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v10, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v6 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v9 -; GFX8-NEXT: v_lshlrev_b32_e32 v7, 24, v8 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v10 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX8-NEXT: 
v_or_b32_e32 v1, v1, v7 +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v7 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v3 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v6 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v2 ; GFX8-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc ; GFX8-NEXT: v_and_b32_e32 v2, s0, v2 ; GFX8-NEXT: v_or_b32_e32 v2, s2, v2 ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s1, 0 ; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1] ; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v6, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v3, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v8, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v4, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_mov_b32_e32 v2, 0 -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v8 -; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v7 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v6 +; GFX8-NEXT: v_lshlrev_b32_sdwa v6, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v7, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v6 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v7 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v5 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, 0 ; GFX8-NEXT: v_mov_b32_e32 v3, 0 ; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1] ; GFX8-NEXT: s_endpgm @@ -2249,60 +2165,52 @@ ; GFX7-NEXT: v_cmp_eq_u32_e64 vcc, s0, 1 ; GFX7-NEXT: s_not_b32 s1, s1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s6, v2 -; GFX7-NEXT: v_and_b32_e32 v5, s6, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v3, s6, v3 -; GFX7-NEXT: v_and_b32_e32 v6, s6, v6 -; GFX7-NEXT: v_and_b32_e32 v0, s6, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX7-NEXT: v_and_b32_e32 v1, s6, v1 +; GFX7-NEXT: v_bfe_u32 v5, v0, 8, 8 +; GFX7-NEXT: v_bfe_u32 v7, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v4, s6, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v6, s6, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 ; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v5, v6, v7 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX7-NEXT: v_or_b32_e32 v0, v4, v0 
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v5, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v6 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v7 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 ; GFX7-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc ; GFX7-NEXT: v_and_b32_e32 v2, s1, v2 ; GFX7-NEXT: v_or_b32_e32 v2, s2, v2 ; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], s0, 0 ; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc ; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1] -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s6, v2 -; GFX7-NEXT: v_and_b32_e32 v5, s6, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v3, s6, v3 -; GFX7-NEXT: v_and_b32_e32 v6, s6, v6 -; GFX7-NEXT: v_and_b32_e32 v0, s6, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s6, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX7-NEXT: v_bfe_u32 v5, v0, 8, 8 +; GFX7-NEXT: v_bfe_u32 v7, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v4, s6, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v6, s6, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 ; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v5, v6, v7 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX7-NEXT: v_or_b32_e32 v0, v4, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; 
GFX7-NEXT: v_or_b32_e32 v1, v5, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v6 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v7 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 ; GFX7-NEXT: s_mov_b32 s6, -1 ; GFX7-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GFX7-NEXT: s_endpgm @@ -2311,52 +2219,50 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: s_movk_i32 s1, 0xff -; GFX10-NEXT: s_and_b32 s2, s2, s1 +; GFX10-NEXT: s_mov_b32 s1, 16 +; GFX10-NEXT: s_movk_i32 s4, 0xff +; GFX10-NEXT: s_and_b32 s2, s2, s4 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v6, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v7, v1, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v2 -; GFX10-NEXT: v_and_or_b32 v1, v1, s1, v3 +; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v5, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v1 +; GFX10-NEXT: 
v_lshlrev_b32_sdwa v6, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v7, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX10-NEXT: v_and_or_b32 v1, v1, s4, v5 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v4 ; GFX10-NEXT: s_lshr_b32 s0, s3, 2 -; GFX10-NEXT: s_and_b32 s3, s3, 3 +; GFX10-NEXT: s_and_b32 s1, s3, 3 ; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s0, 1 -; GFX10-NEXT: v_or3_b32 v0, v0, v6, v4 -; GFX10-NEXT: v_or3_b32 v1, v1, v7, v5 -; GFX10-NEXT: s_lshl_b32 s3, s3, 3 +; GFX10-NEXT: v_or3_b32 v0, v0, v6, v2 +; GFX10-NEXT: v_or3_b32 v1, v1, v7, v3 +; GFX10-NEXT: s_lshl_b32 s1, s1, 3 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, s0, 0 -; GFX10-NEXT: s_lshl_b32 s4, s1, s3 -; GFX10-NEXT: s_lshl_b32 s2, s2, s3 +; GFX10-NEXT: s_lshl_b32 s3, s4, s1 +; GFX10-NEXT: s_lshl_b32 s1, s2, s1 ; GFX10-NEXT: v_cndmask_b32_e32 v2, v0, v1, vcc_lo -; GFX10-NEXT: s_not_b32 s3, s4 -; GFX10-NEXT: v_and_or_b32 v2, v2, s3, s2 +; GFX10-NEXT: s_not_b32 s2, s3 +; GFX10-NEXT: v_mov_b32_e32 v3, 8 +; GFX10-NEXT: v_and_or_b32 v2, v2, s2, s1 ; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v2, s0 ; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo -; GFX10-NEXT: v_mov_b32_e32 v2, 8 -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v0 +; GFX10-NEXT: v_mov_b32_e32 v2, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v7, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: 
v_and_b32_sdwa v4, v1, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v3 -; GFX10-NEXT: v_and_or_b32 v1, v1, s1, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v7, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v5 +; GFX10-NEXT: v_and_or_b32 v1, v1, s4, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX10-NEXT: v_or3_b32 v1, v1, v2, v3 ; GFX10-NEXT: v_mov_b32_e32 v2, 0 +; GFX10-NEXT: v_or3_b32 v0, v0, v7, v4 ; GFX10-NEXT: v_mov_b32_e32 v3, 0 -; GFX10-NEXT: v_or3_b32 v0, v0, v7, v5 -; GFX10-NEXT: v_or3_b32 v1, v1, v4, v6 ; GFX10-NEXT: global_store_dwordx2 v[2:3], v[0:1], off ; GFX10-NEXT: s_endpgm %vec = load <8 x i8>, <8 x i8> addrspace(1 )* %ptr @@ -2369,62 +2275,59 @@ ; GFX9-LABEL: insertelement_s_v8i8_v_s: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GFX9-NEXT: s_movk_i32 s10, 0xff -; GFX9-NEXT: v_and_b32_e32 v0, s10, v0 -; GFX9-NEXT: s_mov_b32 s5, 8 +; GFX9-NEXT: s_mov_b32 s9, 0x80008 +; GFX9-NEXT: s_movk_i32 s7, 0xff +; GFX9-NEXT: v_and_b32_e32 v0, s7, v0 +; GFX9-NEXT: s_mov_b32 s2, 8 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_lshr_b32 s2, s0, 8 -; GFX9-NEXT: s_and_b32 s2, s2, s10 -; GFX9-NEXT: s_lshr_b32 s3, s0, 16 -; GFX9-NEXT: s_lshr_b32 s6, s0, 24 -; GFX9-NEXT: s_and_b32 s0, s0, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s2 -; GFX9-NEXT: s_and_b32 s2, s3, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 16 -; GFX9-NEXT: s_or_b32 s0, s0, s2 -; GFX9-NEXT: s_lshl_b32 s2, s6, 24 -; GFX9-NEXT: 
s_lshr_b32 s7, s1, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s2 -; GFX9-NEXT: s_and_b32 s2, s7, s10 -; GFX9-NEXT: s_lshr_b32 s8, s1, 16 -; GFX9-NEXT: s_lshr_b32 s9, s1, 24 -; GFX9-NEXT: s_and_b32 s1, s1, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 8 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_and_b32 s2, s8, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 16 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_lshl_b32 s2, s9, 24 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_lshr_b32 s2, s4, 2 -; GFX9-NEXT: s_cmp_eq_u32 s2, 1 -; GFX9-NEXT: s_cselect_b32 s3, s1, s0 +; GFX9-NEXT: s_bfe_u32 s10, s0, s9 +; GFX9-NEXT: s_and_b32 s8, s0, s7 +; GFX9-NEXT: s_lshl_b32 s10, s10, 8 +; GFX9-NEXT: s_or_b32 s8, s8, s10 +; GFX9-NEXT: s_mov_b32 s10, 0x80010 +; GFX9-NEXT: s_lshr_b32 s5, s0, 24 +; GFX9-NEXT: s_bfe_u32 s0, s0, s10 +; GFX9-NEXT: s_lshl_b32 s0, s0, 16 +; GFX9-NEXT: s_or_b32 s0, s8, s0 +; GFX9-NEXT: s_bfe_u32 s8, s1, s9 +; GFX9-NEXT: s_lshl_b32 s5, s5, 24 +; GFX9-NEXT: s_or_b32 s0, s0, s5 +; GFX9-NEXT: s_lshr_b32 s6, s1, 24 +; GFX9-NEXT: s_and_b32 s5, s1, s7 +; GFX9-NEXT: s_bfe_u32 s1, s1, s10 +; GFX9-NEXT: s_lshl_b32 s8, s8, 8 +; GFX9-NEXT: s_or_b32 s5, s5, s8 +; GFX9-NEXT: s_lshl_b32 s1, s1, 16 +; GFX9-NEXT: s_or_b32 s1, s5, s1 +; GFX9-NEXT: s_lshl_b32 s5, s6, 24 +; GFX9-NEXT: s_or_b32 s1, s1, s5 +; GFX9-NEXT: s_lshr_b32 s5, s4, 2 +; GFX9-NEXT: s_cmp_eq_u32 s5, 1 +; GFX9-NEXT: s_cselect_b32 s6, s1, s0 ; GFX9-NEXT: s_and_b32 s4, s4, 3 ; GFX9-NEXT: s_lshl_b32 s4, s4, 3 -; GFX9-NEXT: s_lshl_b32 s6, s10, s4 -; GFX9-NEXT: s_andn2_b32 s3, s3, s6 -; GFX9-NEXT: v_mov_b32_e32 v1, s3 +; GFX9-NEXT: s_lshl_b32 s8, s7, s4 +; GFX9-NEXT: s_andn2_b32 s6, s6, s8 +; GFX9-NEXT: v_mov_b32_e32 v1, s6 ; GFX9-NEXT: v_lshl_or_b32 v2, v0, s4, v1 ; GFX9-NEXT: v_mov_b32_e32 v0, s0 -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s2, 0 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s5, 0 ; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc ; GFX9-NEXT: v_mov_b32_e32 v1, s1 -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s2, 1 +; GFX9-NEXT: v_cmp_eq_u32_e64 
vcc, s5, 1 ; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_or_b32 v2, v0, s10, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, v1 -; GFX9-NEXT: v_and_b32_sdwa v0, v0, s10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: s_mov_b32 s3, 16 +; GFX9-NEXT: v_and_or_b32 v4, v0, s7, v4 +; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX9-NEXT: v_or3_b32 v0, v4, v0, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v2, v1, s7, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX9-NEXT: v_or3_b32 v0, v2, v0, v3 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v1 -; GFX9-NEXT: v_and_or_b32 v2, v1, s10, v2 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v5 ; GFX9-NEXT: v_or3_b32 v1, v2, v1, v3 ; GFX9-NEXT: v_mov_b32_e32 v2, 0 ; GFX9-NEXT: v_mov_b32_e32 v3, 0 @@ -2434,33 +2337,31 @@ ; GFX8-LABEL: insertelement_s_v8i8_v_s: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GFX8-NEXT: s_movk_i32 s9, 0xff -; GFX8-NEXT: v_mov_b32_e32 v6, 8 -; GFX8-NEXT: v_mov_b32_e32 v7, s9 +; GFX8-NEXT: s_mov_b32 s7, 0x80008 +; GFX8-NEXT: 
s_movk_i32 s5, 0xff +; GFX8-NEXT: v_mov_b32_e32 v4, 8 +; GFX8-NEXT: v_mov_b32_e32 v6, 16 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_lshr_b32 s2, s0, 8 -; GFX8-NEXT: s_and_b32 s2, s2, s9 -; GFX8-NEXT: s_lshr_b32 s3, s0, 16 -; GFX8-NEXT: s_lshr_b32 s5, s0, 24 -; GFX8-NEXT: s_and_b32 s0, s0, s9 -; GFX8-NEXT: s_lshl_b32 s2, s2, 8 -; GFX8-NEXT: s_or_b32 s0, s0, s2 -; GFX8-NEXT: s_and_b32 s2, s3, s9 -; GFX8-NEXT: s_lshl_b32 s2, s2, 16 +; GFX8-NEXT: s_bfe_u32 s8, s0, s7 +; GFX8-NEXT: s_and_b32 s6, s0, s5 +; GFX8-NEXT: s_lshl_b32 s8, s8, 8 +; GFX8-NEXT: s_or_b32 s6, s6, s8 +; GFX8-NEXT: s_mov_b32 s8, 0x80010 +; GFX8-NEXT: s_lshr_b32 s2, s0, 24 +; GFX8-NEXT: s_bfe_u32 s0, s0, s8 +; GFX8-NEXT: s_lshl_b32 s0, s0, 16 +; GFX8-NEXT: s_or_b32 s0, s6, s0 +; GFX8-NEXT: s_bfe_u32 s6, s1, s7 +; GFX8-NEXT: s_lshl_b32 s2, s2, 24 ; GFX8-NEXT: s_or_b32 s0, s0, s2 -; GFX8-NEXT: s_lshl_b32 s2, s5, 24 -; GFX8-NEXT: s_lshr_b32 s6, s1, 8 -; GFX8-NEXT: s_or_b32 s0, s0, s2 -; GFX8-NEXT: s_and_b32 s2, s6, s9 -; GFX8-NEXT: s_lshr_b32 s7, s1, 16 -; GFX8-NEXT: s_lshr_b32 s8, s1, 24 -; GFX8-NEXT: s_and_b32 s1, s1, s9 -; GFX8-NEXT: s_lshl_b32 s2, s2, 8 -; GFX8-NEXT: s_or_b32 s1, s1, s2 -; GFX8-NEXT: s_and_b32 s2, s7, s9 -; GFX8-NEXT: s_lshl_b32 s2, s2, 16 -; GFX8-NEXT: s_or_b32 s1, s1, s2 -; GFX8-NEXT: s_lshl_b32 s2, s8, 24 +; GFX8-NEXT: s_lshr_b32 s3, s1, 24 +; GFX8-NEXT: s_and_b32 s2, s1, s5 +; GFX8-NEXT: s_bfe_u32 s1, s1, s8 +; GFX8-NEXT: s_lshl_b32 s6, s6, 8 +; GFX8-NEXT: s_or_b32 s2, s2, s6 +; GFX8-NEXT: s_lshl_b32 s1, s1, 16 +; GFX8-NEXT: s_or_b32 s1, s2, s1 +; GFX8-NEXT: s_lshl_b32 s2, s3, 24 ; GFX8-NEXT: s_or_b32 s1, s1, s2 ; GFX8-NEXT: s_lshr_b32 s2, s4, 2 ; GFX8-NEXT: s_cmp_eq_u32 s2, 1 @@ -2468,31 +2369,29 @@ ; GFX8-NEXT: s_and_b32 s4, s4, 3 ; GFX8-NEXT: s_lshl_b32 s4, s4, 3 ; GFX8-NEXT: v_mov_b32_e32 v1, s4 -; GFX8-NEXT: s_lshl_b32 s4, s9, s4 +; GFX8-NEXT: s_lshl_b32 s4, s5, s4 ; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_0 ; GFX8-NEXT: s_andn2_b32 s3, s3, s4 ; GFX8-NEXT: v_or_b32_e32 v2, s3, v0 ; GFX8-NEXT: v_mov_b32_e32 v0, s0 ; GFX8-NEXT: v_cmp_eq_u32_e64 vcc, s2, 0 ; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX8-NEXT: v_mov_b32_e32 v1, s1 ; GFX8-NEXT: v_cmp_eq_u32_e64 vcc, s2, 1 ; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX8-NEXT: v_or_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v0, v0, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, v2, v0 -; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v3 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 8, v1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX8-NEXT: v_or_b32_sdwa v5, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_e32 v0, v5, v0 +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v2 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v6, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v1 ; GFX8-NEXT: v_or_b32_sdwa v2, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v1, v1, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: 
v_or_b32_e32 v1, v2, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v1, v1, v2 ; GFX8-NEXT: v_mov_b32_e32 v2, 0 ; GFX8-NEXT: v_mov_b32_e32 v3, 0 @@ -2502,32 +2401,30 @@ ; GFX7-LABEL: insertelement_s_v8i8_v_s: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GFX7-NEXT: s_movk_i32 s9, 0xff -; GFX7-NEXT: v_and_b32_e32 v0, s9, v0 +; GFX7-NEXT: s_mov_b32 s7, 0x80008 +; GFX7-NEXT: s_movk_i32 s5, 0xff +; GFX7-NEXT: v_and_b32_e32 v0, s5, v0 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_lshr_b32 s2, s0, 8 -; GFX7-NEXT: s_and_b32 s2, s2, s9 -; GFX7-NEXT: s_lshr_b32 s3, s0, 16 -; GFX7-NEXT: s_lshr_b32 s5, s0, 24 -; GFX7-NEXT: s_and_b32 s0, s0, s9 -; GFX7-NEXT: s_lshl_b32 s2, s2, 8 -; GFX7-NEXT: s_or_b32 s0, s0, s2 -; GFX7-NEXT: s_and_b32 s2, s3, s9 -; GFX7-NEXT: s_lshl_b32 s2, s2, 16 +; GFX7-NEXT: s_bfe_u32 s8, s0, s7 +; GFX7-NEXT: s_and_b32 s6, s0, s5 +; GFX7-NEXT: s_lshl_b32 s8, s8, 8 +; GFX7-NEXT: s_or_b32 s6, s6, s8 +; GFX7-NEXT: s_mov_b32 s8, 0x80010 +; GFX7-NEXT: s_lshr_b32 s2, s0, 24 +; GFX7-NEXT: s_bfe_u32 s0, s0, s8 +; GFX7-NEXT: s_lshl_b32 s0, s0, 16 +; GFX7-NEXT: s_or_b32 s0, s6, s0 +; GFX7-NEXT: s_bfe_u32 s6, s1, s7 +; GFX7-NEXT: s_lshl_b32 s2, s2, 24 ; GFX7-NEXT: s_or_b32 s0, s0, s2 -; GFX7-NEXT: s_lshl_b32 s2, s5, 24 -; GFX7-NEXT: s_lshr_b32 s6, s1, 8 -; GFX7-NEXT: s_or_b32 s0, s0, s2 -; GFX7-NEXT: s_and_b32 s2, s6, s9 -; GFX7-NEXT: s_lshr_b32 s7, s1, 16 -; GFX7-NEXT: s_lshr_b32 s8, s1, 24 -; GFX7-NEXT: s_and_b32 s1, s1, s9 -; GFX7-NEXT: s_lshl_b32 s2, s2, 8 -; GFX7-NEXT: s_or_b32 s1, s1, s2 -; GFX7-NEXT: s_and_b32 s2, s7, s9 -; GFX7-NEXT: s_lshl_b32 s2, s2, 16 -; GFX7-NEXT: s_or_b32 s1, s1, s2 -; GFX7-NEXT: s_lshl_b32 s2, s8, 24 +; GFX7-NEXT: s_lshr_b32 s3, s1, 24 +; GFX7-NEXT: s_and_b32 s2, s1, s5 +; GFX7-NEXT: s_bfe_u32 s1, s1, s8 +; GFX7-NEXT: s_lshl_b32 s6, s6, 8 +; GFX7-NEXT: s_or_b32 s2, s2, s6 +; GFX7-NEXT: s_lshl_b32 s1, s1, 16 +; GFX7-NEXT: s_or_b32 s1, s2, s1 +; 
GFX7-NEXT: s_lshl_b32 s2, s3, 24 ; GFX7-NEXT: s_or_b32 s1, s1, s2 ; GFX7-NEXT: s_lshr_b32 s2, s4, 2 ; GFX7-NEXT: s_cmp_eq_u32 s2, 1 @@ -2535,38 +2432,34 @@ ; GFX7-NEXT: s_and_b32 s4, s4, 3 ; GFX7-NEXT: s_lshl_b32 s4, s4, 3 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, s4, v0 -; GFX7-NEXT: s_lshl_b32 s4, s9, s4 +; GFX7-NEXT: s_lshl_b32 s4, s5, s4 ; GFX7-NEXT: s_andn2_b32 s3, s3, s4 ; GFX7-NEXT: v_or_b32_e32 v2, s3, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s0 ; GFX7-NEXT: v_cmp_eq_u32_e64 vcc, s2, 0 ; GFX7-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; GFX7-NEXT: v_bfe_u32 v5, v0, 8, 8 ; GFX7-NEXT: v_mov_b32_e32 v1, s1 ; GFX7-NEXT: v_cmp_eq_u32_e64 vcc, s2, 1 ; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s9, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s9, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s9, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v1 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v4, s5, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v4, v0 +; GFX7-NEXT: v_bfe_u32 v4, v1, 8, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s9, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s9, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s9, v6 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v7 +; 
GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v2, s5, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v1, v2, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v3 ; GFX7-NEXT: s_mov_b64 s[0:1], 0 ; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 ; GFX7-NEXT: s_mov_b32 s2, -1 @@ -2577,64 +2470,61 @@ ; GFX10-LABEL: insertelement_s_v8i8_v_s: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 +; GFX10-NEXT: s_mov_b32 s3, 0x80008 ; GFX10-NEXT: s_movk_i32 s2, 0xff -; GFX10-NEXT: s_lshr_b32 s3, s4, 2 +; GFX10-NEXT: s_mov_b32 s5, 0x80010 +; GFX10-NEXT: s_lshr_b32 s6, s4, 2 ; GFX10-NEXT: v_and_b32_e32 v2, s2, v0 -; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s3, 0 +; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s6, 0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s5, s0, 8 -; GFX10-NEXT: s_lshr_b32 s8, s1, 8 -; GFX10-NEXT: s_lshr_b32 s6, s0, 16 -; GFX10-NEXT: s_lshr_b32 s9, s1, 16 -; GFX10-NEXT: s_and_b32 s5, s5, s2 -; GFX10-NEXT: s_and_b32 s8, s8, s2 +; GFX10-NEXT: s_bfe_u32 s10, s0, s3 +; GFX10-NEXT: s_bfe_u32 s3, s1, s3 ; GFX10-NEXT: s_lshr_b32 s7, s0, 24 -; GFX10-NEXT: s_lshr_b32 s10, s1, 24 -; GFX10-NEXT: s_and_b32 s6, s6, s2 -; GFX10-NEXT: s_and_b32 s9, s9, s2 -; GFX10-NEXT: s_and_b32 s0, s0, s2 -; GFX10-NEXT: s_lshl_b32 s5, s5, 8 -; GFX10-NEXT: s_and_b32 s1, s1, s2 -; GFX10-NEXT: s_lshl_b32 s8, s8, 8 -; GFX10-NEXT: s_lshl_b32 s6, s6, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s5 -; GFX10-NEXT: s_lshl_b32 s9, s9, 16 -; GFX10-NEXT: s_or_b32 s1, s1, s8 +; GFX10-NEXT: s_lshr_b32 s8, s1, 24 +; GFX10-NEXT: s_and_b32 s9, s0, s2 +; GFX10-NEXT: s_bfe_u32 s0, s0, s5 +; GFX10-NEXT: s_and_b32 s11, s1, s2 +; GFX10-NEXT: s_bfe_u32 s1, s1, s5 +; GFX10-NEXT: s_lshl_b32 s5, s10, 8 +; GFX10-NEXT: s_lshl_b32 s3, s3, 8 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_or_b32 s5, s9, s5 +; GFX10-NEXT: s_lshl_b32 s1, s1, 16 +; 
GFX10-NEXT: s_or_b32 s3, s11, s3 ; GFX10-NEXT: s_lshl_b32 s7, s7, 24 -; GFX10-NEXT: s_or_b32 s0, s0, s6 -; GFX10-NEXT: s_lshl_b32 s10, s10, 24 -; GFX10-NEXT: s_or_b32 s1, s1, s9 +; GFX10-NEXT: s_or_b32 s0, s5, s0 +; GFX10-NEXT: s_lshl_b32 s8, s8, 24 +; GFX10-NEXT: s_or_b32 s1, s3, s1 ; GFX10-NEXT: s_or_b32 s0, s0, s7 -; GFX10-NEXT: s_or_b32 s1, s1, s10 -; GFX10-NEXT: s_cmp_eq_u32 s3, 1 +; GFX10-NEXT: s_or_b32 s1, s1, s8 +; GFX10-NEXT: s_cmp_eq_u32 s6, 1 ; GFX10-NEXT: v_mov_b32_e32 v0, s0 -; GFX10-NEXT: s_cselect_b32 s5, s1, s0 +; GFX10-NEXT: s_cselect_b32 s3, s1, s0 ; GFX10-NEXT: s_and_b32 s4, s4, 3 ; GFX10-NEXT: v_mov_b32_e32 v1, s1 ; GFX10-NEXT: s_lshl_b32 s4, s4, 3 ; GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: s_lshl_b32 s6, s2, s4 -; GFX10-NEXT: s_andn2_b32 s5, s5, s6 -; GFX10-NEXT: v_lshl_or_b32 v2, v2, s4, s5 +; GFX10-NEXT: s_lshl_b32 s5, s2, s4 +; GFX10-NEXT: s_mov_b32 s1, 16 +; GFX10-NEXT: s_andn2_b32 s3, s3, s5 +; GFX10-NEXT: v_lshl_or_b32 v2, v2, s4, s3 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo -; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s3, 1 -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s6, 1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX10-NEXT: v_and_b32_sdwa v6, v0, s2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v1 -; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v7, v1, s2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; 
GFX10-NEXT: v_and_or_b32 v0, v0, s2, v2 -; GFX10-NEXT: v_and_or_b32 v1, v1, s2, v3 +; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v6, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s2, v3 +; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v5, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v7, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v4 +; GFX10-NEXT: v_and_or_b32 v1, v1, s2, v5 +; GFX10-NEXT: v_or3_b32 v0, v0, v6, v2 +; GFX10-NEXT: v_or3_b32 v1, v1, v7, v3 ; GFX10-NEXT: v_mov_b32_e32 v2, 0 -; GFX10-NEXT: v_or3_b32 v0, v0, v6, v4 ; GFX10-NEXT: v_mov_b32_e32 v3, 0 -; GFX10-NEXT: v_or3_b32 v1, v1, v7, v5 ; GFX10-NEXT: global_store_dwordx2 v[2:3], v[0:1], off ; GFX10-NEXT: s_endpgm %vec = load <8 x i8>, <8 x i8> addrspace(4)* %ptr @@ -2647,63 +2537,60 @@ ; GFX9-LABEL: insertelement_s_v8i8_s_v: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GFX9-NEXT: s_movk_i32 s10, 0xff +; GFX9-NEXT: s_mov_b32 s9, 0x80008 +; GFX9-NEXT: s_movk_i32 s7, 0xff ; GFX9-NEXT: v_lshrrev_b32_e32 v2, 2, v0 ; GFX9-NEXT: v_and_b32_e32 v0, 3, v0 -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_lshr_b32 s2, s0, 8 -; GFX9-NEXT: s_and_b32 s2, s2, s10 -; GFX9-NEXT: s_lshr_b32 s3, s0, 16 -; GFX9-NEXT: s_lshr_b32 s6, s0, 24 -; GFX9-NEXT: s_and_b32 s0, s0, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s2 -; GFX9-NEXT: s_and_b32 s2, s3, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 16 -; GFX9-NEXT: s_or_b32 s0, s0, s2 -; GFX9-NEXT: s_lshl_b32 s2, s6, 24 -; GFX9-NEXT: s_lshr_b32 s7, s1, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s2 -; GFX9-NEXT: s_and_b32 s2, s7, s10 -; GFX9-NEXT: s_lshr_b32 s8, s1, 16 -; GFX9-NEXT: s_lshr_b32 s9, s1, 24 -; 
GFX9-NEXT: s_and_b32 s1, s1, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 8 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_and_b32 s2, s8, s10 -; GFX9-NEXT: s_lshl_b32 s2, s2, 16 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_lshl_b32 s2, s9, 24 -; GFX9-NEXT: s_or_b32 s1, s1, s2 +; GFX9-NEXT: s_bfe_u32 s10, s0, s9 +; GFX9-NEXT: s_and_b32 s8, s0, s7 +; GFX9-NEXT: s_lshl_b32 s10, s10, 8 +; GFX9-NEXT: s_or_b32 s8, s8, s10 +; GFX9-NEXT: s_mov_b32 s10, 0x80010 +; GFX9-NEXT: s_lshr_b32 s5, s0, 24 +; GFX9-NEXT: s_bfe_u32 s0, s0, s10 +; GFX9-NEXT: s_lshl_b32 s0, s0, 16 +; GFX9-NEXT: s_or_b32 s0, s8, s0 +; GFX9-NEXT: s_bfe_u32 s8, s1, s9 +; GFX9-NEXT: s_lshl_b32 s5, s5, 24 +; GFX9-NEXT: s_or_b32 s0, s0, s5 +; GFX9-NEXT: s_lshr_b32 s6, s1, 24 +; GFX9-NEXT: s_and_b32 s5, s1, s7 +; GFX9-NEXT: s_bfe_u32 s1, s1, s10 +; GFX9-NEXT: s_lshl_b32 s8, s8, 8 +; GFX9-NEXT: s_or_b32 s5, s5, s8 +; GFX9-NEXT: s_lshl_b32 s1, s1, 16 +; GFX9-NEXT: s_or_b32 s1, s5, s1 +; GFX9-NEXT: s_lshl_b32 s5, s6, 24 +; GFX9-NEXT: s_or_b32 s1, s1, s5 ; GFX9-NEXT: v_mov_b32_e32 v1, s0 ; GFX9-NEXT: v_mov_b32_e32 v3, s1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2 ; GFX9-NEXT: v_lshlrev_b32_e32 v0, 3, v0 -; GFX9-NEXT: s_and_b32 s2, s4, s10 +; GFX9-NEXT: s_and_b32 s4, s4, s7 ; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; GFX9-NEXT: v_lshlrev_b32_e64 v3, v0, s2 -; GFX9-NEXT: v_lshlrev_b32_e64 v0, v0, s10 +; GFX9-NEXT: v_lshlrev_b32_e64 v3, v0, s4 +; GFX9-NEXT: v_lshlrev_b32_e64 v0, v0, s7 ; GFX9-NEXT: v_xor_b32_e32 v0, -1, v0 ; GFX9-NEXT: v_and_or_b32 v3, v1, v0, v3 ; GFX9-NEXT: v_mov_b32_e32 v0, s0 ; GFX9-NEXT: v_mov_b32_e32 v1, s1 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v2 ; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v3, s[0:1] -; GFX9-NEXT: s_mov_b32 s5, 8 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v0 +; GFX9-NEXT: s_mov_b32 s2, 8 +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: s_mov_b32 s3, 16 ; 
GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_or_b32 v2, v0, s10, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, v1 -; GFX9-NEXT: v_and_b32_sdwa v0, v0, s10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_and_or_b32 v4, v0, s7, v4 +; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX9-NEXT: v_or3_b32 v0, v4, v0, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v2, v1, s7, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX9-NEXT: v_or3_b32 v0, v2, v0, v3 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v1 -; GFX9-NEXT: v_and_or_b32 v2, v1, s10, v2 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v5 ; GFX9-NEXT: v_or3_b32 v1, v2, v1, v3 ; GFX9-NEXT: v_mov_b32_e32 v2, 0 ; GFX9-NEXT: v_mov_b32_e32 v3, 0 @@ -2713,42 +2600,40 @@ ; GFX8-LABEL: insertelement_s_v8i8_s_v: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GFX8-NEXT: s_movk_i32 s9, 0xff +; GFX8-NEXT: s_mov_b32 s7, 0x80008 +; GFX8-NEXT: s_movk_i32 s5, 0xff ; GFX8-NEXT: v_lshrrev_b32_e32 v2, 2, v0 ; GFX8-NEXT: v_and_b32_e32 v0, 3, v0 -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_lshr_b32 s2, s0, 8 -; GFX8-NEXT: s_and_b32 s2, s2, s9 -; GFX8-NEXT: s_lshr_b32 s3, s0, 16 -; GFX8-NEXT: s_lshr_b32 s5, s0, 24 
-; GFX8-NEXT: s_and_b32 s0, s0, s9 -; GFX8-NEXT: s_lshl_b32 s2, s2, 8 -; GFX8-NEXT: s_or_b32 s0, s0, s2 -; GFX8-NEXT: s_and_b32 s2, s3, s9 -; GFX8-NEXT: s_lshl_b32 s2, s2, 16 -; GFX8-NEXT: s_or_b32 s0, s0, s2 -; GFX8-NEXT: s_lshl_b32 s2, s5, 24 -; GFX8-NEXT: s_lshr_b32 s6, s1, 8 +; GFX8-NEXT: s_bfe_u32 s8, s0, s7 +; GFX8-NEXT: s_and_b32 s6, s0, s5 +; GFX8-NEXT: s_lshl_b32 s8, s8, 8 +; GFX8-NEXT: s_or_b32 s6, s6, s8 +; GFX8-NEXT: s_mov_b32 s8, 0x80010 +; GFX8-NEXT: s_lshr_b32 s2, s0, 24 +; GFX8-NEXT: s_bfe_u32 s0, s0, s8 +; GFX8-NEXT: s_lshl_b32 s0, s0, 16 +; GFX8-NEXT: s_or_b32 s0, s6, s0 +; GFX8-NEXT: s_bfe_u32 s6, s1, s7 +; GFX8-NEXT: s_lshl_b32 s2, s2, 24 ; GFX8-NEXT: s_or_b32 s0, s0, s2 -; GFX8-NEXT: s_and_b32 s2, s6, s9 -; GFX8-NEXT: s_lshr_b32 s7, s1, 16 -; GFX8-NEXT: s_lshr_b32 s8, s1, 24 -; GFX8-NEXT: s_and_b32 s1, s1, s9 -; GFX8-NEXT: s_lshl_b32 s2, s2, 8 -; GFX8-NEXT: s_or_b32 s1, s1, s2 -; GFX8-NEXT: s_and_b32 s2, s7, s9 -; GFX8-NEXT: s_lshl_b32 s2, s2, 16 -; GFX8-NEXT: s_or_b32 s1, s1, s2 -; GFX8-NEXT: s_lshl_b32 s2, s8, 24 +; GFX8-NEXT: s_lshr_b32 s3, s1, 24 +; GFX8-NEXT: s_and_b32 s2, s1, s5 +; GFX8-NEXT: s_bfe_u32 s1, s1, s8 +; GFX8-NEXT: s_lshl_b32 s6, s6, 8 +; GFX8-NEXT: s_or_b32 s2, s2, s6 +; GFX8-NEXT: s_lshl_b32 s1, s1, 16 +; GFX8-NEXT: s_or_b32 s1, s2, s1 +; GFX8-NEXT: s_lshl_b32 s2, s3, 24 ; GFX8-NEXT: s_or_b32 s1, s1, s2 ; GFX8-NEXT: v_mov_b32_e32 v1, s0 ; GFX8-NEXT: v_mov_b32_e32 v3, s1 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2 ; GFX8-NEXT: v_lshlrev_b32_e32 v0, 3, v0 -; GFX8-NEXT: s_and_b32 s2, s4, s9 +; GFX8-NEXT: s_and_b32 s2, s4, s5 ; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc ; GFX8-NEXT: v_lshlrev_b32_e64 v3, v0, s2 -; GFX8-NEXT: v_lshlrev_b32_e64 v0, v0, s9 +; GFX8-NEXT: v_lshlrev_b32_e64 v0, v0, s5 ; GFX8-NEXT: v_xor_b32_e32 v0, -1, v0 ; GFX8-NEXT: v_and_b32_e32 v0, v1, v0 ; GFX8-NEXT: v_or_b32_e32 v3, v0, v3 @@ -2756,24 +2641,22 @@ ; GFX8-NEXT: v_mov_b32_e32 v1, s1 ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v2 ; GFX8-NEXT: 
v_cndmask_b32_e64 v0, v0, v3, s[0:1] -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX8-NEXT: v_mov_b32_e32 v6, 8 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_mov_b32_e32 v7, s9 +; GFX8-NEXT: v_mov_b32_e32 v4, 8 +; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_mov_b32_e32 v6, 16 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX8-NEXT: v_or_b32_sdwa v5, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX8-NEXT: v_or_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v0, v0, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, v2, v0 -; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v3 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 8, v1 +; GFX8-NEXT: v_or_b32_e32 v0, v5, v0 +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v2 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v6, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v1 ; GFX8-NEXT: v_or_b32_sdwa v2, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v1, v1, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_e32 v1, v2, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v3 
; GFX8-NEXT: v_or_b32_e32 v1, v1, v2 ; GFX8-NEXT: v_mov_b32_e32 v2, 0 ; GFX8-NEXT: v_mov_b32_e32 v3, 0 @@ -2783,42 +2666,40 @@ ; GFX7-LABEL: insertelement_s_v8i8_s_v: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GFX7-NEXT: s_movk_i32 s9, 0xff +; GFX7-NEXT: s_mov_b32 s7, 0x80008 +; GFX7-NEXT: s_movk_i32 s5, 0xff ; GFX7-NEXT: v_lshrrev_b32_e32 v2, 2, v0 ; GFX7-NEXT: v_and_b32_e32 v0, 3, v0 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_lshr_b32 s2, s0, 8 -; GFX7-NEXT: s_and_b32 s2, s2, s9 -; GFX7-NEXT: s_lshr_b32 s3, s0, 16 -; GFX7-NEXT: s_lshr_b32 s5, s0, 24 -; GFX7-NEXT: s_and_b32 s0, s0, s9 -; GFX7-NEXT: s_lshl_b32 s2, s2, 8 -; GFX7-NEXT: s_or_b32 s0, s0, s2 -; GFX7-NEXT: s_and_b32 s2, s3, s9 -; GFX7-NEXT: s_lshl_b32 s2, s2, 16 -; GFX7-NEXT: s_or_b32 s0, s0, s2 -; GFX7-NEXT: s_lshl_b32 s2, s5, 24 -; GFX7-NEXT: s_lshr_b32 s6, s1, 8 +; GFX7-NEXT: s_bfe_u32 s8, s0, s7 +; GFX7-NEXT: s_and_b32 s6, s0, s5 +; GFX7-NEXT: s_lshl_b32 s8, s8, 8 +; GFX7-NEXT: s_or_b32 s6, s6, s8 +; GFX7-NEXT: s_mov_b32 s8, 0x80010 +; GFX7-NEXT: s_lshr_b32 s2, s0, 24 +; GFX7-NEXT: s_bfe_u32 s0, s0, s8 +; GFX7-NEXT: s_lshl_b32 s0, s0, 16 +; GFX7-NEXT: s_or_b32 s0, s6, s0 +; GFX7-NEXT: s_bfe_u32 s6, s1, s7 +; GFX7-NEXT: s_lshl_b32 s2, s2, 24 ; GFX7-NEXT: s_or_b32 s0, s0, s2 -; GFX7-NEXT: s_and_b32 s2, s6, s9 -; GFX7-NEXT: s_lshr_b32 s7, s1, 16 -; GFX7-NEXT: s_lshr_b32 s8, s1, 24 -; GFX7-NEXT: s_and_b32 s1, s1, s9 -; GFX7-NEXT: s_lshl_b32 s2, s2, 8 -; GFX7-NEXT: s_or_b32 s1, s1, s2 -; GFX7-NEXT: s_and_b32 s2, s7, s9 -; GFX7-NEXT: s_lshl_b32 s2, s2, 16 -; GFX7-NEXT: s_or_b32 s1, s1, s2 -; GFX7-NEXT: s_lshl_b32 s2, s8, 24 +; GFX7-NEXT: s_lshr_b32 s3, s1, 24 +; GFX7-NEXT: s_and_b32 s2, s1, s5 +; GFX7-NEXT: s_bfe_u32 s1, s1, s8 +; GFX7-NEXT: s_lshl_b32 s6, s6, 8 +; GFX7-NEXT: s_or_b32 s2, s2, s6 +; GFX7-NEXT: s_lshl_b32 s1, s1, 16 +; GFX7-NEXT: s_or_b32 s1, s2, s1 +; GFX7-NEXT: s_lshl_b32 s2, s3, 24 ; GFX7-NEXT: s_or_b32 s1, s1, s2 ; 
GFX7-NEXT: v_mov_b32_e32 v1, s0 ; GFX7-NEXT: v_mov_b32_e32 v3, s1 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 3, v0 -; GFX7-NEXT: s_and_b32 s2, s4, s9 +; GFX7-NEXT: s_and_b32 s2, s4, s5 ; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc ; GFX7-NEXT: v_lshl_b32_e32 v3, s2, v0 -; GFX7-NEXT: v_lshl_b32_e32 v0, s9, v0 +; GFX7-NEXT: v_lshl_b32_e32 v0, s5, v0 ; GFX7-NEXT: v_xor_b32_e32 v0, -1, v0 ; GFX7-NEXT: v_and_b32_e32 v0, v1, v0 ; GFX7-NEXT: v_or_b32_e32 v3, v0, v3 @@ -2826,30 +2707,26 @@ ; GFX7-NEXT: v_mov_b32_e32 v1, s1 ; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v2 ; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, v3, s[0:1] -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s9, v2 +; GFX7-NEXT: v_bfe_u32 v5, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v4, s5, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 ; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s9, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s9, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v1 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v4, v0 +; GFX7-NEXT: v_bfe_u32 v4, v1, 8, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s9, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s9, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s9, v6 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 -; 
GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v2, s5, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v1, v2, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v3 ; GFX7-NEXT: s_mov_b64 s[0:1], 0 ; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 ; GFX7-NEXT: s_mov_b32 s2, -1 @@ -2861,64 +2738,61 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 ; GFX10-NEXT: v_and_b32_e32 v1, 3, v0 +; GFX10-NEXT: s_mov_b32 s3, 0x80008 ; GFX10-NEXT: s_movk_i32 s2, 0xff +; GFX10-NEXT: s_mov_b32 s5, 0x80010 ; GFX10-NEXT: v_lshrrev_b32_e32 v2, 2, v0 -; GFX10-NEXT: s_and_b32 s3, s4, s2 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 3, v1 +; GFX10-NEXT: s_and_b32 s4, s4, s2 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v2 -; GFX10-NEXT: v_lshlrev_b32_e64 v3, v1, s3 +; GFX10-NEXT: v_lshlrev_b32_e64 v3, v1, s4 ; GFX10-NEXT: v_lshlrev_b32_e64 v0, v1, s2 ; GFX10-NEXT: v_xor_b32_e32 v4, -1, v0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s6, s1, 8 -; GFX10-NEXT: s_lshr_b32 s7, s1, 16 -; GFX10-NEXT: s_and_b32 s6, s6, s2 -; GFX10-NEXT: s_lshr_b32 s8, s1, 24 -; GFX10-NEXT: s_and_b32 s7, s7, s2 -; GFX10-NEXT: s_and_b32 s1, s1, s2 -; GFX10-NEXT: s_lshl_b32 s6, s6, 8 -; GFX10-NEXT: s_lshr_b32 s3, s0, 8 -; GFX10-NEXT: s_lshl_b32 s7, s7, 16 -; GFX10-NEXT: s_or_b32 s1, s1, s6 -; GFX10-NEXT: s_lshr_b32 s4, s0, 16 -; GFX10-NEXT: s_and_b32 s3, s3, s2 -; GFX10-NEXT: s_lshl_b32 s8, s8, 24 -; GFX10-NEXT: s_or_b32 s1, s1, s7 -; GFX10-NEXT: s_lshr_b32 s5, s0, 24 -; GFX10-NEXT: s_and_b32 s4, s4, s2 -; GFX10-NEXT: s_and_b32 s0, s0, s2 +; GFX10-NEXT: s_bfe_u32 s8, s0, s3 +; GFX10-NEXT: s_bfe_u32 s3, s1, s3 +; GFX10-NEXT: s_lshr_b32 s6, s1, 24 +; GFX10-NEXT: s_and_b32 s9, s1, s2 +; GFX10-NEXT: s_bfe_u32 s1, s1, s5 ; GFX10-NEXT: s_lshl_b32 s3, s3, 8 -; GFX10-NEXT: s_or_b32 s1, s1, s8 -; GFX10-NEXT: s_lshl_b32 
s4, s4, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s3 -; GFX10-NEXT: v_mov_b32_e32 v1, s1 -; GFX10-NEXT: s_lshl_b32 s5, s5, 24 -; GFX10-NEXT: s_or_b32 s0, s0, s4 -; GFX10-NEXT: s_or_b32 s0, s0, s5 +; GFX10-NEXT: s_lshl_b32 s1, s1, 16 +; GFX10-NEXT: s_or_b32 s3, s9, s3 +; GFX10-NEXT: s_lshl_b32 s6, s6, 24 +; GFX10-NEXT: s_or_b32 s1, s3, s1 +; GFX10-NEXT: s_lshr_b32 s4, s0, 24 +; GFX10-NEXT: s_and_b32 s7, s0, s2 +; GFX10-NEXT: s_bfe_u32 s0, s0, s5 +; GFX10-NEXT: s_lshl_b32 s5, s8, 8 +; GFX10-NEXT: s_or_b32 s1, s1, s6 +; GFX10-NEXT: s_lshl_b32 s3, s4, 24 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_or_b32 s4, s7, s5 +; GFX10-NEXT: v_mov_b32_e32 v1, s1 +; GFX10-NEXT: s_or_b32 s0, s4, s0 +; GFX10-NEXT: s_or_b32 s0, s0, s3 ; GFX10-NEXT: v_cndmask_b32_e32 v5, s0, v1, vcc_lo ; GFX10-NEXT: v_mov_b32_e32 v0, s0 ; GFX10-NEXT: v_mov_b32_e32 v1, s1 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v2 +; GFX10-NEXT: s_mov_b32 s1, 16 ; GFX10-NEXT: v_and_or_b32 v3, v5, v4, v3 ; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v3, s0 ; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo ; GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v6, v0, s2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v7, v1, s2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX10-NEXT: v_and_or_b32 v0, v0, s2, v2 -; GFX10-NEXT: v_and_or_b32 v1, v1, s2, v3 +; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v5, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v6, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v7, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s2, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX10-NEXT: v_and_or_b32 v1, v1, s2, v5 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v4 +; GFX10-NEXT: v_or3_b32 v0, v0, v6, v2 +; GFX10-NEXT: v_or3_b32 v1, v1, v7, v3 ; GFX10-NEXT: v_mov_b32_e32 v2, 0 ; GFX10-NEXT: v_mov_b32_e32 v3, 0 -; GFX10-NEXT: v_or3_b32 v0, v0, v6, v4 -; GFX10-NEXT: v_or3_b32 v1, v1, v7, v5 ; GFX10-NEXT: global_store_dwordx2 v[2:3], v[0:1], off ; GFX10-NEXT: s_endpgm %vec = load <8 x i8>, <8 x i8> addrspace(4)* %ptr @@ -2931,37 +2805,35 @@ ; GFX9-LABEL: insertelement_s_v8i8_v_v: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GFX9-NEXT: s_movk_i32 s9, 0xff +; GFX9-NEXT: s_mov_b32 s8, 0x80008 +; GFX9-NEXT: s_movk_i32 s6, 0xff ; GFX9-NEXT: v_lshrrev_b32_e32 v2, 2, v1 ; GFX9-NEXT: v_and_b32_e32 v1, 3, v1 -; GFX9-NEXT: v_lshlrev_b32_e32 v1, 3, v1 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_lshr_b32 s2, s0, 8 -; GFX9-NEXT: s_and_b32 s2, s2, s9 -; GFX9-NEXT: s_lshr_b32 s3, s0, 16 -; GFX9-NEXT: s_lshr_b32 s5, s0, 24 -; GFX9-NEXT: s_and_b32 s0, s0, s9 -; GFX9-NEXT: s_lshl_b32 s2, s2, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s2 -; GFX9-NEXT: s_and_b32 s2, s3, s9 -; GFX9-NEXT: s_lshl_b32 s2, s2, 16 -; GFX9-NEXT: s_or_b32 s0, s0, s2 -; GFX9-NEXT: s_lshl_b32 s2, s5, 24 -; GFX9-NEXT: s_lshr_b32 s6, s1, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s2 -; GFX9-NEXT: s_and_b32 s2, s6, s9 -; GFX9-NEXT: s_lshr_b32 s7, s1, 16 -; GFX9-NEXT: s_lshr_b32 s8, s1, 24 -; GFX9-NEXT: s_and_b32 s1, s1, s9 -; GFX9-NEXT: s_lshl_b32 s2, s2, 8 -; GFX9-NEXT: 
s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_and_b32 s2, s7, s9 -; GFX9-NEXT: s_lshl_b32 s2, s2, 16 -; GFX9-NEXT: s_or_b32 s1, s1, s2 -; GFX9-NEXT: s_lshl_b32 s2, s8, 24 -; GFX9-NEXT: s_or_b32 s1, s1, s2 +; GFX9-NEXT: s_bfe_u32 s9, s0, s8 +; GFX9-NEXT: s_and_b32 s7, s0, s6 +; GFX9-NEXT: s_lshl_b32 s9, s9, 8 +; GFX9-NEXT: s_or_b32 s7, s7, s9 +; GFX9-NEXT: s_mov_b32 s9, 0x80010 +; GFX9-NEXT: s_lshr_b32 s4, s0, 24 +; GFX9-NEXT: s_bfe_u32 s0, s0, s9 +; GFX9-NEXT: s_lshl_b32 s0, s0, 16 +; GFX9-NEXT: s_or_b32 s0, s7, s0 +; GFX9-NEXT: s_bfe_u32 s7, s1, s8 +; GFX9-NEXT: s_lshl_b32 s4, s4, 24 +; GFX9-NEXT: s_or_b32 s0, s0, s4 +; GFX9-NEXT: s_lshr_b32 s5, s1, 24 +; GFX9-NEXT: s_and_b32 s4, s1, s6 +; GFX9-NEXT: s_bfe_u32 s1, s1, s9 +; GFX9-NEXT: s_lshl_b32 s7, s7, 8 +; GFX9-NEXT: s_or_b32 s4, s4, s7 +; GFX9-NEXT: s_lshl_b32 s1, s1, 16 +; GFX9-NEXT: s_or_b32 s1, s4, s1 +; GFX9-NEXT: s_lshl_b32 s4, s5, 24 +; GFX9-NEXT: s_or_b32 s1, s1, s4 +; GFX9-NEXT: v_lshlrev_b32_e32 v1, 3, v1 ; GFX9-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_e64 v1, v1, s9 +; GFX9-NEXT: v_lshlrev_b32_e64 v1, v1, s6 ; GFX9-NEXT: v_mov_b32_e32 v3, s0 ; GFX9-NEXT: v_mov_b32_e32 v4, s1 ; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2 @@ -2972,21 +2844,20 @@ ; GFX9-NEXT: v_mov_b32_e32 v1, s1 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v2 ; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v3, s[0:1] -; GFX9-NEXT: s_mov_b32 s4, 8 -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v0 +; GFX9-NEXT: s_mov_b32 s2, 8 +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: s_mov_b32 s3, 16 ; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_or_b32 v2, v0, s9, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, 
v1 -; GFX9-NEXT: v_and_b32_sdwa v0, v0, s9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_and_or_b32 v4, v0, s6, v4 +; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX9-NEXT: v_or3_b32 v0, v4, v0, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v2, v1, s6, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX9-NEXT: v_or3_b32 v0, v2, v0, v3 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v1 -; GFX9-NEXT: v_and_or_b32 v2, v1, s9, v2 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v5 ; GFX9-NEXT: v_or3_b32 v1, v2, v1, v3 ; GFX9-NEXT: v_mov_b32_e32 v2, 0 ; GFX9-NEXT: v_mov_b32_e32 v3, 0 @@ -2996,37 +2867,35 @@ ; GFX8-LABEL: insertelement_s_v8i8_v_v: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GFX8-NEXT: s_movk_i32 s8, 0xff +; GFX8-NEXT: s_mov_b32 s6, 0x80008 +; GFX8-NEXT: s_movk_i32 s4, 0xff ; GFX8-NEXT: v_lshrrev_b32_e32 v2, 2, v1 ; GFX8-NEXT: v_and_b32_e32 v1, 3, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v1, 3, v1 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_lshr_b32 s2, s0, 8 -; GFX8-NEXT: s_and_b32 s2, s2, s8 -; GFX8-NEXT: s_lshr_b32 s3, s0, 16 -; GFX8-NEXT: s_lshr_b32 s4, s0, 24 -; GFX8-NEXT: s_and_b32 s0, s0, s8 -; GFX8-NEXT: s_lshl_b32 s2, s2, 8 -; GFX8-NEXT: s_or_b32 s0, s0, s2 -; GFX8-NEXT: s_and_b32 s2, s3, s8 -; GFX8-NEXT: s_lshl_b32 s2, s2, 16 -; GFX8-NEXT: s_or_b32 s0, s0, s2 -; GFX8-NEXT: s_lshl_b32 s2, s4, 24 -; GFX8-NEXT: s_lshr_b32 s5, s1, 8 +; 
GFX8-NEXT: s_bfe_u32 s7, s0, s6 +; GFX8-NEXT: s_and_b32 s5, s0, s4 +; GFX8-NEXT: s_lshl_b32 s7, s7, 8 +; GFX8-NEXT: s_or_b32 s5, s5, s7 +; GFX8-NEXT: s_mov_b32 s7, 0x80010 +; GFX8-NEXT: s_lshr_b32 s2, s0, 24 +; GFX8-NEXT: s_bfe_u32 s0, s0, s7 +; GFX8-NEXT: s_lshl_b32 s0, s0, 16 +; GFX8-NEXT: s_or_b32 s0, s5, s0 +; GFX8-NEXT: s_bfe_u32 s5, s1, s6 +; GFX8-NEXT: s_lshl_b32 s2, s2, 24 ; GFX8-NEXT: s_or_b32 s0, s0, s2 -; GFX8-NEXT: s_and_b32 s2, s5, s8 -; GFX8-NEXT: s_lshr_b32 s6, s1, 16 -; GFX8-NEXT: s_lshr_b32 s7, s1, 24 -; GFX8-NEXT: s_and_b32 s1, s1, s8 -; GFX8-NEXT: s_lshl_b32 s2, s2, 8 -; GFX8-NEXT: s_or_b32 s1, s1, s2 -; GFX8-NEXT: s_and_b32 s2, s6, s8 -; GFX8-NEXT: s_lshl_b32 s2, s2, 16 -; GFX8-NEXT: s_or_b32 s1, s1, s2 -; GFX8-NEXT: s_lshl_b32 s2, s7, 24 +; GFX8-NEXT: s_lshr_b32 s3, s1, 24 +; GFX8-NEXT: s_and_b32 s2, s1, s4 +; GFX8-NEXT: s_bfe_u32 s1, s1, s7 +; GFX8-NEXT: s_lshl_b32 s5, s5, 8 +; GFX8-NEXT: s_or_b32 s2, s2, s5 +; GFX8-NEXT: s_lshl_b32 s1, s1, 16 +; GFX8-NEXT: s_or_b32 s1, s2, s1 +; GFX8-NEXT: s_lshl_b32 s2, s3, 24 ; GFX8-NEXT: s_or_b32 s1, s1, s2 +; GFX8-NEXT: v_lshlrev_b32_e32 v1, 3, v1 ; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_e64 v1, v1, s8 +; GFX8-NEXT: v_lshlrev_b32_e64 v1, v1, s4 ; GFX8-NEXT: v_mov_b32_e32 v3, s0 ; GFX8-NEXT: v_mov_b32_e32 v4, s1 ; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2 @@ -3038,24 +2907,22 @@ ; GFX8-NEXT: v_mov_b32_e32 v1, s1 ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v2 ; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v3, s[0:1] -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX8-NEXT: v_mov_b32_e32 v6, 8 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_mov_b32_e32 v7, s8 +; GFX8-NEXT: v_mov_b32_e32 v4, 8 +; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_mov_b32_e32 v6, 16 +; GFX8-NEXT: 
v_lshrrev_b32_e32 v2, 24, v0 +; GFX8-NEXT: v_or_b32_sdwa v5, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX8-NEXT: v_or_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v0, v0, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, v2, v0 -; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v3 -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 8, v1 +; GFX8-NEXT: v_or_b32_e32 v0, v5, v0 +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v2 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v6, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v1 ; GFX8-NEXT: v_or_b32_sdwa v2, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v1, v1, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_e32 v1, v2, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v5 +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v1, v1, v2 ; GFX8-NEXT: v_mov_b32_e32 v2, 0 ; GFX8-NEXT: v_mov_b32_e32 v3, 0 @@ -3065,38 +2932,36 @@ ; GFX7-LABEL: insertelement_s_v8i8_v_v: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GFX7-NEXT: s_movk_i32 s8, 0xff +; GFX7-NEXT: s_mov_b32 s6, 0x80008 +; GFX7-NEXT: s_movk_i32 s4, 0xff ; GFX7-NEXT: v_lshrrev_b32_e32 v2, 2, v1 ; GFX7-NEXT: v_and_b32_e32 v1, 3, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 3, 
v1 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_lshr_b32 s2, s0, 8 -; GFX7-NEXT: s_and_b32 s2, s2, s8 -; GFX7-NEXT: s_lshr_b32 s3, s0, 16 -; GFX7-NEXT: s_lshr_b32 s4, s0, 24 -; GFX7-NEXT: s_and_b32 s0, s0, s8 -; GFX7-NEXT: s_lshl_b32 s2, s2, 8 -; GFX7-NEXT: s_or_b32 s0, s0, s2 -; GFX7-NEXT: s_and_b32 s2, s3, s8 -; GFX7-NEXT: s_lshl_b32 s2, s2, 16 -; GFX7-NEXT: s_or_b32 s0, s0, s2 -; GFX7-NEXT: s_lshl_b32 s2, s4, 24 -; GFX7-NEXT: s_lshr_b32 s5, s1, 8 +; GFX7-NEXT: s_bfe_u32 s7, s0, s6 +; GFX7-NEXT: s_and_b32 s5, s0, s4 +; GFX7-NEXT: s_lshl_b32 s7, s7, 8 +; GFX7-NEXT: s_or_b32 s5, s5, s7 +; GFX7-NEXT: s_mov_b32 s7, 0x80010 +; GFX7-NEXT: s_lshr_b32 s2, s0, 24 +; GFX7-NEXT: s_bfe_u32 s0, s0, s7 +; GFX7-NEXT: s_lshl_b32 s0, s0, 16 +; GFX7-NEXT: s_or_b32 s0, s5, s0 +; GFX7-NEXT: s_bfe_u32 s5, s1, s6 +; GFX7-NEXT: s_lshl_b32 s2, s2, 24 ; GFX7-NEXT: s_or_b32 s0, s0, s2 -; GFX7-NEXT: s_and_b32 s2, s5, s8 -; GFX7-NEXT: s_lshr_b32 s6, s1, 16 -; GFX7-NEXT: s_lshr_b32 s7, s1, 24 -; GFX7-NEXT: s_and_b32 s1, s1, s8 -; GFX7-NEXT: s_lshl_b32 s2, s2, 8 -; GFX7-NEXT: s_or_b32 s1, s1, s2 -; GFX7-NEXT: s_and_b32 s2, s6, s8 -; GFX7-NEXT: s_lshl_b32 s2, s2, 16 -; GFX7-NEXT: s_or_b32 s1, s1, s2 -; GFX7-NEXT: s_lshl_b32 s2, s7, 24 +; GFX7-NEXT: s_lshr_b32 s3, s1, 24 +; GFX7-NEXT: s_and_b32 s2, s1, s4 +; GFX7-NEXT: s_bfe_u32 s1, s1, s7 +; GFX7-NEXT: s_lshl_b32 s5, s5, 8 +; GFX7-NEXT: s_or_b32 s2, s2, s5 +; GFX7-NEXT: s_lshl_b32 s1, s1, 16 +; GFX7-NEXT: s_or_b32 s1, s2, s1 +; GFX7-NEXT: s_lshl_b32 s2, s3, 24 ; GFX7-NEXT: s_or_b32 s1, s1, s2 -; GFX7-NEXT: v_and_b32_e32 v0, s8, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 3, v1 +; GFX7-NEXT: v_and_b32_e32 v0, s4, v0 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, v1, v0 -; GFX7-NEXT: v_lshl_b32_e32 v1, s8, v1 +; GFX7-NEXT: v_lshl_b32_e32 v1, s4, v1 ; GFX7-NEXT: v_mov_b32_e32 v3, s0 ; GFX7-NEXT: v_mov_b32_e32 v4, s1 ; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2 @@ -3108,30 +2973,26 @@ ; GFX7-NEXT: v_mov_b32_e32 v1, s1 ; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v2 ; 
GFX7-NEXT: v_cndmask_b32_e64 v0, v0, v3, s[0:1] -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s8, v2 +; GFX7-NEXT: v_bfe_u32 v5, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v4, s4, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 ; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s8, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s8, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v1 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v4, v0 +; GFX7-NEXT: v_bfe_u32 v4, v1, 8, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s8, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s8, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s8, v6 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v7 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v2, s4, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v1, v2, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v3 ; GFX7-NEXT: s_mov_b64 s[0:1], 0 ; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 ; GFX7-NEXT: s_mov_b32 s2, -1 @@ -3142,8 +3003,10 @@ ; GFX10-LABEL: insertelement_s_v8i8_v_v: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x0 -; GFX10-NEXT: 
s_movk_i32 s2, 0xff +; GFX10-NEXT: s_mov_b32 s3, 0x80008 ; GFX10-NEXT: v_and_b32_e32 v2, 3, v1 +; GFX10-NEXT: s_movk_i32 s2, 0xff +; GFX10-NEXT: s_mov_b32 s4, 0x80010 ; GFX10-NEXT: v_lshrrev_b32_e32 v3, 2, v1 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 3, v2 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v3 @@ -3151,55 +3014,50 @@ ; GFX10-NEXT: v_lshlrev_b32_e64 v0, v2, s2 ; GFX10-NEXT: v_xor_b32_e32 v2, -1, v0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s6, s1, 8 -; GFX10-NEXT: s_lshr_b32 s7, s1, 16 -; GFX10-NEXT: s_and_b32 s6, s6, s2 -; GFX10-NEXT: s_lshr_b32 s8, s1, 24 -; GFX10-NEXT: s_and_b32 s7, s7, s2 -; GFX10-NEXT: s_and_b32 s1, s1, s2 -; GFX10-NEXT: s_lshl_b32 s6, s6, 8 -; GFX10-NEXT: s_lshr_b32 s3, s0, 8 -; GFX10-NEXT: s_lshl_b32 s7, s7, 16 -; GFX10-NEXT: s_or_b32 s1, s1, s6 -; GFX10-NEXT: s_lshr_b32 s4, s0, 16 -; GFX10-NEXT: s_and_b32 s3, s3, s2 -; GFX10-NEXT: s_lshl_b32 s8, s8, 24 -; GFX10-NEXT: s_or_b32 s1, s1, s7 -; GFX10-NEXT: s_lshr_b32 s5, s0, 24 -; GFX10-NEXT: s_and_b32 s4, s4, s2 -; GFX10-NEXT: s_and_b32 s0, s0, s2 +; GFX10-NEXT: s_bfe_u32 s8, s0, s3 +; GFX10-NEXT: s_bfe_u32 s3, s1, s3 +; GFX10-NEXT: s_lshr_b32 s6, s1, 24 +; GFX10-NEXT: s_and_b32 s9, s1, s2 +; GFX10-NEXT: s_bfe_u32 s1, s1, s4 ; GFX10-NEXT: s_lshl_b32 s3, s3, 8 -; GFX10-NEXT: s_or_b32 s1, s1, s8 -; GFX10-NEXT: s_lshl_b32 s4, s4, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s3 +; GFX10-NEXT: s_lshl_b32 s1, s1, 16 +; GFX10-NEXT: s_or_b32 s3, s9, s3 +; GFX10-NEXT: s_lshl_b32 s6, s6, 24 +; GFX10-NEXT: s_or_b32 s1, s3, s1 +; GFX10-NEXT: s_lshr_b32 s5, s0, 24 +; GFX10-NEXT: s_and_b32 s7, s0, s2 +; GFX10-NEXT: s_bfe_u32 s0, s0, s4 +; GFX10-NEXT: s_lshl_b32 s4, s8, 8 +; GFX10-NEXT: s_or_b32 s1, s1, s6 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_or_b32 s4, s7, s4 ; GFX10-NEXT: v_mov_b32_e32 v1, s1 -; GFX10-NEXT: s_lshl_b32 s5, s5, 24 -; GFX10-NEXT: s_or_b32 s0, s0, s4 -; GFX10-NEXT: s_or_b32 s0, s0, s5 +; GFX10-NEXT: s_lshl_b32 s3, s5, 24 +; GFX10-NEXT: s_or_b32 s0, s4, s0 +; 
GFX10-NEXT: s_or_b32 s0, s0, s3 ; GFX10-NEXT: v_cndmask_b32_e32 v5, s0, v1, vcc_lo ; GFX10-NEXT: v_mov_b32_e32 v0, s0 ; GFX10-NEXT: v_mov_b32_e32 v1, s1 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v3 +; GFX10-NEXT: s_mov_b32 s1, 16 ; GFX10-NEXT: v_and_or_b32 v2, v5, v2, v4 ; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v2, s0 ; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo ; GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v6, v0, s2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v7, v1, s2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX10-NEXT: v_and_or_b32 v0, v0, s2, v2 -; GFX10-NEXT: v_and_or_b32 v1, v1, s2, v3 +; GFX10-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v5, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v6, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v7, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s2, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX10-NEXT: v_and_or_b32 v1, v1, s2, v5 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v4 +; GFX10-NEXT: v_or3_b32 v0, v0, v6, v2 +; GFX10-NEXT: v_or3_b32 v1, v1, v7, v3 ; GFX10-NEXT: 
v_mov_b32_e32 v2, 0 ; GFX10-NEXT: v_mov_b32_e32 v3, 0 -; GFX10-NEXT: v_or3_b32 v0, v0, v6, v4 -; GFX10-NEXT: v_or3_b32 v1, v1, v7, v5 ; GFX10-NEXT: global_store_dwordx2 v[2:3], v[0:1], off ; GFX10-NEXT: s_endpgm %vec = load <8 x i8>, <8 x i8> addrspace(4)* %ptr @@ -3213,51 +3071,49 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; GFX9-NEXT: s_mov_b32 s0, 8 -; GFX9-NEXT: s_movk_i32 s3, 0xff -; GFX9-NEXT: v_lshrrev_b32_e32 v4, 2, v2 +; GFX9-NEXT: s_mov_b32 s1, 16 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 2, v2 ; GFX9-NEXT: v_and_b32_e32 v2, 3, v2 -; GFX9-NEXT: s_and_b32 s1, s2, s3 +; GFX9-NEXT: s_movk_i32 s3, 0xff +; GFX9-NEXT: s_and_b32 s2, s2, s3 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 3, v2 -; GFX9-NEXT: v_lshlrev_b32_e64 v5, v2, s1 +; GFX9-NEXT: v_lshlrev_b32_e64 v6, v2, s2 ; GFX9-NEXT: v_lshlrev_b32_e64 v2, v2, s3 -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v5 ; GFX9-NEXT: v_xor_b32_e32 v2, -1, v2 ; GFX9-NEXT: v_mov_b32_e32 v3, 8 +; GFX9-NEXT: v_mov_b32_e32 v4, 16 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v8, 8, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v6, s0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v8, s0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v10, v0, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v11, v1, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v9, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v11, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v10, s1, v0 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v12, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v9 ; GFX9-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v6 -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX9-NEXT: v_and_or_b32 v1, v1, s3, v8 +; GFX9-NEXT: v_and_or_b32 v1, v1, s3, v11 +; GFX9-NEXT: v_lshlrev_b32_e32 v8, 24, v8 ; GFX9-NEXT: v_or3_b32 v0, v0, v10, v7 -; GFX9-NEXT: v_or3_b32 v1, v1, v11, v9 -; GFX9-NEXT: v_cndmask_b32_e32 v6, v0, v1, vcc -; GFX9-NEXT: v_and_or_b32 v2, v6, v2, v5 -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v4 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc +; GFX9-NEXT: v_or3_b32 v1, v1, v12, v8 +; GFX9-NEXT: v_cndmask_b32_e32 v7, v0, v1, vcc +; GFX9-NEXT: v_and_or_b32 v2, v7, v2, v6 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v5 ; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1] -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v7, v0, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v8, v1, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 
+; GFX9-NEXT: v_lshlrev_b32_sdwa v7, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_and_or_b32 v1, v1, s3, v3 -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v6 +; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v5 +; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v6 +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX9-NEXT: v_or3_b32 v0, v0, v7, v2 +; GFX9-NEXT: v_or3_b32 v1, v1, v4, v3 ; GFX9-NEXT: v_mov_b32_e32 v2, 0 -; GFX9-NEXT: v_or3_b32 v0, v0, v7, v4 -; GFX9-NEXT: v_or3_b32 v1, v1, v8, v5 ; GFX9-NEXT: v_mov_b32_e32 v3, 0 ; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off ; GFX9-NEXT: s_endpgm @@ -3265,58 +3121,55 @@ ; GFX8-LABEL: insertelement_v_v8i8_s_v: ; GFX8: ; %bb.0: ; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1] -; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: v_mov_b32_e32 v3, 8 -; GFX8-NEXT: v_mov_b32_e32 v4, 8 -; GFX8-NEXT: v_mov_b32_e32 v5, s0 -; GFX8-NEXT: v_lshrrev_b32_e32 v6, 2, v2 +; GFX8-NEXT: v_mov_b32_e32 v4, 16 +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 2, v2 ; GFX8-NEXT: v_and_b32_e32 v2, 3, v2 +; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: s_and_b32 s1, s2, s0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 3, v2 -; GFX8-NEXT: v_lshlrev_b32_e64 v7, v2, s1 +; GFX8-NEXT: v_lshlrev_b32_e64 v8, v2, s1 ; GFX8-NEXT: v_lshlrev_b32_e64 v2, v2, s0 -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v6 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7 ; GFX8-NEXT: v_xor_b32_e32 v2, -1, v2 -; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v6 +; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v7 +; GFX8-NEXT: v_mov_b32_e32 v5, 8 +; GFX8-NEXT: v_mov_b32_e32 v6, 16 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 8, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v11, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX8-NEXT: v_lshrrev_b32_e32 
v9, 24, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_e32 v8, 24, v9 -; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v12, v0, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v13, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v1, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v12, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshrrev_b32_e32 v10, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v9 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v12 -; GFX8-NEXT: v_lshlrev_b32_e32 v10, 24, v11 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v13 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v8 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v10 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v10 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v4 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v9 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v3 ; GFX8-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc ; GFX8-NEXT: v_and_b32_e32 v2, v3, v2 -; GFX8-NEXT: v_or_b32_e32 v2, v2, v7 +; GFX8-NEXT: v_or_b32_e32 v2, v2, v8 ; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1] ; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX8-NEXT: 
v_lshrrev_b32_e32 v6, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v8, v0, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v5, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v7, v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v6, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v7 ; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v8 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v3 -; GFX8-NEXT: v_mov_b32_e32 v2, 0 -; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v7 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v5 ; GFX8-NEXT: v_or_b32_e32 v1, v1, v6 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, 0 ; GFX8-NEXT: v_mov_b32_e32 v3, 0 
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1] ; GFX8-NEXT: s_endpgm @@ -3339,59 +3192,51 @@ ; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v3 ; GFX7-NEXT: s_mov_b32 s6, -1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v5, s3, v5 -; GFX7-NEXT: v_and_b32_e32 v8, s3, v8 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v10, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v6, s3, v6 -; GFX7-NEXT: v_and_b32_e32 v9, s3, v9 -; GFX7-NEXT: v_and_b32_e32 v0, s3, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 -; GFX7-NEXT: v_and_b32_e32 v1, s3, v1 +; GFX7-NEXT: v_bfe_u32 v8, v0, 8, 8 +; GFX7-NEXT: v_bfe_u32 v10, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v7, s3, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v9, s3, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 ; GFX7-NEXT: v_lshlrev_b32_e32 v8, 8, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX7-NEXT: v_lshlrev_b32_e32 v10, 8, v10 +; GFX7-NEXT: v_or_b32_e32 v7, v7, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v8, v9, v10 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX7-NEXT: v_or_b32_e32 v0, v7, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; GFX7-NEXT: v_or_b32_e32 v1, v8, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v6 -; GFX7-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v9 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v7 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v10 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v6 ; GFX7-NEXT: v_cndmask_b32_e32 v5, v0, v1, vcc ; GFX7-NEXT: v_and_b32_e32 v2, v5, v2 ; GFX7-NEXT: 
v_or_b32_e32 v2, v2, v4 ; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc ; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1] -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s3, v2 -; GFX7-NEXT: v_and_b32_e32 v5, s3, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v3, s3, v3 -; GFX7-NEXT: v_and_b32_e32 v6, s3, v6 -; GFX7-NEXT: v_and_b32_e32 v0, s3, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX7-NEXT: v_and_b32_e32 v1, s3, v1 +; GFX7-NEXT: v_bfe_u32 v5, v0, 8, 8 +; GFX7-NEXT: v_bfe_u32 v7, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v4, s3, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v6, s3, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 ; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v5, v6, v7 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX7-NEXT: v_or_b32_e32 v0, v4, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v5, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v6 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v7 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 ; GFX7-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GFX7-NEXT: s_endpgm ; @@ -3400,51 +3245,49 @@ ; GFX10-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; GFX10-NEXT: s_mov_b32 s0, 8 
; GFX10-NEXT: v_and_b32_e32 v3, 3, v2 -; GFX10-NEXT: s_movk_i32 s1, 0xff +; GFX10-NEXT: s_mov_b32 s1, 16 +; GFX10-NEXT: s_movk_i32 s3, 0xff ; GFX10-NEXT: v_lshrrev_b32_e32 v2, 2, v2 ; GFX10-NEXT: v_lshlrev_b32_e32 v3, 3, v3 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v8, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, s0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v5, s0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v9, v1, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v4 -; GFX10-NEXT: v_and_or_b32 v1, v1, s1, v5 -; GFX10-NEXT: v_lshlrev_b32_e64 v4, v3, s1 -; GFX10-NEXT: s_and_b32 s0, s2, s1 -; GFX10-NEXT: v_or3_b32 v0, v0, v8, v6 -; GFX10-NEXT: v_or3_b32 v1, v1, v9, v7 +; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v5, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v7, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v8, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v9, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s3, v5 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v6 +; GFX10-NEXT: v_and_or_b32 v1, v1, s3, v7 +; GFX10-NEXT: v_lshlrev_b32_e64 v6, 
v3, s3 +; GFX10-NEXT: s_and_b32 s0, s2, s3 +; GFX10-NEXT: v_or3_b32 v0, v0, v8, v4 ; GFX10-NEXT: v_lshlrev_b32_e64 v3, v3, s0 -; GFX10-NEXT: v_xor_b32_e32 v4, -1, v4 +; GFX10-NEXT: v_or3_b32 v1, v1, v9, v5 +; GFX10-NEXT: v_xor_b32_e32 v4, -1, v6 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v2 ; GFX10-NEXT: v_mov_b32_e32 v2, 8 ; GFX10-NEXT: v_cndmask_b32_e32 v5, v0, v1, vcc_lo ; GFX10-NEXT: v_and_or_b32 v3, v5, v4, v3 ; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v3, s0 ; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v0 +; GFX10-NEXT: v_mov_b32_e32 v3, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v7, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v4, v1, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v3 -; GFX10-NEXT: v_and_or_b32 v1, v1, s1, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v7, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s3, v5 +; GFX10-NEXT: v_and_or_b32 v1, v1, s3, v2 +; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, 
v4 +; GFX10-NEXT: v_or3_b32 v1, v1, v3, v2 ; GFX10-NEXT: v_mov_b32_e32 v2, 0 +; GFX10-NEXT: v_or3_b32 v0, v0, v7, v4 ; GFX10-NEXT: v_mov_b32_e32 v3, 0 -; GFX10-NEXT: v_or3_b32 v0, v0, v7, v5 -; GFX10-NEXT: v_or3_b32 v1, v1, v4, v6 ; GFX10-NEXT: global_store_dwordx2 v[2:3], v[0:1], off ; GFX10-NEXT: s_endpgm %vec = load <8 x i8>, <8 x i8> addrspace(1)* %ptr @@ -3458,50 +3301,48 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; GFX9-NEXT: s_mov_b32 s0, 8 -; GFX9-NEXT: s_movk_i32 s3, 0xff -; GFX9-NEXT: s_lshr_b32 s1, s2, 2 +; GFX9-NEXT: s_mov_b32 s1, 16 +; GFX9-NEXT: s_lshr_b32 s4, s2, 2 ; GFX9-NEXT: s_and_b32 s2, s2, 3 +; GFX9-NEXT: s_movk_i32 s3, 0xff ; GFX9-NEXT: s_lshl_b32 s2, s2, 3 ; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_lshl_b32 s2, s3, s2 -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s1, 1 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s4, 1 ; GFX9-NEXT: s_not_b32 s2, s2 ; GFX9-NEXT: v_mov_b32_e32 v3, 8 +; GFX9-NEXT: v_mov_b32_e32 v4, 16 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 8, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v6, s0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v8, v0, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v9, v1, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v7, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v9, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v8, s1, v0 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v10, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v7 ; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v4 -; GFX9-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX9-NEXT: v_and_or_b32 v1, v1, s3, v6 +; GFX9-NEXT: v_and_or_b32 v1, v1, s3, v9 +; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6 ; GFX9-NEXT: v_or3_b32 v0, v0, v8, v5 -; GFX9-NEXT: v_or3_b32 v1, v1, v9, v7 -; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc -; GFX9-NEXT: v_and_or_b32 v2, v4, s2, v2 -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s1, 0 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc +; GFX9-NEXT: v_or3_b32 v1, v1, v10, v6 +; GFX9-NEXT: v_cndmask_b32_e32 v5, v0, v1, vcc +; GFX9-NEXT: v_and_or_b32 v2, v5, s2, v2 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s4, 0 ; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1] -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v7, v0, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v8, v1, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v2 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v7, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_and_or_b32 v1, v1, s3, v3 -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v6 +; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v5 +; GFX9-NEXT: v_and_or_b32 v0, v0, s3, v6 +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX9-NEXT: v_or3_b32 v0, v0, v7, v2 +; GFX9-NEXT: v_or3_b32 v1, v1, v4, v3 ; GFX9-NEXT: v_mov_b32_e32 v2, 0 -; GFX9-NEXT: v_or3_b32 v0, v0, v7, v4 -; GFX9-NEXT: v_or3_b32 v1, v1, v8, v5 ; GFX9-NEXT: v_mov_b32_e32 v3, 0 ; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off ; GFX9-NEXT: s_endpgm @@ -3511,56 +3352,53 @@ ; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1] ; GFX8-NEXT: s_lshr_b32 s1, s2, 2 ; GFX8-NEXT: s_and_b32 s2, s2, 3 +; GFX8-NEXT: v_mov_b32_e32 v3, 8 ; GFX8-NEXT: s_lshl_b32 s2, s2, 3 -; GFX8-NEXT: v_mov_b32_e32 v6, s2 +; GFX8-NEXT: v_mov_b32_e32 v4, 16 +; GFX8-NEXT: v_mov_b32_e32 v7, s2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX8-NEXT: s_movk_i32 s0, 0xff -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_mov_b32_e32 v3, 8 -; GFX8-NEXT: v_mov_b32_e32 v4, 8 -; GFX8-NEXT: v_mov_b32_e32 v5, s0 ; GFX8-NEXT: s_lshl_b32 s0, s0, s2 ; GFX8-NEXT: v_cmp_eq_u32_e64 vcc, s1, 1 ; GFX8-NEXT: s_not_b32 s0, s0 +; GFX8-NEXT: v_mov_b32_e32 v5, 8 +; GFX8-NEXT: v_mov_b32_e32 v6, 16 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v6, 8, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 8, v1 -; GFX8-NEXT: 
v_lshlrev_b32_sdwa v3, v3, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v7 -; GFX8-NEXT: v_lshlrev_b32_sdwa v7, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v9, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v10, v0, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v11, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v1, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshrrev_b32_e32 v8, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v7, 24, v7 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v10 -; GFX8-NEXT: v_lshlrev_b32_e32 v8, 24, v9 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v11 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v6 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v8 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v8 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v4 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v7 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v3 ; GFX8-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc ; GFX8-NEXT: v_and_b32_e32 v3, s0, v3 ; GFX8-NEXT: v_or_b32_e32 v2, v3, v2 ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s1, 0 ; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1] ; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v6, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v4, v2 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v8, v0, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v5, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v8 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v3 -; GFX8-NEXT: v_mov_b32_e32 v2, 0 -; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v7 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v5 +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v7, v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v6, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v7 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3 ; GFX8-NEXT: v_or_b32_e32 v1, v1, v6 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v3 +; GFX8-NEXT: v_mov_b32_e32 v2, 0 ; GFX8-NEXT: v_mov_b32_e32 v3, 0 ; GFX8-NEXT: 
flat_store_dwordx2 v[2:3], v[0:1] ; GFX8-NEXT: s_endpgm @@ -3582,60 +3420,52 @@ ; GFX7-NEXT: s_not_b32 s1, s1 ; GFX7-NEXT: s_mov_b32 s6, -1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v3, s3, v3 -; GFX7-NEXT: v_and_b32_e32 v6, s3, v6 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v4, s3, v4 -; GFX7-NEXT: v_and_b32_e32 v7, s3, v7 -; GFX7-NEXT: v_and_b32_e32 v0, s3, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; GFX7-NEXT: v_and_b32_e32 v1, s3, v1 +; GFX7-NEXT: v_bfe_u32 v6, v0, 8, 8 +; GFX7-NEXT: v_bfe_u32 v8, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v5, s3, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v7, s3, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 ; GFX7-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 8, v8 +; GFX7-NEXT: v_or_b32_e32 v5, v5, v6 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v6, v7, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX7-NEXT: v_or_b32_e32 v0, v5, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX7-NEXT: v_or_b32_e32 v1, v6, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v6 -; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v7 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v8 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 ; GFX7-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc ; GFX7-NEXT: v_and_b32_e32 v3, s1, v3 ; GFX7-NEXT: v_or_b32_e32 v2, v3, v2 ; GFX7-NEXT: 
v_cmp_eq_u32_e64 s[0:1], s0, 0 ; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc ; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1] -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s3, v2 -; GFX7-NEXT: v_and_b32_e32 v5, s3, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v3, s3, v3 -; GFX7-NEXT: v_and_b32_e32 v6, s3, v6 -; GFX7-NEXT: v_and_b32_e32 v0, s3, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX7-NEXT: v_and_b32_e32 v1, s3, v1 +; GFX7-NEXT: v_bfe_u32 v5, v0, 8, 8 +; GFX7-NEXT: v_bfe_u32 v7, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v4, s3, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v6, s3, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 ; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v5, v6, v7 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX7-NEXT: v_or_b32_e32 v0, v4, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v5, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v6 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v7 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 ; GFX7-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GFX7-NEXT: s_endpgm ; @@ -3643,51 +3473,49 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; 
GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: s_movk_i32 s1, 0xff +; GFX10-NEXT: s_mov_b32 s1, 16 +; GFX10-NEXT: s_movk_i32 s3, 0xff ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v7, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v3, s0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, s0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v8, v1, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v3 -; GFX10-NEXT: v_and_or_b32 v1, v1, s1, v4 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v4, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v6, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v7, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v8, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s3, v4 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX10-NEXT: v_and_or_b32 v1, v1, s3, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v5 +; GFX10-NEXT: s_lshr_b32 s1, s2, 2 ; GFX10-NEXT: s_and_b32 s0, s2, 3 -; GFX10-NEXT: s_lshr_b32 s2, s2, 2 +; GFX10-NEXT: v_or3_b32 v0, v0, v7, v3 +; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s1, 1 +; GFX10-NEXT: v_or3_b32 v1, v1, v8, v4 ; GFX10-NEXT: s_lshl_b32 s0, s0, 3 -; GFX10-NEXT: v_or3_b32 v0, v0, v7, v5 -; GFX10-NEXT: v_or3_b32 v1, 
v1, v8, v6 -; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s2, 1 ; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_lshl_b32 s0, s1, s0 -; GFX10-NEXT: s_not_b32 s0, s0 +; GFX10-NEXT: s_lshl_b32 s0, s3, s0 ; GFX10-NEXT: v_cndmask_b32_e32 v3, v0, v1, vcc_lo +; GFX10-NEXT: s_not_b32 s0, s0 ; GFX10-NEXT: v_and_or_b32 v2, v3, s0, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s0, s2, 0 +; GFX10-NEXT: v_cmp_eq_u32_e64 s0, s1, 0 +; GFX10-NEXT: v_mov_b32_e32 v3, 8 ; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v2, s0 -; GFX10-NEXT: v_mov_b32_e32 v2, 8 -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v0 +; GFX10-NEXT: v_mov_b32_e32 v2, 16 ; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v7, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v4, v1, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v3 -; GFX10-NEXT: v_and_or_b32 v1, v1, s1, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v7, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, 
v0, s3, v5 +; GFX10-NEXT: v_and_or_b32 v1, v1, s3, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX10-NEXT: v_or3_b32 v1, v1, v2, v3 ; GFX10-NEXT: v_mov_b32_e32 v2, 0 +; GFX10-NEXT: v_or3_b32 v0, v0, v7, v4 ; GFX10-NEXT: v_mov_b32_e32 v3, 0 -; GFX10-NEXT: v_or3_b32 v0, v0, v7, v5 -; GFX10-NEXT: v_or3_b32 v1, v1, v4, v6 ; GFX10-NEXT: global_store_dwordx2 v[2:3], v[0:1], off ; GFX10-NEXT: s_endpgm %vec = load <8 x i8>, <8 x i8> addrspace(1)* %ptr @@ -3701,51 +3529,49 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; GFX9-NEXT: s_mov_b32 s0, 8 -; GFX9-NEXT: s_movk_i32 s1, 0xff -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 2, v3 +; GFX9-NEXT: s_mov_b32 s1, 16 +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 2, v3 ; GFX9-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX9-NEXT: s_movk_i32 s2, 0xff ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 3, v3 ; GFX9-NEXT: v_mov_b32_e32 v4, 0xff ; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, v3, v4 -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v6 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7 ; GFX9-NEXT: v_xor_b32_e32 v3, -1, v3 ; GFX9-NEXT: v_mov_b32_e32 v5, 8 +; GFX9-NEXT: v_mov_b32_e32 v6, 16 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v7, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v9, 8, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v8, 24, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v10, 24, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v7, s0, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v9, s0, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v11, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v12, v1, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v10, s0, v0 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v12, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v11, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v13, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, s2, v10 ; GFX9-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; GFX9-NEXT: v_and_or_b32 v0, v0, s1, v7 -; GFX9-NEXT: v_and_or_b32 v1, v1, s1, v9 -; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; GFX9-NEXT: v_and_or_b32 v1, v1, s2, v12 +; GFX9-NEXT: v_lshlrev_b32_e32 v9, 24, v9 ; GFX9-NEXT: v_or3_b32 v0, v0, v11, v8 -; GFX9-NEXT: v_or3_b32 v1, v1, v12, v10 -; GFX9-NEXT: v_cndmask_b32_e32 v7, v0, v1, vcc -; GFX9-NEXT: v_and_or_b32 v2, v7, v3, v2 -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v6 +; GFX9-NEXT: v_or3_b32 v1, v1, v13, v9 +; GFX9-NEXT: v_cndmask_b32_e32 v8, v0, v1, vcc +; GFX9-NEXT: v_and_or_b32 v2, v8, v3, v2 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v7 ; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1] ; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 8, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v8, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v5, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX9-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v7, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 
+; GFX9-NEXT: v_lshlrev_b32_sdwa v8, v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v0, v0, v4, v7 +; GFX9-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX9-NEXT: v_and_or_b32 v1, v1, v4, v5 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX9-NEXT: v_and_or_b32 v0, v0, v4, v2 -; GFX9-NEXT: v_and_b32_sdwa v9, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_or3_b32 v0, v0, v8, v3 +; GFX9-NEXT: v_or3_b32 v0, v0, v8, v2 +; GFX9-NEXT: v_or3_b32 v1, v1, v6, v3 ; GFX9-NEXT: v_mov_b32_e32 v2, 0 -; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v7 -; GFX9-NEXT: v_and_or_b32 v1, v1, v4, v5 -; GFX9-NEXT: v_or3_b32 v1, v1, v9, v6 ; GFX9-NEXT: v_mov_b32_e32 v3, 0 ; GFX9-NEXT: global_store_dwordx2 v[2:3], v[0:1], off ; GFX9-NEXT: s_endpgm @@ -3753,58 +3579,54 @@ ; GFX8-LABEL: insertelement_v_v8i8_v_v: ; GFX8: ; %bb.0: ; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1] -; GFX8-NEXT: s_movk_i32 s0, 0xff -; GFX8-NEXT: v_mov_b32_e32 v5, 8 -; GFX8-NEXT: v_mov_b32_e32 v6, 8 -; GFX8-NEXT: v_mov_b32_e32 v7, s0 -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 2, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v9, 2, v3 ; GFX8-NEXT: v_and_b32_e32 v3, 3, v3 +; GFX8-NEXT: v_mov_b32_e32 v5, 8 +; GFX8-NEXT: v_mov_b32_e32 v6, 16 ; GFX8-NEXT: v_lshlrev_b32_e32 v3, 3, v3 ; GFX8-NEXT: v_mov_b32_e32 v4, 0xff ; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX8-NEXT: v_lshlrev_b32_e32 v3, v3, v4 -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v9 ; GFX8-NEXT: v_xor_b32_e32 v3, -1, v3 -; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v8 +; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v9 +; GFX8-NEXT: v_mov_b32_e32 v7, 8 +; GFX8-NEXT: v_mov_b32_e32 v8, 16 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v9, 8, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 
24, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v11, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v5, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v10 -; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v6, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_and_b32_sdwa v13, v0, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshrrev_b32_e32 v12, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v7, v1, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, v0, v13 -; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v12 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v7 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v9 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v11 -; GFX8-NEXT: v_cndmask_b32_e32 v5, v0, v1, vcc -; GFX8-NEXT: v_and_b32_e32 v3, v5, v3 +; GFX8-NEXT: v_lshlrev_b32_sdwa v11, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v12, v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshrrev_b32_e32 v10, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v6, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v12 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v10 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v6 +; 
GFX8-NEXT: v_or_b32_e32 v0, v0, v4 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v5 +; GFX8-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc +; GFX8-NEXT: v_and_b32_e32 v3, v4, v3 ; GFX8-NEXT: v_or_b32_e32 v2, v3, v2 ; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1] ; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GFX8-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v0 -; GFX8-NEXT: v_and_b32_sdwa v8, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX8-NEXT: v_and_b32_sdwa v4, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v6, v7, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v7, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v5 ; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3 -; GFX8-NEXT: 
v_or_b32_e32 v0, v0, v8 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v3 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v7 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v3 ; GFX8-NEXT: v_mov_b32_e32 v2, 0 -; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v7 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v6 ; GFX8-NEXT: v_mov_b32_e32 v3, 0 ; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1] ; GFX8-NEXT: s_endpgm @@ -3827,60 +3649,52 @@ ; GFX7-NEXT: v_xor_b32_e32 v3, -1, v3 ; GFX7-NEXT: s_mov_b32 s6, -1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v6, s0, v6 -; GFX7-NEXT: v_and_b32_e32 v9, s0, v9 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 24, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v7, s0, v7 -; GFX7-NEXT: v_and_b32_e32 v10, s0, v10 -; GFX7-NEXT: v_and_b32_e32 v0, s0, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v6, 8, v6 +; GFX7-NEXT: v_bfe_u32 v9, v0, 8, 8 +; GFX7-NEXT: v_bfe_u32 v11, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v8, s0, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v10, s0, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 ; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX7-NEXT: v_lshlrev_b32_e32 v11, 8, v11 +; GFX7-NEXT: v_or_b32_e32 v8, v8, v9 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v9, v10, v11 +; GFX7-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; GFX7-NEXT: v_or_b32_e32 v0, v8, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; GFX7-NEXT: v_or_b32_e32 v1, v9, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v6 -; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v10 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v9 -; GFX7-NEXT: 
v_lshlrev_b32_e32 v8, 24, v8 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v7 -; GFX7-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v10 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v8 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v11 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v7 ; GFX7-NEXT: v_cndmask_b32_e32 v6, v0, v1, vcc ; GFX7-NEXT: v_and_b32_e32 v3, v6, v3 ; GFX7-NEXT: v_or_b32_e32 v2, v3, v2 ; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v5 -; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc ; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1] -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v0 -; GFX7-NEXT: v_and_b32_e32 v2, v2, v4 -; GFX7-NEXT: v_and_b32_e32 v6, v6, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_and_b32_e32 v1, v1, v4 -; GFX7-NEXT: v_and_b32_e32 v4, v7, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc +; GFX7-NEXT: v_bfe_u32 v6, v0, 8, 8 +; GFX7-NEXT: v_bfe_u32 v7, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v3, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v5, v0, v4 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v4, v1, v4 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 ; GFX7-NEXT: v_lshlrev_b32_e32 v6, 8, v6 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v7, 8, v7 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v5, v5, v6 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v7 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX7-NEXT: v_or_b32_e32 v0, v5, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v4, v1 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v6 -; 
GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v8 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v7 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v3 ; GFX7-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; GFX7-NEXT: s_endpgm ; @@ -3889,51 +3703,49 @@ ; GFX10-NEXT: global_load_dwordx2 v[0:1], v[0:1], off ; GFX10-NEXT: s_mov_b32 s0, 8 ; GFX10-NEXT: v_and_b32_e32 v4, 3, v3 -; GFX10-NEXT: s_movk_i32 s1, 0xff +; GFX10-NEXT: s_mov_b32 s1, 16 +; GFX10-NEXT: s_movk_i32 s2, 0xff ; GFX10-NEXT: v_lshrrev_b32_e32 v3, 2, v3 ; GFX10-NEXT: v_mov_b32_e32 v5, 0xff ; GFX10-NEXT: v_lshlrev_b32_e32 v4, 3, v4 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v3 ; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v6, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v9, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v10, v0, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v6, s0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v7, s0, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v11, v1, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; GFX10-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v6 -; GFX10-NEXT: v_and_or_b32 v1, v1, s1, v7 -; GFX10-NEXT: v_lshlrev_b32_e32 v6, v4, v5 +; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v7, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v9, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v10, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v11, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s2, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v8 +; GFX10-NEXT: v_and_or_b32 v1, v1, s2, v9 +; GFX10-NEXT: v_lshlrev_b32_e32 v8, v4, v5 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v3 -; GFX10-NEXT: v_or3_b32 v0, v0, v10, v8 -; GFX10-NEXT: v_or3_b32 v1, v1, v11, v9 -; GFX10-NEXT: v_xor_b32_e32 v4, -1, v6 +; GFX10-NEXT: v_or3_b32 v0, v0, v10, v6 +; GFX10-NEXT: v_mov_b32_e32 v3, 8 +; GFX10-NEXT: v_or3_b32 v1, v1, v11, v7 +; GFX10-NEXT: v_xor_b32_e32 v4, -1, v8 ; GFX10-NEXT: v_cndmask_b32_e32 v6, v0, v1, vcc_lo ; GFX10-NEXT: v_and_or_b32 v2, v6, v4, v2 ; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v2, s0 ; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo -; GFX10-NEXT: v_mov_b32_e32 v2, 8 -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v0 +; GFX10-NEXT: v_mov_b32_e32 v2, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v6, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX10-NEXT: v_and_b32_sdwa v8, v0, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v4, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX10-NEXT: v_and_or_b32 v0, v0, v5, v3 -; GFX10-NEXT: v_and_or_b32 v1, v1, v5, v2 +; 
GFX10-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v8, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, v5, v6 +; GFX10-NEXT: v_and_or_b32 v1, v1, v5, v3 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX10-NEXT: v_or3_b32 v1, v1, v2, v3 ; GFX10-NEXT: v_mov_b32_e32 v2, 0 +; GFX10-NEXT: v_or3_b32 v0, v0, v8, v4 ; GFX10-NEXT: v_mov_b32_e32 v3, 0 -; GFX10-NEXT: v_or3_b32 v0, v0, v8, v6 -; GFX10-NEXT: v_or3_b32 v1, v1, v4, v7 ; GFX10-NEXT: global_store_dwordx2 v[2:3], v[0:1], off ; GFX10-NEXT: s_endpgm %vec = load <8 x i8>, <8 x i8> addrspace(1)* %ptr @@ -3946,57 +3758,51 @@ ; GFX9-LABEL: insertelement_s_v16i8_s_s: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 -; GFX9-NEXT: s_movk_i32 s18, 0xff +; GFX9-NEXT: s_mov_b32 s12, 0x80008 +; GFX9-NEXT: s_movk_i32 s10, 0xff ; GFX9-NEXT: v_mov_b32_e32 v4, 0 ; GFX9-NEXT: v_mov_b32_e32 v5, 0 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_lshr_b32 s6, s0, 8 -; GFX9-NEXT: s_and_b32 s6, s6, s18 -; GFX9-NEXT: s_lshr_b32 s7, s0, 16 -; GFX9-NEXT: s_lshr_b32 s8, s0, 24 -; GFX9-NEXT: s_and_b32 s0, s0, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s6 -; GFX9-NEXT: s_and_b32 s6, s7, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 16 -; GFX9-NEXT: s_or_b32 s0, s0, s6 -; GFX9-NEXT: s_lshl_b32 s6, s8, 24 -; GFX9-NEXT: s_lshr_b32 s9, s1, 8 +; GFX9-NEXT: s_bfe_u32 s13, s0, s12 +; GFX9-NEXT: s_and_b32 s11, s0, s10 +; GFX9-NEXT: s_lshl_b32 s13, s13, 8 +; GFX9-NEXT: s_or_b32 s11, s11, s13 +; GFX9-NEXT: s_mov_b32 s13, 0x80010 +; GFX9-NEXT: s_lshr_b32 s6, s0, 24 +; GFX9-NEXT: s_bfe_u32 s0, s0, s13 +; GFX9-NEXT: s_lshl_b32 s0, s0, 16 +; GFX9-NEXT: s_or_b32 s0, s11, s0 +; 
GFX9-NEXT: s_bfe_u32 s11, s1, s12 +; GFX9-NEXT: s_lshl_b32 s6, s6, 24 ; GFX9-NEXT: s_or_b32 s0, s0, s6 -; GFX9-NEXT: s_and_b32 s6, s9, s18 -; GFX9-NEXT: s_lshr_b32 s10, s1, 16 -; GFX9-NEXT: s_lshr_b32 s11, s1, 24 -; GFX9-NEXT: s_and_b32 s1, s1, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 8 -; GFX9-NEXT: s_or_b32 s1, s1, s6 -; GFX9-NEXT: s_and_b32 s6, s10, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 16 -; GFX9-NEXT: s_or_b32 s1, s1, s6 -; GFX9-NEXT: s_lshl_b32 s6, s11, 24 -; GFX9-NEXT: s_lshr_b32 s12, s2, 8 +; GFX9-NEXT: s_lshr_b32 s7, s1, 24 +; GFX9-NEXT: s_and_b32 s6, s1, s10 +; GFX9-NEXT: s_bfe_u32 s1, s1, s13 +; GFX9-NEXT: s_lshl_b32 s11, s11, 8 +; GFX9-NEXT: s_or_b32 s6, s6, s11 +; GFX9-NEXT: s_lshl_b32 s1, s1, 16 +; GFX9-NEXT: s_or_b32 s1, s6, s1 +; GFX9-NEXT: s_lshl_b32 s6, s7, 24 +; GFX9-NEXT: s_bfe_u32 s7, s2, s12 ; GFX9-NEXT: s_or_b32 s1, s1, s6 -; GFX9-NEXT: s_and_b32 s6, s12, s18 -; GFX9-NEXT: s_lshr_b32 s13, s2, 16 -; GFX9-NEXT: s_lshr_b32 s14, s2, 24 -; GFX9-NEXT: s_and_b32 s2, s2, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 8 -; GFX9-NEXT: s_or_b32 s2, s2, s6 -; GFX9-NEXT: s_and_b32 s6, s13, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 16 -; GFX9-NEXT: s_or_b32 s2, s2, s6 -; GFX9-NEXT: s_lshl_b32 s6, s14, 24 -; GFX9-NEXT: s_lshr_b32 s15, s3, 8 +; GFX9-NEXT: s_lshr_b32 s8, s2, 24 +; GFX9-NEXT: s_and_b32 s6, s2, s10 +; GFX9-NEXT: s_bfe_u32 s2, s2, s13 +; GFX9-NEXT: s_lshl_b32 s7, s7, 8 +; GFX9-NEXT: s_or_b32 s6, s6, s7 +; GFX9-NEXT: s_lshl_b32 s2, s2, 16 +; GFX9-NEXT: s_bfe_u32 s7, s3, s12 +; GFX9-NEXT: s_or_b32 s2, s6, s2 +; GFX9-NEXT: s_lshl_b32 s6, s8, 24 ; GFX9-NEXT: s_or_b32 s2, s2, s6 -; GFX9-NEXT: s_and_b32 s6, s15, s18 -; GFX9-NEXT: s_lshr_b32 s16, s3, 16 -; GFX9-NEXT: s_lshr_b32 s17, s3, 24 -; GFX9-NEXT: s_and_b32 s3, s3, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 8 -; GFX9-NEXT: s_or_b32 s3, s3, s6 -; GFX9-NEXT: s_and_b32 s6, s16, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 16 -; GFX9-NEXT: s_or_b32 s3, s3, s6 -; GFX9-NEXT: s_lshl_b32 s6, s17, 24 +; GFX9-NEXT: s_lshr_b32 s9, s3, 24 +; 
GFX9-NEXT: s_and_b32 s6, s3, s10 +; GFX9-NEXT: s_bfe_u32 s3, s3, s13 +; GFX9-NEXT: s_lshl_b32 s7, s7, 8 +; GFX9-NEXT: s_or_b32 s6, s6, s7 +; GFX9-NEXT: s_lshl_b32 s3, s3, 16 +; GFX9-NEXT: s_or_b32 s3, s6, s3 +; GFX9-NEXT: s_lshl_b32 s6, s9, 24 ; GFX9-NEXT: s_or_b32 s3, s3, s6 ; GFX9-NEXT: s_lshr_b32 s6, s5, 2 ; GFX9-NEXT: s_cmp_eq_u32 s6, 1 @@ -4007,9 +3813,9 @@ ; GFX9-NEXT: s_cselect_b32 s7, s3, s7 ; GFX9-NEXT: s_and_b32 s5, s5, 3 ; GFX9-NEXT: s_lshl_b32 s5, s5, 3 -; GFX9-NEXT: s_and_b32 s4, s4, s18 +; GFX9-NEXT: s_and_b32 s4, s4, s10 ; GFX9-NEXT: s_lshl_b32 s4, s4, s5 -; GFX9-NEXT: s_lshl_b32 s5, s18, s5 +; GFX9-NEXT: s_lshl_b32 s5, s10, s5 ; GFX9-NEXT: s_andn2_b32 s5, s7, s5 ; GFX9-NEXT: s_or_b32 s4, s5, s4 ; GFX9-NEXT: s_cmp_eq_u32 s6, 0 @@ -4020,53 +3826,45 @@ ; GFX9-NEXT: s_cselect_b32 s2, s4, s2 ; GFX9-NEXT: s_cmp_eq_u32 s6, 3 ; GFX9-NEXT: s_cselect_b32 s3, s4, s3 -; GFX9-NEXT: s_lshr_b32 s4, s0, 8 -; GFX9-NEXT: s_and_b32 s4, s4, s18 -; GFX9-NEXT: s_lshr_b32 s5, s0, 16 -; GFX9-NEXT: s_lshr_b32 s6, s0, 24 -; GFX9-NEXT: s_and_b32 s0, s0, s18 -; GFX9-NEXT: s_lshl_b32 s4, s4, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s4 -; GFX9-NEXT: s_and_b32 s4, s5, s18 -; GFX9-NEXT: s_lshl_b32 s4, s4, 16 -; GFX9-NEXT: s_or_b32 s0, s0, s4 -; GFX9-NEXT: s_lshl_b32 s4, s6, 24 -; GFX9-NEXT: s_lshr_b32 s7, s1, 8 +; GFX9-NEXT: s_bfe_u32 s9, s0, s12 +; GFX9-NEXT: s_lshr_b32 s4, s0, 24 +; GFX9-NEXT: s_and_b32 s8, s0, s10 +; GFX9-NEXT: s_bfe_u32 s0, s0, s13 +; GFX9-NEXT: s_lshl_b32 s9, s9, 8 +; GFX9-NEXT: s_or_b32 s8, s8, s9 +; GFX9-NEXT: s_lshl_b32 s0, s0, 16 +; GFX9-NEXT: s_or_b32 s0, s8, s0 +; GFX9-NEXT: s_bfe_u32 s8, s1, s12 +; GFX9-NEXT: s_lshl_b32 s4, s4, 24 ; GFX9-NEXT: s_or_b32 s0, s0, s4 -; GFX9-NEXT: s_and_b32 s4, s7, s18 -; GFX9-NEXT: s_lshr_b32 s8, s1, 16 -; GFX9-NEXT: s_lshr_b32 s9, s1, 24 -; GFX9-NEXT: s_and_b32 s1, s1, s18 -; GFX9-NEXT: s_lshl_b32 s4, s4, 8 -; GFX9-NEXT: s_or_b32 s1, s1, s4 -; GFX9-NEXT: s_and_b32 s4, s8, s18 -; GFX9-NEXT: s_lshl_b32 s4, s4, 16 -; GFX9-NEXT: 
s_or_b32 s1, s1, s4 -; GFX9-NEXT: s_lshl_b32 s4, s9, 24 -; GFX9-NEXT: s_lshr_b32 s10, s2, 8 +; GFX9-NEXT: s_lshr_b32 s5, s1, 24 +; GFX9-NEXT: s_and_b32 s4, s1, s10 +; GFX9-NEXT: s_bfe_u32 s1, s1, s13 +; GFX9-NEXT: s_lshl_b32 s8, s8, 8 +; GFX9-NEXT: s_or_b32 s4, s4, s8 +; GFX9-NEXT: s_lshl_b32 s1, s1, 16 +; GFX9-NEXT: s_or_b32 s1, s4, s1 +; GFX9-NEXT: s_lshl_b32 s4, s5, 24 +; GFX9-NEXT: s_bfe_u32 s5, s2, s12 ; GFX9-NEXT: s_or_b32 s1, s1, s4 -; GFX9-NEXT: s_and_b32 s4, s10, s18 -; GFX9-NEXT: s_lshr_b32 s11, s2, 16 -; GFX9-NEXT: s_lshr_b32 s12, s2, 24 -; GFX9-NEXT: s_and_b32 s2, s2, s18 -; GFX9-NEXT: s_lshl_b32 s4, s4, 8 -; GFX9-NEXT: s_or_b32 s2, s2, s4 -; GFX9-NEXT: s_and_b32 s4, s11, s18 -; GFX9-NEXT: s_lshl_b32 s4, s4, 16 -; GFX9-NEXT: s_or_b32 s2, s2, s4 -; GFX9-NEXT: s_lshl_b32 s4, s12, 24 -; GFX9-NEXT: s_lshr_b32 s13, s3, 8 +; GFX9-NEXT: s_lshr_b32 s6, s2, 24 +; GFX9-NEXT: s_and_b32 s4, s2, s10 +; GFX9-NEXT: s_bfe_u32 s2, s2, s13 +; GFX9-NEXT: s_lshl_b32 s5, s5, 8 +; GFX9-NEXT: s_or_b32 s4, s4, s5 +; GFX9-NEXT: s_lshl_b32 s2, s2, 16 +; GFX9-NEXT: s_bfe_u32 s5, s3, s12 +; GFX9-NEXT: s_or_b32 s2, s4, s2 +; GFX9-NEXT: s_lshl_b32 s4, s6, 24 ; GFX9-NEXT: s_or_b32 s2, s2, s4 -; GFX9-NEXT: s_and_b32 s4, s13, s18 -; GFX9-NEXT: s_lshr_b32 s14, s3, 16 -; GFX9-NEXT: s_lshr_b32 s15, s3, 24 -; GFX9-NEXT: s_and_b32 s3, s3, s18 -; GFX9-NEXT: s_lshl_b32 s4, s4, 8 -; GFX9-NEXT: s_or_b32 s3, s3, s4 -; GFX9-NEXT: s_and_b32 s4, s14, s18 -; GFX9-NEXT: s_lshl_b32 s4, s4, 16 -; GFX9-NEXT: s_or_b32 s3, s3, s4 -; GFX9-NEXT: s_lshl_b32 s4, s15, 24 +; GFX9-NEXT: s_lshr_b32 s7, s3, 24 +; GFX9-NEXT: s_and_b32 s4, s3, s10 +; GFX9-NEXT: s_bfe_u32 s3, s3, s13 +; GFX9-NEXT: s_lshl_b32 s5, s5, 8 +; GFX9-NEXT: s_or_b32 s4, s4, s5 +; GFX9-NEXT: s_lshl_b32 s3, s3, 16 +; GFX9-NEXT: s_or_b32 s3, s4, s3 +; GFX9-NEXT: s_lshl_b32 s4, s7, 24 ; GFX9-NEXT: s_or_b32 s3, s3, s4 ; GFX9-NEXT: v_mov_b32_e32 v0, s0 ; GFX9-NEXT: v_mov_b32_e32 v1, s1 @@ -4078,57 +3876,51 @@ ; GFX8-LABEL: 
insertelement_s_v16i8_s_s: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 -; GFX8-NEXT: s_movk_i32 s18, 0xff +; GFX8-NEXT: s_mov_b32 s12, 0x80008 +; GFX8-NEXT: s_movk_i32 s10, 0xff ; GFX8-NEXT: v_mov_b32_e32 v4, 0 ; GFX8-NEXT: v_mov_b32_e32 v5, 0 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_lshr_b32 s6, s0, 8 -; GFX8-NEXT: s_and_b32 s6, s6, s18 -; GFX8-NEXT: s_lshr_b32 s7, s0, 16 -; GFX8-NEXT: s_lshr_b32 s8, s0, 24 -; GFX8-NEXT: s_and_b32 s0, s0, s18 -; GFX8-NEXT: s_lshl_b32 s6, s6, 8 -; GFX8-NEXT: s_or_b32 s0, s0, s6 -; GFX8-NEXT: s_and_b32 s6, s7, s18 -; GFX8-NEXT: s_lshl_b32 s6, s6, 16 -; GFX8-NEXT: s_or_b32 s0, s0, s6 -; GFX8-NEXT: s_lshl_b32 s6, s8, 24 -; GFX8-NEXT: s_lshr_b32 s9, s1, 8 +; GFX8-NEXT: s_bfe_u32 s13, s0, s12 +; GFX8-NEXT: s_and_b32 s11, s0, s10 +; GFX8-NEXT: s_lshl_b32 s13, s13, 8 +; GFX8-NEXT: s_or_b32 s11, s11, s13 +; GFX8-NEXT: s_mov_b32 s13, 0x80010 +; GFX8-NEXT: s_lshr_b32 s6, s0, 24 +; GFX8-NEXT: s_bfe_u32 s0, s0, s13 +; GFX8-NEXT: s_lshl_b32 s0, s0, 16 +; GFX8-NEXT: s_or_b32 s0, s11, s0 +; GFX8-NEXT: s_bfe_u32 s11, s1, s12 +; GFX8-NEXT: s_lshl_b32 s6, s6, 24 ; GFX8-NEXT: s_or_b32 s0, s0, s6 -; GFX8-NEXT: s_and_b32 s6, s9, s18 -; GFX8-NEXT: s_lshr_b32 s10, s1, 16 -; GFX8-NEXT: s_lshr_b32 s11, s1, 24 -; GFX8-NEXT: s_and_b32 s1, s1, s18 -; GFX8-NEXT: s_lshl_b32 s6, s6, 8 -; GFX8-NEXT: s_or_b32 s1, s1, s6 -; GFX8-NEXT: s_and_b32 s6, s10, s18 -; GFX8-NEXT: s_lshl_b32 s6, s6, 16 -; GFX8-NEXT: s_or_b32 s1, s1, s6 -; GFX8-NEXT: s_lshl_b32 s6, s11, 24 -; GFX8-NEXT: s_lshr_b32 s12, s2, 8 +; GFX8-NEXT: s_lshr_b32 s7, s1, 24 +; GFX8-NEXT: s_and_b32 s6, s1, s10 +; GFX8-NEXT: s_bfe_u32 s1, s1, s13 +; GFX8-NEXT: s_lshl_b32 s11, s11, 8 +; GFX8-NEXT: s_or_b32 s6, s6, s11 +; GFX8-NEXT: s_lshl_b32 s1, s1, 16 +; GFX8-NEXT: s_or_b32 s1, s6, s1 +; GFX8-NEXT: s_lshl_b32 s6, s7, 24 +; GFX8-NEXT: s_bfe_u32 s7, s2, s12 ; GFX8-NEXT: s_or_b32 s1, s1, s6 -; GFX8-NEXT: s_and_b32 s6, s12, s18 -; GFX8-NEXT: s_lshr_b32 s13, s2, 16 -; GFX8-NEXT: 
s_lshr_b32 s14, s2, 24 -; GFX8-NEXT: s_and_b32 s2, s2, s18 -; GFX8-NEXT: s_lshl_b32 s6, s6, 8 -; GFX8-NEXT: s_or_b32 s2, s2, s6 -; GFX8-NEXT: s_and_b32 s6, s13, s18 -; GFX8-NEXT: s_lshl_b32 s6, s6, 16 -; GFX8-NEXT: s_or_b32 s2, s2, s6 -; GFX8-NEXT: s_lshl_b32 s6, s14, 24 -; GFX8-NEXT: s_lshr_b32 s15, s3, 8 +; GFX8-NEXT: s_lshr_b32 s8, s2, 24 +; GFX8-NEXT: s_and_b32 s6, s2, s10 +; GFX8-NEXT: s_bfe_u32 s2, s2, s13 +; GFX8-NEXT: s_lshl_b32 s7, s7, 8 +; GFX8-NEXT: s_or_b32 s6, s6, s7 +; GFX8-NEXT: s_lshl_b32 s2, s2, 16 +; GFX8-NEXT: s_bfe_u32 s7, s3, s12 +; GFX8-NEXT: s_or_b32 s2, s6, s2 +; GFX8-NEXT: s_lshl_b32 s6, s8, 24 ; GFX8-NEXT: s_or_b32 s2, s2, s6 -; GFX8-NEXT: s_and_b32 s6, s15, s18 -; GFX8-NEXT: s_lshr_b32 s16, s3, 16 -; GFX8-NEXT: s_lshr_b32 s17, s3, 24 -; GFX8-NEXT: s_and_b32 s3, s3, s18 -; GFX8-NEXT: s_lshl_b32 s6, s6, 8 -; GFX8-NEXT: s_or_b32 s3, s3, s6 -; GFX8-NEXT: s_and_b32 s6, s16, s18 -; GFX8-NEXT: s_lshl_b32 s6, s6, 16 -; GFX8-NEXT: s_or_b32 s3, s3, s6 -; GFX8-NEXT: s_lshl_b32 s6, s17, 24 +; GFX8-NEXT: s_lshr_b32 s9, s3, 24 +; GFX8-NEXT: s_and_b32 s6, s3, s10 +; GFX8-NEXT: s_bfe_u32 s3, s3, s13 +; GFX8-NEXT: s_lshl_b32 s7, s7, 8 +; GFX8-NEXT: s_or_b32 s6, s6, s7 +; GFX8-NEXT: s_lshl_b32 s3, s3, 16 +; GFX8-NEXT: s_or_b32 s3, s6, s3 +; GFX8-NEXT: s_lshl_b32 s6, s9, 24 ; GFX8-NEXT: s_or_b32 s3, s3, s6 ; GFX8-NEXT: s_lshr_b32 s6, s5, 2 ; GFX8-NEXT: s_cmp_eq_u32 s6, 1 @@ -4139,9 +3931,9 @@ ; GFX8-NEXT: s_cselect_b32 s7, s3, s7 ; GFX8-NEXT: s_and_b32 s5, s5, 3 ; GFX8-NEXT: s_lshl_b32 s5, s5, 3 -; GFX8-NEXT: s_and_b32 s4, s4, s18 +; GFX8-NEXT: s_and_b32 s4, s4, s10 ; GFX8-NEXT: s_lshl_b32 s4, s4, s5 -; GFX8-NEXT: s_lshl_b32 s5, s18, s5 +; GFX8-NEXT: s_lshl_b32 s5, s10, s5 ; GFX8-NEXT: s_andn2_b32 s5, s7, s5 ; GFX8-NEXT: s_or_b32 s4, s5, s4 ; GFX8-NEXT: s_cmp_eq_u32 s6, 0 @@ -4152,53 +3944,45 @@ ; GFX8-NEXT: s_cselect_b32 s2, s4, s2 ; GFX8-NEXT: s_cmp_eq_u32 s6, 3 ; GFX8-NEXT: s_cselect_b32 s3, s4, s3 -; GFX8-NEXT: s_lshr_b32 s4, s0, 8 -; GFX8-NEXT: 
s_and_b32 s4, s4, s18 -; GFX8-NEXT: s_lshr_b32 s5, s0, 16 -; GFX8-NEXT: s_lshr_b32 s6, s0, 24 -; GFX8-NEXT: s_and_b32 s0, s0, s18 -; GFX8-NEXT: s_lshl_b32 s4, s4, 8 -; GFX8-NEXT: s_or_b32 s0, s0, s4 -; GFX8-NEXT: s_and_b32 s4, s5, s18 -; GFX8-NEXT: s_lshl_b32 s4, s4, 16 -; GFX8-NEXT: s_or_b32 s0, s0, s4 -; GFX8-NEXT: s_lshl_b32 s4, s6, 24 -; GFX8-NEXT: s_lshr_b32 s7, s1, 8 +; GFX8-NEXT: s_bfe_u32 s9, s0, s12 +; GFX8-NEXT: s_lshr_b32 s4, s0, 24 +; GFX8-NEXT: s_and_b32 s8, s0, s10 +; GFX8-NEXT: s_bfe_u32 s0, s0, s13 +; GFX8-NEXT: s_lshl_b32 s9, s9, 8 +; GFX8-NEXT: s_or_b32 s8, s8, s9 +; GFX8-NEXT: s_lshl_b32 s0, s0, 16 +; GFX8-NEXT: s_or_b32 s0, s8, s0 +; GFX8-NEXT: s_bfe_u32 s8, s1, s12 +; GFX8-NEXT: s_lshl_b32 s4, s4, 24 ; GFX8-NEXT: s_or_b32 s0, s0, s4 -; GFX8-NEXT: s_and_b32 s4, s7, s18 -; GFX8-NEXT: s_lshr_b32 s8, s1, 16 -; GFX8-NEXT: s_lshr_b32 s9, s1, 24 -; GFX8-NEXT: s_and_b32 s1, s1, s18 -; GFX8-NEXT: s_lshl_b32 s4, s4, 8 -; GFX8-NEXT: s_or_b32 s1, s1, s4 -; GFX8-NEXT: s_and_b32 s4, s8, s18 -; GFX8-NEXT: s_lshl_b32 s4, s4, 16 -; GFX8-NEXT: s_or_b32 s1, s1, s4 -; GFX8-NEXT: s_lshl_b32 s4, s9, 24 -; GFX8-NEXT: s_lshr_b32 s10, s2, 8 +; GFX8-NEXT: s_lshr_b32 s5, s1, 24 +; GFX8-NEXT: s_and_b32 s4, s1, s10 +; GFX8-NEXT: s_bfe_u32 s1, s1, s13 +; GFX8-NEXT: s_lshl_b32 s8, s8, 8 +; GFX8-NEXT: s_or_b32 s4, s4, s8 +; GFX8-NEXT: s_lshl_b32 s1, s1, 16 +; GFX8-NEXT: s_or_b32 s1, s4, s1 +; GFX8-NEXT: s_lshl_b32 s4, s5, 24 +; GFX8-NEXT: s_bfe_u32 s5, s2, s12 ; GFX8-NEXT: s_or_b32 s1, s1, s4 -; GFX8-NEXT: s_and_b32 s4, s10, s18 -; GFX8-NEXT: s_lshr_b32 s11, s2, 16 -; GFX8-NEXT: s_lshr_b32 s12, s2, 24 -; GFX8-NEXT: s_and_b32 s2, s2, s18 -; GFX8-NEXT: s_lshl_b32 s4, s4, 8 -; GFX8-NEXT: s_or_b32 s2, s2, s4 -; GFX8-NEXT: s_and_b32 s4, s11, s18 -; GFX8-NEXT: s_lshl_b32 s4, s4, 16 -; GFX8-NEXT: s_or_b32 s2, s2, s4 -; GFX8-NEXT: s_lshl_b32 s4, s12, 24 -; GFX8-NEXT: s_lshr_b32 s13, s3, 8 +; GFX8-NEXT: s_lshr_b32 s6, s2, 24 +; GFX8-NEXT: s_and_b32 s4, s2, s10 +; GFX8-NEXT: s_bfe_u32 
s2, s2, s13 +; GFX8-NEXT: s_lshl_b32 s5, s5, 8 +; GFX8-NEXT: s_or_b32 s4, s4, s5 +; GFX8-NEXT: s_lshl_b32 s2, s2, 16 +; GFX8-NEXT: s_bfe_u32 s5, s3, s12 +; GFX8-NEXT: s_or_b32 s2, s4, s2 +; GFX8-NEXT: s_lshl_b32 s4, s6, 24 ; GFX8-NEXT: s_or_b32 s2, s2, s4 -; GFX8-NEXT: s_and_b32 s4, s13, s18 -; GFX8-NEXT: s_lshr_b32 s14, s3, 16 -; GFX8-NEXT: s_lshr_b32 s15, s3, 24 -; GFX8-NEXT: s_and_b32 s3, s3, s18 -; GFX8-NEXT: s_lshl_b32 s4, s4, 8 -; GFX8-NEXT: s_or_b32 s3, s3, s4 -; GFX8-NEXT: s_and_b32 s4, s14, s18 -; GFX8-NEXT: s_lshl_b32 s4, s4, 16 -; GFX8-NEXT: s_or_b32 s3, s3, s4 -; GFX8-NEXT: s_lshl_b32 s4, s15, 24 +; GFX8-NEXT: s_lshr_b32 s7, s3, 24 +; GFX8-NEXT: s_and_b32 s4, s3, s10 +; GFX8-NEXT: s_bfe_u32 s3, s3, s13 +; GFX8-NEXT: s_lshl_b32 s5, s5, 8 +; GFX8-NEXT: s_or_b32 s4, s4, s5 +; GFX8-NEXT: s_lshl_b32 s3, s3, 16 +; GFX8-NEXT: s_or_b32 s3, s4, s3 +; GFX8-NEXT: s_lshl_b32 s4, s7, 24 ; GFX8-NEXT: s_or_b32 s3, s3, s4 ; GFX8-NEXT: v_mov_b32_e32 v0, s0 ; GFX8-NEXT: v_mov_b32_e32 v1, s1 @@ -4210,55 +3994,49 @@ ; GFX7-LABEL: insertelement_s_v16i8_s_s: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 -; GFX7-NEXT: s_movk_i32 s18, 0xff +; GFX7-NEXT: s_mov_b32 s12, 0x80008 +; GFX7-NEXT: s_movk_i32 s10, 0xff ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_lshr_b32 s6, s0, 8 -; GFX7-NEXT: s_and_b32 s6, s6, s18 -; GFX7-NEXT: s_lshr_b32 s7, s0, 16 -; GFX7-NEXT: s_lshr_b32 s8, s0, 24 -; GFX7-NEXT: s_and_b32 s0, s0, s18 -; GFX7-NEXT: s_lshl_b32 s6, s6, 8 -; GFX7-NEXT: s_or_b32 s0, s0, s6 -; GFX7-NEXT: s_and_b32 s6, s7, s18 -; GFX7-NEXT: s_lshl_b32 s6, s6, 16 -; GFX7-NEXT: s_or_b32 s0, s0, s6 -; GFX7-NEXT: s_lshl_b32 s6, s8, 24 -; GFX7-NEXT: s_lshr_b32 s9, s1, 8 +; GFX7-NEXT: s_bfe_u32 s13, s0, s12 +; GFX7-NEXT: s_and_b32 s11, s0, s10 +; GFX7-NEXT: s_lshl_b32 s13, s13, 8 +; GFX7-NEXT: s_or_b32 s11, s11, s13 +; GFX7-NEXT: s_mov_b32 s13, 0x80010 +; GFX7-NEXT: s_lshr_b32 s6, s0, 24 +; GFX7-NEXT: s_bfe_u32 s0, s0, s13 +; GFX7-NEXT: s_lshl_b32 s0, s0, 16 +; 
GFX7-NEXT: s_or_b32 s0, s11, s0 +; GFX7-NEXT: s_bfe_u32 s11, s1, s12 +; GFX7-NEXT: s_lshl_b32 s6, s6, 24 ; GFX7-NEXT: s_or_b32 s0, s0, s6 -; GFX7-NEXT: s_and_b32 s6, s9, s18 -; GFX7-NEXT: s_lshr_b32 s10, s1, 16 -; GFX7-NEXT: s_lshr_b32 s11, s1, 24 -; GFX7-NEXT: s_and_b32 s1, s1, s18 -; GFX7-NEXT: s_lshl_b32 s6, s6, 8 -; GFX7-NEXT: s_or_b32 s1, s1, s6 -; GFX7-NEXT: s_and_b32 s6, s10, s18 -; GFX7-NEXT: s_lshl_b32 s6, s6, 16 -; GFX7-NEXT: s_or_b32 s1, s1, s6 -; GFX7-NEXT: s_lshl_b32 s6, s11, 24 -; GFX7-NEXT: s_lshr_b32 s12, s2, 8 +; GFX7-NEXT: s_lshr_b32 s7, s1, 24 +; GFX7-NEXT: s_and_b32 s6, s1, s10 +; GFX7-NEXT: s_bfe_u32 s1, s1, s13 +; GFX7-NEXT: s_lshl_b32 s11, s11, 8 +; GFX7-NEXT: s_or_b32 s6, s6, s11 +; GFX7-NEXT: s_lshl_b32 s1, s1, 16 +; GFX7-NEXT: s_or_b32 s1, s6, s1 +; GFX7-NEXT: s_lshl_b32 s6, s7, 24 +; GFX7-NEXT: s_bfe_u32 s7, s2, s12 ; GFX7-NEXT: s_or_b32 s1, s1, s6 -; GFX7-NEXT: s_and_b32 s6, s12, s18 -; GFX7-NEXT: s_lshr_b32 s13, s2, 16 -; GFX7-NEXT: s_lshr_b32 s14, s2, 24 -; GFX7-NEXT: s_and_b32 s2, s2, s18 -; GFX7-NEXT: s_lshl_b32 s6, s6, 8 -; GFX7-NEXT: s_or_b32 s2, s2, s6 -; GFX7-NEXT: s_and_b32 s6, s13, s18 -; GFX7-NEXT: s_lshl_b32 s6, s6, 16 -; GFX7-NEXT: s_or_b32 s2, s2, s6 -; GFX7-NEXT: s_lshl_b32 s6, s14, 24 -; GFX7-NEXT: s_lshr_b32 s15, s3, 8 +; GFX7-NEXT: s_lshr_b32 s8, s2, 24 +; GFX7-NEXT: s_and_b32 s6, s2, s10 +; GFX7-NEXT: s_bfe_u32 s2, s2, s13 +; GFX7-NEXT: s_lshl_b32 s7, s7, 8 +; GFX7-NEXT: s_or_b32 s6, s6, s7 +; GFX7-NEXT: s_lshl_b32 s2, s2, 16 +; GFX7-NEXT: s_bfe_u32 s7, s3, s12 +; GFX7-NEXT: s_or_b32 s2, s6, s2 +; GFX7-NEXT: s_lshl_b32 s6, s8, 24 ; GFX7-NEXT: s_or_b32 s2, s2, s6 -; GFX7-NEXT: s_and_b32 s6, s15, s18 -; GFX7-NEXT: s_lshr_b32 s16, s3, 16 -; GFX7-NEXT: s_lshr_b32 s17, s3, 24 -; GFX7-NEXT: s_and_b32 s3, s3, s18 -; GFX7-NEXT: s_lshl_b32 s6, s6, 8 -; GFX7-NEXT: s_or_b32 s3, s3, s6 -; GFX7-NEXT: s_and_b32 s6, s16, s18 -; GFX7-NEXT: s_lshl_b32 s6, s6, 16 -; GFX7-NEXT: s_or_b32 s3, s3, s6 -; GFX7-NEXT: s_lshl_b32 s6, s17, 24 +; 
GFX7-NEXT: s_lshr_b32 s9, s3, 24 +; GFX7-NEXT: s_and_b32 s6, s3, s10 +; GFX7-NEXT: s_bfe_u32 s3, s3, s13 +; GFX7-NEXT: s_lshl_b32 s7, s7, 8 +; GFX7-NEXT: s_or_b32 s6, s6, s7 +; GFX7-NEXT: s_lshl_b32 s3, s3, 16 +; GFX7-NEXT: s_or_b32 s3, s6, s3 +; GFX7-NEXT: s_lshl_b32 s6, s9, 24 ; GFX7-NEXT: s_or_b32 s3, s3, s6 ; GFX7-NEXT: s_lshr_b32 s6, s5, 2 ; GFX7-NEXT: s_cmp_eq_u32 s6, 1 @@ -4269,9 +4047,9 @@ ; GFX7-NEXT: s_cselect_b32 s7, s3, s7 ; GFX7-NEXT: s_and_b32 s5, s5, 3 ; GFX7-NEXT: s_lshl_b32 s5, s5, 3 -; GFX7-NEXT: s_and_b32 s4, s4, s18 +; GFX7-NEXT: s_and_b32 s4, s4, s10 ; GFX7-NEXT: s_lshl_b32 s4, s4, s5 -; GFX7-NEXT: s_lshl_b32 s5, s18, s5 +; GFX7-NEXT: s_lshl_b32 s5, s10, s5 ; GFX7-NEXT: s_andn2_b32 s5, s7, s5 ; GFX7-NEXT: s_or_b32 s4, s5, s4 ; GFX7-NEXT: s_cmp_eq_u32 s6, 0 @@ -4282,53 +4060,45 @@ ; GFX7-NEXT: s_cselect_b32 s2, s4, s2 ; GFX7-NEXT: s_cmp_eq_u32 s6, 3 ; GFX7-NEXT: s_cselect_b32 s3, s4, s3 -; GFX7-NEXT: s_lshr_b32 s4, s5, 8 -; GFX7-NEXT: s_and_b32 s4, s4, s18 -; GFX7-NEXT: s_lshr_b32 s6, s5, 16 -; GFX7-NEXT: s_lshr_b32 s8, s5, 24 -; GFX7-NEXT: s_and_b32 s5, s5, s18 -; GFX7-NEXT: s_lshl_b32 s4, s4, 8 -; GFX7-NEXT: s_or_b32 s4, s5, s4 -; GFX7-NEXT: s_and_b32 s5, s6, s18 -; GFX7-NEXT: s_lshr_b32 s9, s7, 8 +; GFX7-NEXT: s_bfe_u32 s14, s5, s12 +; GFX7-NEXT: s_lshr_b32 s4, s5, 24 +; GFX7-NEXT: s_and_b32 s11, s5, s10 +; GFX7-NEXT: s_bfe_u32 s5, s5, s13 +; GFX7-NEXT: s_lshl_b32 s14, s14, 8 +; GFX7-NEXT: s_or_b32 s11, s11, s14 ; GFX7-NEXT: s_lshl_b32 s5, s5, 16 -; GFX7-NEXT: s_and_b32 s6, s9, s18 -; GFX7-NEXT: s_or_b32 s4, s4, s5 -; GFX7-NEXT: s_lshl_b32 s5, s8, 24 -; GFX7-NEXT: s_or_b32 s4, s4, s5 -; GFX7-NEXT: s_lshr_b32 s10, s7, 16 -; GFX7-NEXT: s_and_b32 s5, s7, s18 -; GFX7-NEXT: s_lshl_b32 s6, s6, 8 -; GFX7-NEXT: s_or_b32 s5, s5, s6 -; GFX7-NEXT: s_and_b32 s6, s10, s18 -; GFX7-NEXT: s_lshl_b32 s6, s6, 16 -; GFX7-NEXT: s_lshr_b32 s11, s7, 24 -; GFX7-NEXT: s_or_b32 s5, s5, s6 -; GFX7-NEXT: s_lshl_b32 s6, s11, 24 -; GFX7-NEXT: s_lshr_b32 s12, s2, 8 +; 
GFX7-NEXT: s_or_b32 s5, s11, s5 +; GFX7-NEXT: s_lshl_b32 s4, s4, 24 +; GFX7-NEXT: s_bfe_u32 s11, s7, s12 +; GFX7-NEXT: s_lshr_b32 s6, s7, 24 +; GFX7-NEXT: s_or_b32 s4, s5, s4 +; GFX7-NEXT: s_and_b32 s5, s7, s10 +; GFX7-NEXT: s_bfe_u32 s7, s7, s13 +; GFX7-NEXT: s_lshl_b32 s11, s11, 8 +; GFX7-NEXT: s_or_b32 s5, s5, s11 +; GFX7-NEXT: s_lshl_b32 s7, s7, 16 +; GFX7-NEXT: s_or_b32 s5, s5, s7 +; GFX7-NEXT: s_bfe_u32 s7, s2, s12 +; GFX7-NEXT: s_lshl_b32 s6, s6, 24 ; GFX7-NEXT: s_or_b32 s5, s5, s6 -; GFX7-NEXT: s_and_b32 s6, s12, s18 -; GFX7-NEXT: s_lshr_b32 s13, s2, 16 -; GFX7-NEXT: s_lshr_b32 s14, s2, 24 -; GFX7-NEXT: s_and_b32 s2, s2, s18 -; GFX7-NEXT: s_lshl_b32 s6, s6, 8 -; GFX7-NEXT: s_or_b32 s2, s2, s6 -; GFX7-NEXT: s_and_b32 s6, s13, s18 -; GFX7-NEXT: s_lshl_b32 s6, s6, 16 -; GFX7-NEXT: s_or_b32 s2, s2, s6 -; GFX7-NEXT: s_lshl_b32 s6, s14, 24 -; GFX7-NEXT: s_lshr_b32 s15, s3, 8 +; GFX7-NEXT: s_lshr_b32 s8, s2, 24 +; GFX7-NEXT: s_and_b32 s6, s2, s10 +; GFX7-NEXT: s_bfe_u32 s2, s2, s13 +; GFX7-NEXT: s_lshl_b32 s7, s7, 8 +; GFX7-NEXT: s_or_b32 s6, s6, s7 +; GFX7-NEXT: s_lshl_b32 s2, s2, 16 +; GFX7-NEXT: s_bfe_u32 s7, s3, s12 +; GFX7-NEXT: s_or_b32 s2, s6, s2 +; GFX7-NEXT: s_lshl_b32 s6, s8, 24 ; GFX7-NEXT: s_or_b32 s6, s2, s6 -; GFX7-NEXT: s_lshr_b32 s16, s3, 16 -; GFX7-NEXT: s_lshr_b32 s17, s3, 24 -; GFX7-NEXT: s_and_b32 s2, s3, s18 -; GFX7-NEXT: s_and_b32 s3, s15, s18 -; GFX7-NEXT: s_lshl_b32 s3, s3, 8 -; GFX7-NEXT: s_or_b32 s2, s2, s3 -; GFX7-NEXT: s_and_b32 s3, s16, s18 +; GFX7-NEXT: s_lshr_b32 s9, s3, 24 +; GFX7-NEXT: s_and_b32 s2, s3, s10 +; GFX7-NEXT: s_bfe_u32 s3, s3, s13 +; GFX7-NEXT: s_lshl_b32 s7, s7, 8 +; GFX7-NEXT: s_or_b32 s2, s2, s7 ; GFX7-NEXT: s_lshl_b32 s3, s3, 16 ; GFX7-NEXT: s_or_b32 s2, s2, s3 -; GFX7-NEXT: s_lshl_b32 s3, s17, 24 +; GFX7-NEXT: s_lshl_b32 s3, s9, 24 ; GFX7-NEXT: s_or_b32 s7, s2, s3 ; GFX7-NEXT: v_mov_b32_e32 v0, s4 ; GFX7-NEXT: s_mov_b64 s[0:1], 0 @@ -4343,127 +4113,113 @@ ; GFX10-LABEL: insertelement_s_v16i8_s_s: ; GFX10: ; %bb.0: 
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 +; GFX10-NEXT: s_mov_b32 s7, 0x80008 ; GFX10-NEXT: s_movk_i32 s6, 0xff +; GFX10-NEXT: s_mov_b32 s8, 0x80010 ; GFX10-NEXT: v_mov_b32_e32 v4, 0 ; GFX10-NEXT: v_mov_b32_e32 v5, 0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s7, s0, 8 -; GFX10-NEXT: s_lshr_b32 s8, s0, 16 -; GFX10-NEXT: s_and_b32 s7, s7, s6 +; GFX10-NEXT: s_bfe_u32 s14, s0, s7 ; GFX10-NEXT: s_lshr_b32 s9, s0, 24 -; GFX10-NEXT: s_and_b32 s8, s8, s6 -; GFX10-NEXT: s_and_b32 s0, s0, s6 -; GFX10-NEXT: s_lshl_b32 s7, s7, 8 -; GFX10-NEXT: s_lshr_b32 s13, s2, 8 -; GFX10-NEXT: s_lshl_b32 s8, s8, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s7 -; GFX10-NEXT: s_lshr_b32 s10, s1, 8 -; GFX10-NEXT: s_or_b32 s0, s0, s8 -; GFX10-NEXT: s_and_b32 s8, s13, s6 +; GFX10-NEXT: s_bfe_u32 s16, s1, s7 +; GFX10-NEXT: s_and_b32 s13, s0, s6 +; GFX10-NEXT: s_bfe_u32 s0, s0, s8 +; GFX10-NEXT: s_lshl_b32 s14, s14, 8 +; GFX10-NEXT: s_lshr_b32 s10, s1, 24 +; GFX10-NEXT: s_and_b32 s15, s1, s6 +; GFX10-NEXT: s_bfe_u32 s1, s1, s8 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_or_b32 s13, s13, s14 +; GFX10-NEXT: s_lshl_b32 s16, s16, 8 +; GFX10-NEXT: s_bfe_u32 s18, s2, s7 ; GFX10-NEXT: s_lshl_b32 s9, s9, 24 -; GFX10-NEXT: s_lshr_b32 s14, s2, 16 -; GFX10-NEXT: s_lshr_b32 s11, s1, 16 -; GFX10-NEXT: s_and_b32 s10, s10, s6 -; GFX10-NEXT: s_lshr_b32 s15, s2, 24 +; GFX10-NEXT: s_or_b32 s0, s13, s0 +; GFX10-NEXT: s_lshl_b32 s1, s1, 16 +; GFX10-NEXT: s_or_b32 s14, s15, s16 ; GFX10-NEXT: s_or_b32 s0, s0, s9 -; GFX10-NEXT: s_and_b32 s2, s2, s6 -; GFX10-NEXT: s_lshl_b32 s8, s8, 8 -; GFX10-NEXT: s_and_b32 s9, s14, s6 -; GFX10-NEXT: s_lshr_b32 s12, s1, 24 -; GFX10-NEXT: s_and_b32 s11, s11, s6 -; GFX10-NEXT: s_or_b32 s2, s2, s8 -; GFX10-NEXT: s_lshl_b32 s8, s9, 16 -; GFX10-NEXT: s_lshr_b32 s16, s3, 8 -; GFX10-NEXT: s_and_b32 s1, s1, s6 -; GFX10-NEXT: s_lshl_b32 s10, s10, 8 -; GFX10-NEXT: s_lshr_b32 s17, s3, 16 -; GFX10-NEXT: s_or_b32 s2, s2, s8 -; GFX10-NEXT: s_and_b32 s8, s16, s6 -; 
GFX10-NEXT: s_lshl_b32 s7, s11, 16 +; GFX10-NEXT: s_lshr_b32 s11, s2, 24 +; GFX10-NEXT: s_and_b32 s17, s2, s6 +; GFX10-NEXT: s_lshl_b32 s9, s18, 8 +; GFX10-NEXT: s_bfe_u32 s2, s2, s8 +; GFX10-NEXT: s_lshl_b32 s10, s10, 24 +; GFX10-NEXT: s_or_b32 s1, s14, s1 +; GFX10-NEXT: s_or_b32 s9, s17, s9 +; GFX10-NEXT: s_lshl_b32 s2, s2, 16 ; GFX10-NEXT: s_or_b32 s1, s1, s10 -; GFX10-NEXT: s_lshr_b32 s18, s3, 24 -; GFX10-NEXT: s_or_b32 s1, s1, s7 -; GFX10-NEXT: s_lshl_b32 s7, s12, 24 -; GFX10-NEXT: s_and_b32 s3, s3, s6 -; GFX10-NEXT: s_lshl_b32 s8, s8, 8 -; GFX10-NEXT: s_and_b32 s9, s17, s6 -; GFX10-NEXT: s_or_b32 s1, s1, s7 -; GFX10-NEXT: s_lshl_b32 s7, s15, 24 -; GFX10-NEXT: s_or_b32 s3, s3, s8 -; GFX10-NEXT: s_lshl_b32 s8, s9, 16 -; GFX10-NEXT: s_or_b32 s2, s2, s7 -; GFX10-NEXT: s_or_b32 s3, s3, s8 -; GFX10-NEXT: s_lshl_b32 s7, s18, 24 -; GFX10-NEXT: s_lshr_b32 s8, s5, 2 -; GFX10-NEXT: s_or_b32 s3, s3, s7 -; GFX10-NEXT: s_cmp_eq_u32 s8, 1 -; GFX10-NEXT: s_cselect_b32 s7, s1, s0 -; GFX10-NEXT: s_cmp_eq_u32 s8, 2 -; GFX10-NEXT: s_cselect_b32 s7, s2, s7 -; GFX10-NEXT: s_cmp_eq_u32 s8, 3 -; GFX10-NEXT: s_cselect_b32 s7, s3, s7 +; GFX10-NEXT: s_bfe_u32 s10, s3, s7 +; GFX10-NEXT: s_or_b32 s2, s9, s2 +; GFX10-NEXT: s_lshl_b32 s9, s11, 24 +; GFX10-NEXT: s_lshr_b32 s12, s3, 24 +; GFX10-NEXT: s_and_b32 s11, s3, s6 +; GFX10-NEXT: s_lshl_b32 s10, s10, 8 +; GFX10-NEXT: s_bfe_u32 s3, s3, s8 +; GFX10-NEXT: s_or_b32 s10, s11, s10 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_or_b32 s2, s2, s9 +; GFX10-NEXT: s_or_b32 s3, s10, s3 +; GFX10-NEXT: s_lshl_b32 s9, s12, 24 +; GFX10-NEXT: s_lshr_b32 s10, s5, 2 +; GFX10-NEXT: s_or_b32 s3, s3, s9 +; GFX10-NEXT: s_cmp_eq_u32 s10, 1 +; GFX10-NEXT: s_cselect_b32 s9, s1, s0 +; GFX10-NEXT: s_cmp_eq_u32 s10, 2 +; GFX10-NEXT: s_cselect_b32 s9, s2, s9 +; GFX10-NEXT: s_cmp_eq_u32 s10, 3 +; GFX10-NEXT: s_cselect_b32 s9, s3, s9 ; GFX10-NEXT: s_and_b32 s5, s5, 3 ; GFX10-NEXT: s_and_b32 s4, s4, s6 ; GFX10-NEXT: s_lshl_b32 s5, s5, 3 -; GFX10-NEXT: 
s_lshl_b32 s9, s6, s5 +; GFX10-NEXT: s_lshl_b32 s11, s6, s5 ; GFX10-NEXT: s_lshl_b32 s4, s4, s5 -; GFX10-NEXT: s_andn2_b32 s5, s7, s9 +; GFX10-NEXT: s_andn2_b32 s5, s9, s11 ; GFX10-NEXT: s_or_b32 s4, s5, s4 -; GFX10-NEXT: s_cmp_eq_u32 s8, 0 +; GFX10-NEXT: s_cmp_eq_u32 s10, 0 ; GFX10-NEXT: s_cselect_b32 s0, s4, s0 -; GFX10-NEXT: s_cmp_eq_u32 s8, 1 +; GFX10-NEXT: s_cmp_eq_u32 s10, 1 ; GFX10-NEXT: s_cselect_b32 s1, s4, s1 -; GFX10-NEXT: s_cmp_eq_u32 s8, 2 +; GFX10-NEXT: s_cmp_eq_u32 s10, 2 ; GFX10-NEXT: s_cselect_b32 s2, s4, s2 -; GFX10-NEXT: s_cmp_eq_u32 s8, 3 +; GFX10-NEXT: s_cmp_eq_u32 s10, 3 ; GFX10-NEXT: s_cselect_b32 s3, s4, s3 -; GFX10-NEXT: s_lshr_b32 s4, s0, 8 -; GFX10-NEXT: s_lshr_b32 s5, s0, 16 -; GFX10-NEXT: s_and_b32 s4, s4, s6 -; GFX10-NEXT: s_lshr_b32 s7, s0, 24 -; GFX10-NEXT: s_and_b32 s0, s0, s6 -; GFX10-NEXT: s_lshl_b32 s4, s4, 8 -; GFX10-NEXT: s_and_b32 s5, s5, s6 -; GFX10-NEXT: s_or_b32 s0, s0, s4 -; GFX10-NEXT: s_lshl_b32 s4, s5, 16 -; GFX10-NEXT: s_lshr_b32 s8, s1, 8 -; GFX10-NEXT: s_or_b32 s0, s0, s4 -; GFX10-NEXT: s_lshl_b32 s4, s7, 24 -; GFX10-NEXT: s_and_b32 s7, s8, s6 -; GFX10-NEXT: s_lshr_b32 s9, s1, 16 -; GFX10-NEXT: s_lshr_b32 s10, s1, 24 -; GFX10-NEXT: s_and_b32 s1, s1, s6 -; GFX10-NEXT: s_lshl_b32 s7, s7, 8 -; GFX10-NEXT: s_and_b32 s8, s9, s6 -; GFX10-NEXT: s_or_b32 s1, s1, s7 -; GFX10-NEXT: s_lshl_b32 s7, s8, 16 +; GFX10-NEXT: s_bfe_u32 s10, s0, s7 +; GFX10-NEXT: s_lshr_b32 s4, s0, 24 +; GFX10-NEXT: s_and_b32 s11, s0, s6 +; GFX10-NEXT: s_lshl_b32 s10, s10, 8 +; GFX10-NEXT: s_bfe_u32 s0, s0, s8 +; GFX10-NEXT: s_or_b32 s10, s11, s10 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_lshr_b32 s5, s1, 24 +; GFX10-NEXT: s_or_b32 s0, s10, s0 +; GFX10-NEXT: s_bfe_u32 s10, s1, s7 +; GFX10-NEXT: s_lshl_b32 s4, s4, 24 +; GFX10-NEXT: s_and_b32 s12, s1, s6 +; GFX10-NEXT: s_lshl_b32 s10, s10, 8 +; GFX10-NEXT: s_bfe_u32 s1, s1, s8 ; GFX10-NEXT: s_or_b32 s0, s0, s4 -; GFX10-NEXT: s_lshr_b32 s11, s2, 8 -; GFX10-NEXT: s_or_b32 s1, s1, s7 -; 
GFX10-NEXT: s_lshl_b32 s4, s10, 24 -; GFX10-NEXT: s_lshr_b32 s14, s3, 8 -; GFX10-NEXT: s_and_b32 s7, s11, s6 -; GFX10-NEXT: s_lshr_b32 s12, s2, 16 +; GFX10-NEXT: s_lshl_b32 s4, s5, 24 +; GFX10-NEXT: s_bfe_u32 s5, s2, s7 +; GFX10-NEXT: s_or_b32 s10, s12, s10 +; GFX10-NEXT: s_lshl_b32 s1, s1, 16 +; GFX10-NEXT: s_lshr_b32 s9, s2, 24 +; GFX10-NEXT: s_or_b32 s1, s10, s1 +; GFX10-NEXT: s_and_b32 s10, s2, s6 +; GFX10-NEXT: s_lshl_b32 s5, s5, 8 +; GFX10-NEXT: s_bfe_u32 s2, s2, s8 ; GFX10-NEXT: s_or_b32 s1, s1, s4 -; GFX10-NEXT: s_and_b32 s4, s14, s6 -; GFX10-NEXT: s_lshr_b32 s15, s3, 16 -; GFX10-NEXT: s_lshr_b32 s13, s2, 24 -; GFX10-NEXT: s_lshr_b32 s5, s3, 24 -; GFX10-NEXT: s_and_b32 s2, s2, s6 -; GFX10-NEXT: s_lshl_b32 s7, s7, 8 -; GFX10-NEXT: s_and_b32 s8, s12, s6 -; GFX10-NEXT: s_and_b32 s3, s3, s6 +; GFX10-NEXT: s_bfe_u32 s4, s3, s7 +; GFX10-NEXT: s_or_b32 s5, s10, s5 +; GFX10-NEXT: s_lshl_b32 s2, s2, 16 +; GFX10-NEXT: s_lshr_b32 s11, s3, 24 +; GFX10-NEXT: s_or_b32 s2, s5, s2 +; GFX10-NEXT: s_and_b32 s5, s3, s6 ; GFX10-NEXT: s_lshl_b32 s4, s4, 8 -; GFX10-NEXT: s_and_b32 s6, s15, s6 -; GFX10-NEXT: s_or_b32 s2, s2, s7 -; GFX10-NEXT: s_lshl_b32 s7, s8, 16 -; GFX10-NEXT: s_or_b32 s3, s3, s4 -; GFX10-NEXT: s_lshl_b32 s4, s6, 16 -; GFX10-NEXT: s_or_b32 s2, s2, s7 -; GFX10-NEXT: s_lshl_b32 s6, s13, 24 -; GFX10-NEXT: s_or_b32 s3, s3, s4 -; GFX10-NEXT: s_lshl_b32 s4, s5, 24 -; GFX10-NEXT: s_or_b32 s2, s2, s6 +; GFX10-NEXT: s_bfe_u32 s3, s3, s8 +; GFX10-NEXT: s_or_b32 s4, s5, s4 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_lshl_b32 s5, s9, 24 +; GFX10-NEXT: s_or_b32 s3, s4, s3 +; GFX10-NEXT: s_lshl_b32 s4, s11, 24 +; GFX10-NEXT: s_or_b32 s2, s2, s5 ; GFX10-NEXT: s_or_b32 s3, s3, s4 ; GFX10-NEXT: v_mov_b32_e32 v0, s0 ; GFX10-NEXT: v_mov_b32_e32 v1, s1 @@ -4482,86 +4238,80 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off ; GFX9-NEXT: s_mov_b32 s0, 8 +; GFX9-NEXT: s_mov_b32 s1, 16 ; GFX9-NEXT: v_mov_b32_e32 v4, 8 ; GFX9-NEXT: s_movk_i32 s6, 0xff -; 
GFX9-NEXT: s_and_b32 s1, s3, 3 +; GFX9-NEXT: v_mov_b32_e32 v5, 16 ; GFX9-NEXT: s_lshr_b32 s4, s3, 2 +; GFX9-NEXT: s_and_b32 s3, s3, 3 ; GFX9-NEXT: s_and_b32 s2, s2, s6 -; GFX9-NEXT: s_lshl_b32 s1, s1, 3 -; GFX9-NEXT: s_lshl_b32 s2, s2, s1 -; GFX9-NEXT: s_lshl_b32 s1, s6, s1 +; GFX9-NEXT: s_lshl_b32 s3, s3, 3 ; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s4, 1 -; GFX9-NEXT: s_not_b32 s5, s1 -; GFX9-NEXT: v_mov_b32_e32 v5, s2 +; GFX9-NEXT: s_lshl_b32 s2, s2, s3 +; GFX9-NEXT: s_lshl_b32 s3, s6, s3 +; GFX9-NEXT: s_not_b32 s5, s3 +; GFX9-NEXT: v_mov_b32_e32 v6, s2 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[2:3], s4, 3 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v8, 8, v1 ; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v10, 8, v2 -; GFX9-NEXT: v_lshlrev_b32_sdwa v6, s0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v8, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v11, 24, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v12, 8, v3 -; GFX9-NEXT: v_and_b32_sdwa v14, v0, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v15, v1, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 24, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v11, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v13, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v12, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v14, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v15, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v0, v0, s6, v11 ; GFX9-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX9-NEXT: v_and_or_b32 v0, v0, s6, v6 +; GFX9-NEXT: v_and_or_b32 v1, v1, s6, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; GFX9-NEXT: v_lshrrev_b32_e32 v10, 24, v3 +; GFX9-NEXT: v_lshlrev_b32_sdwa v16, v5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v17, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v2, v2, s6, v15 ; GFX9-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX9-NEXT: v_and_or_b32 v1, v1, s6, v8 -; GFX9-NEXT: v_lshlrev_b32_sdwa v10, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v13, 24, v3 -; GFX9-NEXT: v_and_b32_sdwa v16, v2, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX9-NEXT: v_and_or_b32 v2, v2, s6, v10 -; GFX9-NEXT: v_lshlrev_b32_sdwa v12, v4, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or3_b32 v0, v0, v14, v7 -; GFX9-NEXT: v_or3_b32 v1, v1, v15, v9 -; GFX9-NEXT: v_and_b32_sdwa v17, v3, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; GFX9-NEXT: v_and_or_b32 v3, v3, s6, v12 -; GFX9-NEXT: v_or3_b32 v2, v2, v16, v11 -; GFX9-NEXT: v_cndmask_b32_e32 v6, v0, v1, vcc +; GFX9-NEXT: v_or3_b32 v0, v0, v12, v7 +; GFX9-NEXT: v_or3_b32 v1, v1, v14, v8 +; GFX9-NEXT: v_lshlrev_b32_sdwa v18, v5, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v3, v3, s6, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; GFX9-NEXT: v_or3_b32 v2, v2, v16, v9 +; GFX9-NEXT: v_cndmask_b32_e32 v7, v0, v1, vcc ; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s4, 2 -; GFX9-NEXT: v_or3_b32 v3, v3, v17, v13 -; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v2, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v6, 
v6, v3, s[2:3] -; GFX9-NEXT: v_and_or_b32 v5, v6, s5, v5 +; GFX9-NEXT: v_or3_b32 v3, v3, v18, v10 +; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v2, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v3, s[2:3] +; GFX9-NEXT: v_and_or_b32 v6, v7, s5, v6 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], s4, 0 -; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v5, s[2:3] -; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v5, s[4:5] -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v7, 8, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v9, 8, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v11, 8, v3 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v6, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v6, s[4:5] +; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v6, s[2:3] +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v3 +; GFX9-NEXT: v_lshlrev_b32_sdwa v10, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v12, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v14, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v8, 24, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v10, 24, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v12, 24, v3 -; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v7, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v9, v4, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v4, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v13, 
v0, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v14, v1, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v15, v2, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v16, v3, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX9-NEXT: v_and_or_b32 v0, v0, s6, v5 +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1 +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 24, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v11, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v13, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v15, v5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v5, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_and_or_b32 v3, v3, s6, v4 -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 24, v12 +; GFX9-NEXT: v_lshlrev_b32_e32 v4, 24, v9 +; GFX9-NEXT: v_or3_b32 v3, v3, v5, v4 ; GFX9-NEXT: v_mov_b32_e32 v4, 0 +; GFX9-NEXT: v_and_or_b32 v0, v0, s6, v10 +; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; GFX9-NEXT: v_and_or_b32 v1, v1, s6, v12 +; GFX9-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; GFX9-NEXT: v_and_or_b32 v2, v2, s6, v14 ; GFX9-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; GFX9-NEXT: v_and_or_b32 v1, v1, s6, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; GFX9-NEXT: v_and_or_b32 v2, v2, s6, v9 -; GFX9-NEXT: v_or3_b32 v0, v0, v13, v6 -; GFX9-NEXT: v_or3_b32 v1, v1, v14, v8 -; GFX9-NEXT: v_or3_b32 v2, v2, v15, v10 -; GFX9-NEXT: v_or3_b32 v3, v3, v16, v11 +; GFX9-NEXT: v_or3_b32 v0, v0, v11, v6 +; GFX9-NEXT: v_or3_b32 v1, v1, v13, v7 +; GFX9-NEXT: v_or3_b32 v2, v2, v15, v8 ; GFX9-NEXT: v_mov_b32_e32 v5, 0 ; GFX9-NEXT: global_store_dwordx4 v[4:5], v[0:3], off ; GFX9-NEXT: s_endpgm 
@@ -4569,11 +4319,12 @@ ; GFX8-LABEL: insertelement_v_v16i8_s_s: ; GFX8: ; %bb.0: ; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1] -; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: v_mov_b32_e32 v4, 8 -; GFX8-NEXT: v_mov_b32_e32 v5, 8 -; GFX8-NEXT: v_mov_b32_e32 v6, s0 +; GFX8-NEXT: v_mov_b32_e32 v6, 8 +; GFX8-NEXT: v_mov_b32_e32 v5, 16 +; GFX8-NEXT: v_mov_b32_e32 v7, 16 ; GFX8-NEXT: s_and_b32 s1, s3, 3 +; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: s_lshr_b32 s4, s3, 2 ; GFX8-NEXT: s_lshl_b32 s1, s1, 3 ; GFX8-NEXT: s_and_b32 s2, s2, s0 @@ -4584,81 +4335,73 @@ ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s4, 2 ; GFX8-NEXT: v_cmp_eq_u32_e64 s[2:3], s4, 3 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v7, 8, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v12, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX8-NEXT: v_lshrrev_b32_e32 v8, 24, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v9, 8, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_e32 v7, 24, v8 -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 24, v1 -; GFX8-NEXT: v_lshlrev_b32_sdwa v8, v5, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v11, 8, v2 -; GFX8-NEXT: v_and_b32_sdwa v15, v0, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v16, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v13, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshrrev_b32_e32 v9, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v14, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v10, 24, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v15, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v9 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v5 +; GFX8-NEXT: v_lshlrev_b32_sdwa v16, v6, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v13 +; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v3 +; GFX8-NEXT: v_lshlrev_b32_sdwa v17, v7, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v3, v3, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v10 -; GFX8-NEXT: v_lshrrev_b32_e32 v12, 24, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v5, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v13, 8, v3 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v15 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v16 -; GFX8-NEXT: v_and_b32_sdwa v17, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v2, v2, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v12 -; GFX8-NEXT: v_lshlrev_b32_sdwa v12, v5, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_or_b32_e32 v2, v2, v17 -; GFX8-NEXT: v_lshrrev_b32_e32 v14, 24, v3 -; GFX8-NEXT: 
v_and_b32_sdwa v18, v3, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v3, v3, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, v0, v7 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v9 -; GFX8-NEXT: v_lshlrev_b32_e32 v13, 24, v14 -; GFX8-NEXT: v_or_b32_e32 v3, v3, v18 -; GFX8-NEXT: v_or_b32_e32 v2, v2, v11 +; GFX8-NEXT: v_or_b32_e32 v2, v2, v15 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v8 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v10, 24, v11 +; GFX8-NEXT: v_or_b32_e32 v3, v3, v17 +; GFX8-NEXT: v_or_b32_e32 v2, v2, v9 ; GFX8-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc -; GFX8-NEXT: v_or_b32_e32 v3, v3, v13 +; GFX8-NEXT: v_or_b32_e32 v3, v3, v10 ; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v2, s[0:1] ; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v3, s[2:3] ; GFX8-NEXT: v_and_b32_e32 v4, s6, v4 ; GFX8-NEXT: v_or_b32_e32 v4, s5, v4 ; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], s4, 0 ; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v4, s[0:1] ; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v4, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v4, s[0:1] ; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[2:3] -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 8, v1 -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 8, v2 -; GFX8-NEXT: v_lshrrev_b32_e32 v12, 8, v3 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v8, v5, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v5, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v5, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v9, 24, v1 -; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v2 -; GFX8-NEXT: 
v_lshrrev_b32_e32 v13, 24, v3 -; GFX8-NEXT: v_and_b32_sdwa v14, v0, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v15, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v16, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v2, v2, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v6, v3, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v3, v3, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v12, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v11, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v13, v7, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v14, v6, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v6, v6, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v8, 24, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v15, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshrrev_b32_e32 v9, 24, v3 +; GFX8-NEXT: v_lshlrev_b32_sdwa v7, v7, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v3, v3, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v11 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v13 +; GFX8-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; GFX8-NEXT: v_or_b32_e32 v2, v2, v15 +; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v9 +; GFX8-NEXT: v_or_b32_e32 v3, v3, v7 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v4 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v5 ; GFX8-NEXT: v_mov_b32_e32 v4, 0 -; GFX8-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v14 -; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v15 -; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX8-NEXT: v_or_b32_e32 v2, v2, v16 -; GFX8-NEXT: v_lshlrev_b32_e32 v12, 24, v13 +; GFX8-NEXT: v_or_b32_e32 v2, v2, v8 ; GFX8-NEXT: v_or_b32_e32 v3, v3, v6 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v7 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v9 -; GFX8-NEXT: v_or_b32_e32 v2, v2, v11 -; GFX8-NEXT: v_or_b32_e32 v3, v3, v12 ; GFX8-NEXT: v_mov_b32_e32 v5, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NEXT: s_endpgm @@ -4682,111 +4425,95 @@ ; GFX7-NEXT: v_cmp_eq_u32_e64 s[2:3], s4, 3 ; GFX7-NEXT: s_mov_b32 s10, -1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 8, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v10, 8, v2 -; GFX7-NEXT: v_and_b32_e32 v4, s6, v4 -; GFX7-NEXT: v_and_b32_e32 v7, s6, v7 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 24, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v2 -; 
GFX7-NEXT: v_lshrrev_b32_e32 v13, 8, v3 -; GFX7-NEXT: v_and_b32_e32 v5, s6, v5 -; GFX7-NEXT: v_and_b32_e32 v8, s6, v8 -; GFX7-NEXT: v_and_b32_e32 v10, s6, v10 -; GFX7-NEXT: v_and_b32_e32 v0, s6, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_and_b32_e32 v1, s6, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 8, v7 -; GFX7-NEXT: v_lshrrev_b32_e32 v12, 24, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v3 -; GFX7-NEXT: v_and_b32_e32 v11, s6, v11 -; GFX7-NEXT: v_and_b32_e32 v13, s6, v13 -; GFX7-NEXT: v_and_b32_e32 v2, s6, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v10, 8, v10 -; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v8 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v7 -; GFX7-NEXT: v_lshrrev_b32_e32 v15, 24, v3 -; GFX7-NEXT: v_and_b32_e32 v14, s6, v14 -; GFX7-NEXT: v_and_b32_e32 v3, s6, v3 +; GFX7-NEXT: v_bfe_u32 v9, v0, 8, 8 +; GFX7-NEXT: v_bfe_u32 v11, v1, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX7-NEXT: v_bfe_u32 v13, v2, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v8, s6, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v10, s6, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; GFX7-NEXT: v_lshlrev_b32_e32 v11, 8, v11 +; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v2 +; GFX7-NEXT: v_bfe_u32 v15, v3, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v12, s6, v2 +; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 8 +; GFX7-NEXT: v_or_b32_e32 v8, v8, v9 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v9, v10, v11 ; GFX7-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v3 +; GFX7-NEXT: v_and_b32_e32 v14, s6, v3 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v15, 8, v15 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX7-NEXT: v_or_b32_e32 v0, v8, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX7-NEXT: v_or_b32_e32 v1, v9, v1 +; 
GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_or_b32_e32 v10, v12, v13 ; GFX7-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v10 -; GFX7-NEXT: v_lshlrev_b32_e32 v12, 24, v12 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v11 -; GFX7-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v13 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v6 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v9 -; GFX7-NEXT: v_lshlrev_b32_e32 v15, 24, v15 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v14 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v12 +; GFX7-NEXT: v_or_b32_e32 v2, v10, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_or_b32_e32 v11, v14, v15 +; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; GFX7-NEXT: v_or_b32_e32 v3, v11, v3 +; GFX7-NEXT: v_or_b32_e32 v2, v2, v6 ; GFX7-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc -; GFX7-NEXT: v_or_b32_e32 v3, v3, v15 +; GFX7-NEXT: v_or_b32_e32 v3, v3, v7 ; GFX7-NEXT: v_cndmask_b32_e64 v4, v4, v2, s[0:1] ; GFX7-NEXT: v_cndmask_b32_e64 v4, v4, v3, s[2:3] ; GFX7-NEXT: v_and_b32_e32 v4, s7, v4 ; GFX7-NEXT: v_or_b32_e32 v4, s5, v4 ; GFX7-NEXT: v_cmp_eq_u32_e64 s[4:5], s4, 0 -; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, v4, s[4:5] ; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc +; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, v4, s[4:5] ; GFX7-NEXT: v_cndmask_b32_e64 v2, v2, v4, s[0:1] +; GFX7-NEXT: v_bfe_u32 v9, v0, 8, 8 +; GFX7-NEXT: v_bfe_u32 v11, v1, 8, 8 ; GFX7-NEXT: v_cndmask_b32_e64 v3, v3, v4, s[2:3] -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX7-NEXT: v_and_b32_e32 v4, s6, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 8, v1 -; GFX7-NEXT: v_and_b32_e32 v0, s6, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 +; GFX7-NEXT: 
v_lshrrev_b32_e32 v4, 24, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX7-NEXT: v_bfe_u32 v13, v2, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v8, s6, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v10, s6, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; GFX7-NEXT: v_lshlrev_b32_e32 v11, 8, v11 +; GFX7-NEXT: v_or_b32_e32 v8, v8, v9 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v2 +; GFX7-NEXT: v_and_b32_e32 v12, s6, v2 +; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v9, v10, v11 +; GFX7-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX7-NEXT: v_or_b32_e32 v0, v8, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX7-NEXT: v_or_b32_e32 v1, v9, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_or_b32_e32 v10, v12, v13 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s6, v7 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s6, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s6, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v9 -; GFX7-NEXT: v_lshrrev_b32_e32 v10, 8, v2 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s6, v10 -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v12, 24, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s6, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s6, v11 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v12 -; GFX7-NEXT: v_lshrrev_b32_e32 v13, 8, v3 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v5 +; GFX7-NEXT: v_bfe_u32 v5, v3, 8, 8 +; GFX7-NEXT: 
v_or_b32_e32 v2, v10, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v6 ; GFX7-NEXT: v_or_b32_e32 v2, v2, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s6, v13 -; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v15, 24, v3 -; GFX7-NEXT: v_and_b32_e32 v3, s6, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_and_b32_e32 v5, s6, v5 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s6, v14 -; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v15 -; GFX7-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v6 +; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v3 +; GFX7-NEXT: v_and_b32_e32 v4, s6, v3 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v4, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v7 ; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 ; GFX7-NEXT: s_endpgm @@ -4794,86 +4521,80 @@ ; GFX10-LABEL: insertelement_v_v16i8_s_s: ; GFX10: ; %bb.0: ; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off -; GFX10-NEXT: v_mov_b32_e32 v4, 8 ; GFX10-NEXT: s_mov_b32 s0, 8 +; GFX10-NEXT: v_mov_b32_e32 v4, 8 +; GFX10-NEXT: s_mov_b32 s1, 16 ; GFX10-NEXT: s_movk_i32 s4, 0xff +; GFX10-NEXT: v_mov_b32_e32 v5, 16 ; GFX10-NEXT: s_lshr_b32 s5, s3, 2 -; GFX10-NEXT: s_and_b32 s1, s3, 3 -; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s5, 1 -; GFX10-NEXT: s_lshl_b32 s3, s1, 3 -; GFX10-NEXT: v_cmp_eq_u32_e64 s1, s5, 3 ; GFX10-NEXT: s_and_b32 s2, s2, s4 -; GFX10-NEXT: s_lshl_b32 s6, s4, s3 -; GFX10-NEXT: s_lshl_b32 s2, s2, s3 -; GFX10-NEXT: s_not_b32 s3, s6 +; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s5, 1 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 8, v1 
; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v9, 8, v2 -; GFX10-NEXT: v_lshlrev_b32_sdwa v5, s0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v7, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v13, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_b32_sdwa v14, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v2 -; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v5 +; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v10, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v12, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v11, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v13, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v10 ; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX10-NEXT: v_lshrrev_b32_e32 v11, 8, v3 -; GFX10-NEXT: v_and_or_b32 v1, v1, s4, v7 +; GFX10-NEXT: v_lshlrev_b32_sdwa v14, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_and_or_b32 v1, v1, s4, v12 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; GFX10-NEXT: v_lshlrev_b32_sdwa v15, v5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshrrev_b32_e32 v9, 24, v3 +; GFX10-NEXT: v_or3_b32 v0, v0, v11, v6 +; GFX10-NEXT: v_lshlrev_b32_sdwa v16, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_or3_b32 v1, v1, v13, v7 +; GFX10-NEXT: v_and_or_b32 v2, v2, s4, v14 ; GFX10-NEXT: 
v_lshlrev_b32_e32 v8, 24, v8 -; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v4, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v15, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshrrev_b32_e32 v12, 24, v3 -; GFX10-NEXT: v_or3_b32 v0, v0, v13, v6 -; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v10 -; GFX10-NEXT: v_and_or_b32 v2, v2, s4, v9 -; GFX10-NEXT: v_lshlrev_b32_sdwa v7, v4, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_or3_b32 v1, v1, v14, v8 -; GFX10-NEXT: v_and_b32_sdwa v16, v3, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v12 -; GFX10-NEXT: v_or3_b32 v2, v2, v15, v5 -; GFX10-NEXT: v_and_or_b32 v3, v3, s4, v7 -; GFX10-NEXT: v_cndmask_b32_e32 v5, v0, v1, vcc_lo +; GFX10-NEXT: v_lshlrev_b32_sdwa v10, v5, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v3, v3, s4, v16 +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v9 +; GFX10-NEXT: v_cndmask_b32_e32 v7, v0, v1, vcc_lo +; GFX10-NEXT: v_or3_b32 v2, v2, v15, v8 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, s5, 2 -; GFX10-NEXT: v_or3_b32 v3, v3, v16, v6 -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v2, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v3, s1 -; GFX10-NEXT: v_and_or_b32 v5, v5, s3, s2 +; GFX10-NEXT: s_and_b32 s1, s3, 3 +; GFX10-NEXT: v_or3_b32 v3, v3, v10, v6 +; GFX10-NEXT: s_lshl_b32 s3, s1, 3 +; GFX10-NEXT: v_cmp_eq_u32_e64 s1, s5, 3 +; GFX10-NEXT: v_cndmask_b32_e64 v6, v7, v2, s0 +; GFX10-NEXT: s_lshl_b32 s6, s4, s3 +; GFX10-NEXT: s_lshl_b32 s2, s2, s3 +; GFX10-NEXT: s_not_b32 s3, s6 +; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v3, s1 +; GFX10-NEXT: v_and_or_b32 v6, v6, s3, s2 ; GFX10-NEXT: v_cmp_eq_u32_e64 s2, s5, 0 -; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v5, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v5, s1 -; GFX10-NEXT: 
v_cndmask_b32_e64 v0, v0, v5, s2 -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v9, 8, v2 -; GFX10-NEXT: v_lshrrev_b32_e32 v11, 8, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v0 +; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v6, vcc_lo +; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v6, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v6, s2 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v6, s1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v12, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v14, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v10, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v9, 24, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v2 -; GFX10-NEXT: v_lshrrev_b32_e32 v12, 24, v3 -; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v7, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v4, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v4, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v13, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_b32_sdwa v14, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_b32_sdwa v15, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_b32_sdwa v16, v3, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 
24, v6 -; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v5 +; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v1 +; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v11, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v13, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v15, v5, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v5, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_and_or_b32 v3, v3, s4, v4 -; GFX10-NEXT: v_lshlrev_b32_e32 v11, 24, v12 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v9 +; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v10 +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; GFX10-NEXT: v_and_or_b32 v1, v1, s4, v12 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; GFX10-NEXT: v_and_or_b32 v2, v2, s4, v14 ; GFX10-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; GFX10-NEXT: v_and_or_b32 v1, v1, s4, v7 -; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; GFX10-NEXT: v_and_or_b32 v2, v2, s4, v9 +; GFX10-NEXT: v_or3_b32 v3, v3, v5, v4 ; GFX10-NEXT: v_mov_b32_e32 v4, 0 -; GFX10-NEXT: v_or3_b32 v0, v0, v13, v6 -; GFX10-NEXT: v_or3_b32 v1, v1, v14, v8 -; GFX10-NEXT: v_or3_b32 v3, v3, v16, v11 -; GFX10-NEXT: v_or3_b32 v2, v2, v15, v10 +; GFX10-NEXT: v_or3_b32 v0, v0, v11, v6 +; GFX10-NEXT: v_or3_b32 v1, v1, v13, v7 +; GFX10-NEXT: v_or3_b32 v2, v2, v15, v8 ; GFX10-NEXT: v_mov_b32_e32 v5, 0 ; GFX10-NEXT: global_store_dwordx4 v[4:5], v[0:3], off ; GFX10-NEXT: s_endpgm @@ -4887,111 +4608,103 @@ ; GFX9-LABEL: insertelement_s_v16i8_v_s: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 -; GFX9-NEXT: s_movk_i32 s18, 0xff -; GFX9-NEXT: v_and_b32_e32 v0, s18, v0 +; GFX9-NEXT: s_mov_b32 s13, 0x80008 +; GFX9-NEXT: s_movk_i32 s11, 0xff +; GFX9-NEXT: v_and_b32_e32 v0, s11, v0 ; GFX9-NEXT: s_mov_b32 s5, 8 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_lshr_b32 
s6, s0, 8 -; GFX9-NEXT: s_and_b32 s6, s6, s18 -; GFX9-NEXT: s_lshr_b32 s7, s0, 16 -; GFX9-NEXT: s_lshr_b32 s8, s0, 24 -; GFX9-NEXT: s_and_b32 s0, s0, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s6 -; GFX9-NEXT: s_and_b32 s6, s7, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 16 -; GFX9-NEXT: s_or_b32 s0, s0, s6 -; GFX9-NEXT: s_lshl_b32 s6, s8, 24 -; GFX9-NEXT: s_lshr_b32 s9, s1, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s6 -; GFX9-NEXT: s_and_b32 s6, s9, s18 -; GFX9-NEXT: s_lshr_b32 s10, s1, 16 -; GFX9-NEXT: s_lshr_b32 s11, s1, 24 -; GFX9-NEXT: s_and_b32 s1, s1, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 8 -; GFX9-NEXT: s_or_b32 s1, s1, s6 -; GFX9-NEXT: s_and_b32 s6, s10, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 16 -; GFX9-NEXT: s_or_b32 s1, s1, s6 -; GFX9-NEXT: s_lshl_b32 s6, s11, 24 -; GFX9-NEXT: s_lshr_b32 s12, s2, 8 -; GFX9-NEXT: s_or_b32 s1, s1, s6 -; GFX9-NEXT: s_and_b32 s6, s12, s18 -; GFX9-NEXT: s_lshr_b32 s13, s2, 16 -; GFX9-NEXT: s_lshr_b32 s14, s2, 24 -; GFX9-NEXT: s_and_b32 s2, s2, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 8 -; GFX9-NEXT: s_or_b32 s2, s2, s6 -; GFX9-NEXT: s_and_b32 s6, s13, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 16 -; GFX9-NEXT: s_or_b32 s2, s2, s6 -; GFX9-NEXT: s_lshl_b32 s6, s14, 24 -; GFX9-NEXT: s_lshr_b32 s15, s3, 8 -; GFX9-NEXT: s_or_b32 s2, s2, s6 -; GFX9-NEXT: s_and_b32 s6, s15, s18 -; GFX9-NEXT: s_lshr_b32 s16, s3, 16 -; GFX9-NEXT: s_lshr_b32 s17, s3, 24 -; GFX9-NEXT: s_and_b32 s3, s3, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 8 -; GFX9-NEXT: s_or_b32 s3, s3, s6 -; GFX9-NEXT: s_and_b32 s6, s16, s18 -; GFX9-NEXT: s_lshl_b32 s6, s6, 16 -; GFX9-NEXT: s_or_b32 s3, s3, s6 -; GFX9-NEXT: s_lshl_b32 s6, s17, 24 -; GFX9-NEXT: s_or_b32 s3, s3, s6 -; GFX9-NEXT: s_lshr_b32 s6, s4, 2 -; GFX9-NEXT: s_cmp_eq_u32 s6, 1 -; GFX9-NEXT: s_cselect_b32 s7, s1, s0 -; GFX9-NEXT: s_cmp_eq_u32 s6, 2 -; GFX9-NEXT: s_cselect_b32 s7, s2, s7 -; GFX9-NEXT: s_cmp_eq_u32 s6, 3 -; GFX9-NEXT: s_cselect_b32 s7, s3, s7 +; GFX9-NEXT: s_bfe_u32 s14, s0, s13 +; GFX9-NEXT: s_and_b32 
s12, s0, s11 +; GFX9-NEXT: s_lshl_b32 s14, s14, 8 +; GFX9-NEXT: s_or_b32 s12, s12, s14 +; GFX9-NEXT: s_mov_b32 s14, 0x80010 +; GFX9-NEXT: s_lshr_b32 s7, s0, 24 +; GFX9-NEXT: s_bfe_u32 s0, s0, s14 +; GFX9-NEXT: s_lshl_b32 s0, s0, 16 +; GFX9-NEXT: s_or_b32 s0, s12, s0 +; GFX9-NEXT: s_bfe_u32 s12, s1, s13 +; GFX9-NEXT: s_lshl_b32 s7, s7, 24 +; GFX9-NEXT: s_or_b32 s0, s0, s7 +; GFX9-NEXT: s_lshr_b32 s8, s1, 24 +; GFX9-NEXT: s_and_b32 s7, s1, s11 +; GFX9-NEXT: s_bfe_u32 s1, s1, s14 +; GFX9-NEXT: s_lshl_b32 s12, s12, 8 +; GFX9-NEXT: s_or_b32 s7, s7, s12 +; GFX9-NEXT: s_lshl_b32 s1, s1, 16 +; GFX9-NEXT: s_or_b32 s1, s7, s1 +; GFX9-NEXT: s_lshl_b32 s7, s8, 24 +; GFX9-NEXT: s_bfe_u32 s8, s2, s13 +; GFX9-NEXT: s_or_b32 s1, s1, s7 +; GFX9-NEXT: s_lshr_b32 s9, s2, 24 +; GFX9-NEXT: s_and_b32 s7, s2, s11 +; GFX9-NEXT: s_bfe_u32 s2, s2, s14 +; GFX9-NEXT: s_lshl_b32 s8, s8, 8 +; GFX9-NEXT: s_or_b32 s7, s7, s8 +; GFX9-NEXT: s_lshl_b32 s2, s2, 16 +; GFX9-NEXT: s_bfe_u32 s8, s3, s13 +; GFX9-NEXT: s_or_b32 s2, s7, s2 +; GFX9-NEXT: s_lshl_b32 s7, s9, 24 +; GFX9-NEXT: s_or_b32 s2, s2, s7 +; GFX9-NEXT: s_lshr_b32 s10, s3, 24 +; GFX9-NEXT: s_and_b32 s7, s3, s11 +; GFX9-NEXT: s_bfe_u32 s3, s3, s14 +; GFX9-NEXT: s_lshl_b32 s8, s8, 8 +; GFX9-NEXT: s_or_b32 s7, s7, s8 +; GFX9-NEXT: s_lshl_b32 s3, s3, 16 +; GFX9-NEXT: s_or_b32 s3, s7, s3 +; GFX9-NEXT: s_lshl_b32 s7, s10, 24 +; GFX9-NEXT: s_or_b32 s3, s3, s7 +; GFX9-NEXT: s_lshr_b32 s7, s4, 2 +; GFX9-NEXT: s_cmp_eq_u32 s7, 1 +; GFX9-NEXT: s_cselect_b32 s8, s1, s0 +; GFX9-NEXT: s_cmp_eq_u32 s7, 2 +; GFX9-NEXT: s_cselect_b32 s8, s2, s8 +; GFX9-NEXT: s_cmp_eq_u32 s7, 3 +; GFX9-NEXT: s_cselect_b32 s8, s3, s8 ; GFX9-NEXT: s_and_b32 s4, s4, 3 ; GFX9-NEXT: s_lshl_b32 s4, s4, 3 -; GFX9-NEXT: s_lshl_b32 s8, s18, s4 -; GFX9-NEXT: s_andn2_b32 s7, s7, s8 -; GFX9-NEXT: v_mov_b32_e32 v1, s7 +; GFX9-NEXT: s_lshl_b32 s9, s11, s4 +; GFX9-NEXT: s_andn2_b32 s8, s8, s9 +; GFX9-NEXT: v_mov_b32_e32 v1, s8 ; GFX9-NEXT: v_lshl_or_b32 v4, v0, s4, v1 ; GFX9-NEXT: 
v_mov_b32_e32 v0, s0 -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s6, 0 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s7, 0 ; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc ; GFX9-NEXT: v_mov_b32_e32 v1, s1 -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s6, 1 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s7, 1 ; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc ; GFX9-NEXT: v_mov_b32_e32 v2, s2 -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s6, 2 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s7, 2 ; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; GFX9-NEXT: v_mov_b32_e32 v3, s3 -; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s6, 3 +; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s7, 3 ; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc -; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_or_b32 v4, v0, s18, v4 -; GFX9-NEXT: v_and_b32_sdwa v0, v0, s18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v8, s5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: s_mov_b32 s6, 16 +; GFX9-NEXT: v_and_or_b32 v8, v0, s11, v8 +; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX9-NEXT: v_or3_b32 v0, v8, v0, v4 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v4, v1, s11, v4 +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX9-NEXT: v_or3_b32 v0, v4, v0, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 8, v1 +; GFX9-NEXT: v_or3_b32 v1, v4, v1, v5 ; GFX9-NEXT: v_mov_b32_e32 v4, 8 -; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v4, v6 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX9-NEXT: v_and_or_b32 v5, v1, s18, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v8, 8, v2 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v7 -; GFX9-NEXT: v_or3_b32 v1, v5, v1, v6 -; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v10, 8, v3 -; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_or_b32 v5, v2, s18, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v11, 24, v3 -; GFX9-NEXT: v_and_b32_sdwa v2, v2, s18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v9 +; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v2 +; GFX9-NEXT: v_mov_b32_e32 v8, 16 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v5, v2, s11, v5 +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v3 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6 ; GFX9-NEXT: v_or3_b32 v2, v5, v2, v6 -; GFX9-NEXT: v_and_or_b32 v4, v3, s18, v4 -; GFX9-NEXT: v_and_b32_sdwa v3, v3, s18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v11 +; GFX9-NEXT: v_and_or_b32 v4, v3, s11, v4 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v7 ; GFX9-NEXT: v_or3_b32 v3, v4, v3, v5 ; GFX9-NEXT: v_mov_b32_e32 v4, 0 ; GFX9-NEXT: v_mov_b32_e32 v5, 0 @@ -5001,56 +4714,51 @@ ; 
GFX8-LABEL: insertelement_s_v16i8_v_s: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 -; GFX8-NEXT: s_movk_i32 s17, 0xff -; GFX8-NEXT: v_mov_b32_e32 v12, 8 +; GFX8-NEXT: s_mov_b32 s11, 0x80008 +; GFX8-NEXT: s_movk_i32 s9, 0xff +; GFX8-NEXT: v_mov_b32_e32 v8, 8 +; GFX8-NEXT: v_mov_b32_e32 v10, 16 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_lshr_b32 s5, s0, 8 -; GFX8-NEXT: s_and_b32 s5, s5, s17 -; GFX8-NEXT: s_lshr_b32 s6, s0, 16 -; GFX8-NEXT: s_lshr_b32 s7, s0, 24 -; GFX8-NEXT: s_and_b32 s0, s0, s17 -; GFX8-NEXT: s_lshl_b32 s5, s5, 8 -; GFX8-NEXT: s_or_b32 s0, s0, s5 -; GFX8-NEXT: s_and_b32 s5, s6, s17 -; GFX8-NEXT: s_lshl_b32 s5, s5, 16 -; GFX8-NEXT: s_or_b32 s0, s0, s5 -; GFX8-NEXT: s_lshl_b32 s5, s7, 24 -; GFX8-NEXT: s_lshr_b32 s8, s1, 8 +; GFX8-NEXT: s_bfe_u32 s12, s0, s11 +; GFX8-NEXT: s_and_b32 s10, s0, s9 +; GFX8-NEXT: s_lshl_b32 s12, s12, 8 +; GFX8-NEXT: s_or_b32 s10, s10, s12 +; GFX8-NEXT: s_mov_b32 s12, 0x80010 +; GFX8-NEXT: s_lshr_b32 s5, s0, 24 +; GFX8-NEXT: s_bfe_u32 s0, s0, s12 +; GFX8-NEXT: s_lshl_b32 s0, s0, 16 +; GFX8-NEXT: s_or_b32 s0, s10, s0 +; GFX8-NEXT: s_bfe_u32 s10, s1, s11 +; GFX8-NEXT: s_lshl_b32 s5, s5, 24 ; GFX8-NEXT: s_or_b32 s0, s0, s5 -; GFX8-NEXT: s_and_b32 s5, s8, s17 -; GFX8-NEXT: s_lshr_b32 s9, s1, 16 -; GFX8-NEXT: s_lshr_b32 s10, s1, 24 -; GFX8-NEXT: s_and_b32 s1, s1, s17 -; GFX8-NEXT: s_lshl_b32 s5, s5, 8 -; GFX8-NEXT: s_or_b32 s1, s1, s5 -; GFX8-NEXT: s_and_b32 s5, s9, s17 -; GFX8-NEXT: s_lshl_b32 s5, s5, 16 -; GFX8-NEXT: s_or_b32 s1, s1, s5 -; GFX8-NEXT: s_lshl_b32 s5, s10, 24 -; GFX8-NEXT: s_lshr_b32 s11, s2, 8 +; GFX8-NEXT: s_lshr_b32 s6, s1, 24 +; GFX8-NEXT: s_and_b32 s5, s1, s9 +; GFX8-NEXT: s_bfe_u32 s1, s1, s12 +; GFX8-NEXT: s_lshl_b32 s10, s10, 8 +; GFX8-NEXT: s_or_b32 s5, s5, s10 +; GFX8-NEXT: s_lshl_b32 s1, s1, 16 +; GFX8-NEXT: s_or_b32 s1, s5, s1 +; GFX8-NEXT: s_lshl_b32 s5, s6, 24 +; GFX8-NEXT: s_bfe_u32 s6, s2, s11 ; GFX8-NEXT: s_or_b32 s1, s1, s5 -; GFX8-NEXT: s_and_b32 s5, s11, s17 -; 
GFX8-NEXT: s_lshr_b32 s12, s2, 16 -; GFX8-NEXT: s_lshr_b32 s13, s2, 24 -; GFX8-NEXT: s_and_b32 s2, s2, s17 -; GFX8-NEXT: s_lshl_b32 s5, s5, 8 -; GFX8-NEXT: s_or_b32 s2, s2, s5 -; GFX8-NEXT: s_and_b32 s5, s12, s17 -; GFX8-NEXT: s_lshl_b32 s5, s5, 16 -; GFX8-NEXT: s_or_b32 s2, s2, s5 -; GFX8-NEXT: s_lshl_b32 s5, s13, 24 -; GFX8-NEXT: s_lshr_b32 s14, s3, 8 +; GFX8-NEXT: s_lshr_b32 s7, s2, 24 +; GFX8-NEXT: s_and_b32 s5, s2, s9 +; GFX8-NEXT: s_bfe_u32 s2, s2, s12 +; GFX8-NEXT: s_lshl_b32 s6, s6, 8 +; GFX8-NEXT: s_or_b32 s5, s5, s6 +; GFX8-NEXT: s_lshl_b32 s2, s2, 16 +; GFX8-NEXT: s_bfe_u32 s6, s3, s11 +; GFX8-NEXT: s_or_b32 s2, s5, s2 +; GFX8-NEXT: s_lshl_b32 s5, s7, 24 ; GFX8-NEXT: s_or_b32 s2, s2, s5 -; GFX8-NEXT: s_and_b32 s5, s14, s17 -; GFX8-NEXT: s_lshr_b32 s15, s3, 16 -; GFX8-NEXT: s_lshr_b32 s16, s3, 24 -; GFX8-NEXT: s_and_b32 s3, s3, s17 -; GFX8-NEXT: s_lshl_b32 s5, s5, 8 -; GFX8-NEXT: s_or_b32 s3, s3, s5 -; GFX8-NEXT: s_and_b32 s5, s15, s17 -; GFX8-NEXT: s_lshl_b32 s5, s5, 16 -; GFX8-NEXT: s_or_b32 s3, s3, s5 -; GFX8-NEXT: s_lshl_b32 s5, s16, 24 +; GFX8-NEXT: s_lshr_b32 s8, s3, 24 +; GFX8-NEXT: s_and_b32 s5, s3, s9 +; GFX8-NEXT: s_bfe_u32 s3, s3, s12 +; GFX8-NEXT: s_lshl_b32 s6, s6, 8 +; GFX8-NEXT: s_or_b32 s5, s5, s6 +; GFX8-NEXT: s_lshl_b32 s3, s3, 16 +; GFX8-NEXT: s_or_b32 s3, s5, s3 +; GFX8-NEXT: s_lshl_b32 s5, s8, 24 ; GFX8-NEXT: s_or_b32 s3, s3, s5 ; GFX8-NEXT: s_lshr_b32 s5, s4, 2 ; GFX8-NEXT: s_cmp_eq_u32 s5, 1 @@ -5062,7 +4770,7 @@ ; GFX8-NEXT: s_and_b32 s4, s4, 3 ; GFX8-NEXT: s_lshl_b32 s4, s4, 3 ; GFX8-NEXT: v_mov_b32_e32 v1, s4 -; GFX8-NEXT: s_lshl_b32 s4, s17, s4 +; GFX8-NEXT: s_lshl_b32 s4, s9, s4 ; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX8-NEXT: s_andn2_b32 s4, s6, s4 ; GFX8-NEXT: v_or_b32_e32 v4, s4, v0 @@ -5075,41 +4783,37 @@ ; GFX8-NEXT: v_mov_b32_e32 v2, s2 ; GFX8-NEXT: v_cmp_eq_u32_e64 vcc, s5, 2 ; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc +; GFX8-NEXT: 
v_lshlrev_b32_sdwa v9, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX8-NEXT: v_mov_b32_e32 v3, s3 ; GFX8-NEXT: v_cmp_eq_u32_e64 vcc, s5, 3 ; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v12, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_mov_b32_e32 v12, s17 -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v0, v0, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, v4, v0 -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v5 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX8-NEXT: v_or_b32_sdwa v9, v0, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v10, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_e32 v0, v9, v0 +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v4 ; GFX8-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX8-NEXT: v_lshrrev_b32_e32 v6, 8, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX8-NEXT: v_or_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v10, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_e32 v1, v4, v1 +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v5 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v4 ; GFX8-NEXT: v_mov_b32_e32 v4, 8 -; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX8-NEXT: v_or_b32_sdwa v5, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v1, v1, v12 
dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v1, v5, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v7 -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 8, v2 -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 8, v3 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v5 -; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v9, 24, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_mov_b32_e32 v8, 16 +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 24, v2 ; GFX8-NEXT: v_or_b32_sdwa v5, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v2, v2, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v3 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v3 ; GFX8-NEXT: v_or_b32_sdwa v4, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v3, v3, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_e32 v2, v5, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v9 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v6 ; GFX8-NEXT: v_or_b32_e32 v3, v4, v3 -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v11 +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v7 ; GFX8-NEXT: v_or_b32_e32 v2, v2, v5 ; GFX8-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX8-NEXT: v_mov_b32_e32 v4, 0 @@ -5120,56 +4824,50 @@ ; GFX7-LABEL: insertelement_s_v16i8_v_s: ; 
GFX7: ; %bb.0: ; GFX7-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 -; GFX7-NEXT: s_movk_i32 s17, 0xff -; GFX7-NEXT: v_and_b32_e32 v0, s17, v0 +; GFX7-NEXT: s_mov_b32 s11, 0x80008 +; GFX7-NEXT: s_movk_i32 s9, 0xff +; GFX7-NEXT: v_and_b32_e32 v0, s9, v0 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_lshr_b32 s5, s0, 8 -; GFX7-NEXT: s_and_b32 s5, s5, s17 -; GFX7-NEXT: s_lshr_b32 s6, s0, 16 -; GFX7-NEXT: s_lshr_b32 s7, s0, 24 -; GFX7-NEXT: s_and_b32 s0, s0, s17 -; GFX7-NEXT: s_lshl_b32 s5, s5, 8 -; GFX7-NEXT: s_or_b32 s0, s0, s5 -; GFX7-NEXT: s_and_b32 s5, s6, s17 -; GFX7-NEXT: s_lshl_b32 s5, s5, 16 -; GFX7-NEXT: s_or_b32 s0, s0, s5 -; GFX7-NEXT: s_lshl_b32 s5, s7, 24 -; GFX7-NEXT: s_lshr_b32 s8, s1, 8 +; GFX7-NEXT: s_bfe_u32 s12, s0, s11 +; GFX7-NEXT: s_and_b32 s10, s0, s9 +; GFX7-NEXT: s_lshl_b32 s12, s12, 8 +; GFX7-NEXT: s_or_b32 s10, s10, s12 +; GFX7-NEXT: s_mov_b32 s12, 0x80010 +; GFX7-NEXT: s_lshr_b32 s5, s0, 24 +; GFX7-NEXT: s_bfe_u32 s0, s0, s12 +; GFX7-NEXT: s_lshl_b32 s0, s0, 16 +; GFX7-NEXT: s_or_b32 s0, s10, s0 +; GFX7-NEXT: s_bfe_u32 s10, s1, s11 +; GFX7-NEXT: s_lshl_b32 s5, s5, 24 ; GFX7-NEXT: s_or_b32 s0, s0, s5 -; GFX7-NEXT: s_and_b32 s5, s8, s17 -; GFX7-NEXT: s_lshr_b32 s9, s1, 16 -; GFX7-NEXT: s_lshr_b32 s10, s1, 24 -; GFX7-NEXT: s_and_b32 s1, s1, s17 -; GFX7-NEXT: s_lshl_b32 s5, s5, 8 -; GFX7-NEXT: s_or_b32 s1, s1, s5 -; GFX7-NEXT: s_and_b32 s5, s9, s17 -; GFX7-NEXT: s_lshl_b32 s5, s5, 16 -; GFX7-NEXT: s_or_b32 s1, s1, s5 -; GFX7-NEXT: s_lshl_b32 s5, s10, 24 -; GFX7-NEXT: s_lshr_b32 s11, s2, 8 +; GFX7-NEXT: s_lshr_b32 s6, s1, 24 +; GFX7-NEXT: s_and_b32 s5, s1, s9 +; GFX7-NEXT: s_bfe_u32 s1, s1, s12 +; GFX7-NEXT: s_lshl_b32 s10, s10, 8 +; GFX7-NEXT: s_or_b32 s5, s5, s10 +; GFX7-NEXT: s_lshl_b32 s1, s1, 16 +; GFX7-NEXT: s_or_b32 s1, s5, s1 +; GFX7-NEXT: s_lshl_b32 s5, s6, 24 +; GFX7-NEXT: s_bfe_u32 s6, s2, s11 ; GFX7-NEXT: s_or_b32 s1, s1, s5 -; GFX7-NEXT: s_and_b32 s5, s11, s17 -; GFX7-NEXT: s_lshr_b32 s12, s2, 16 -; GFX7-NEXT: s_lshr_b32 s13, s2, 24 
-; GFX7-NEXT: s_and_b32 s2, s2, s17 -; GFX7-NEXT: s_lshl_b32 s5, s5, 8 -; GFX7-NEXT: s_or_b32 s2, s2, s5 -; GFX7-NEXT: s_and_b32 s5, s12, s17 -; GFX7-NEXT: s_lshl_b32 s5, s5, 16 -; GFX7-NEXT: s_or_b32 s2, s2, s5 -; GFX7-NEXT: s_lshl_b32 s5, s13, 24 -; GFX7-NEXT: s_lshr_b32 s14, s3, 8 +; GFX7-NEXT: s_lshr_b32 s7, s2, 24 +; GFX7-NEXT: s_and_b32 s5, s2, s9 +; GFX7-NEXT: s_bfe_u32 s2, s2, s12 +; GFX7-NEXT: s_lshl_b32 s6, s6, 8 +; GFX7-NEXT: s_or_b32 s5, s5, s6 +; GFX7-NEXT: s_lshl_b32 s2, s2, 16 +; GFX7-NEXT: s_bfe_u32 s6, s3, s11 +; GFX7-NEXT: s_or_b32 s2, s5, s2 +; GFX7-NEXT: s_lshl_b32 s5, s7, 24 ; GFX7-NEXT: s_or_b32 s2, s2, s5 -; GFX7-NEXT: s_and_b32 s5, s14, s17 -; GFX7-NEXT: s_lshr_b32 s15, s3, 16 -; GFX7-NEXT: s_lshr_b32 s16, s3, 24 -; GFX7-NEXT: s_and_b32 s3, s3, s17 -; GFX7-NEXT: s_lshl_b32 s5, s5, 8 -; GFX7-NEXT: s_or_b32 s3, s3, s5 -; GFX7-NEXT: s_and_b32 s5, s15, s17 -; GFX7-NEXT: s_lshl_b32 s5, s5, 16 -; GFX7-NEXT: s_or_b32 s3, s3, s5 -; GFX7-NEXT: s_lshl_b32 s5, s16, 24 +; GFX7-NEXT: s_lshr_b32 s8, s3, 24 +; GFX7-NEXT: s_and_b32 s5, s3, s9 +; GFX7-NEXT: s_bfe_u32 s3, s3, s12 +; GFX7-NEXT: s_lshl_b32 s6, s6, 8 +; GFX7-NEXT: s_or_b32 s5, s5, s6 +; GFX7-NEXT: s_lshl_b32 s3, s3, 16 +; GFX7-NEXT: s_or_b32 s3, s5, s3 +; GFX7-NEXT: s_lshl_b32 s5, s8, 24 ; GFX7-NEXT: s_or_b32 s3, s3, s5 ; GFX7-NEXT: s_lshr_b32 s5, s4, 2 ; GFX7-NEXT: s_cmp_eq_u32 s5, 1 @@ -5181,7 +4879,7 @@ ; GFX7-NEXT: s_and_b32 s4, s4, 3 ; GFX7-NEXT: s_lshl_b32 s4, s4, 3 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, s4, v0 -; GFX7-NEXT: s_lshl_b32 s4, s17, s4 +; GFX7-NEXT: s_lshl_b32 s4, s9, s4 ; GFX7-NEXT: s_andn2_b32 s4, s6, s4 ; GFX7-NEXT: v_or_b32_e32 v4, s4, v0 ; GFX7-NEXT: v_mov_b32_e32 v0, s0 @@ -5192,57 +4890,49 @@ ; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc ; GFX7-NEXT: v_mov_b32_e32 v2, s2 ; GFX7-NEXT: v_cmp_eq_u32_e64 vcc, s5, 2 -; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc ; GFX7-NEXT: v_mov_b32_e32 v3, s3 +; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc +; GFX7-NEXT: v_bfe_u32 v9, v0, 8, 
8 ; GFX7-NEXT: v_cmp_eq_u32_e64 vcc, s5, 3 ; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX7-NEXT: v_and_b32_e32 v4, s17, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s17, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s17, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v6 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 8, v1 +; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v8, s9, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; GFX7-NEXT: v_or_b32_e32 v8, v8, v9 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v8, v0 +; GFX7-NEXT: v_bfe_u32 v8, v1, 8, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s17, v7 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s17, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s17, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v9 -; GFX7-NEXT: v_lshrrev_b32_e32 v10, 8, v2 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v4, s9, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 8, v8 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v1, v4, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v5 +; GFX7-NEXT: v_bfe_u32 v5, v2, 8, 8 ; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s17, v10 -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v12, 24, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s17, 
v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s17, v11 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v12 -; GFX7-NEXT: v_lshrrev_b32_e32 v13, 8, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v2 +; GFX7-NEXT: v_and_b32_e32 v4, s9, v2 +; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_bfe_u32 v5, v3, 8, 8 +; GFX7-NEXT: v_or_b32_e32 v2, v4, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v6 ; GFX7-NEXT: v_or_b32_e32 v2, v2, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s17, v13 -; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v15, 24, v3 -; GFX7-NEXT: v_and_b32_e32 v3, s17, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s17, v14 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v15 +; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v3 +; GFX7-NEXT: v_and_b32_e32 v4, s9, v3 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v4, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v7 ; GFX7-NEXT: s_mov_b64 s[0:1], 0 ; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX7-NEXT: s_mov_b32 s2, -1 @@ -5253,58 +4943,53 @@ ; GFX10-LABEL: insertelement_s_v16i8_v_s: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 +; GFX10-NEXT: s_mov_b32 s6, 0x80008 ; GFX10-NEXT: s_movk_i32 s5, 0xff -; GFX10-NEXT: v_mov_b32_e32 v9, 8 +; GFX10-NEXT: s_mov_b32 s7, 0x80010 ; GFX10-NEXT: v_and_b32_e32 v0, s5, v0 +; GFX10-NEXT: v_mov_b32_e32 v10, 8 +; GFX10-NEXT: v_mov_b32_e32 v12, 16 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s6, s0, 8 -; 
GFX10-NEXT: s_lshr_b32 s7, s0, 16 -; GFX10-NEXT: s_and_b32 s6, s6, s5 +; GFX10-NEXT: s_bfe_u32 s13, s0, s6 +; GFX10-NEXT: s_bfe_u32 s15, s1, s6 ; GFX10-NEXT: s_lshr_b32 s8, s0, 24 -; GFX10-NEXT: s_and_b32 s7, s7, s5 -; GFX10-NEXT: s_and_b32 s0, s0, s5 -; GFX10-NEXT: s_lshl_b32 s6, s6, 8 -; GFX10-NEXT: s_lshr_b32 s12, s2, 8 -; GFX10-NEXT: s_lshl_b32 s7, s7, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s6 -; GFX10-NEXT: s_lshr_b32 s9, s1, 8 -; GFX10-NEXT: s_or_b32 s0, s0, s7 -; GFX10-NEXT: s_and_b32 s7, s12, s5 +; GFX10-NEXT: s_lshr_b32 s9, s1, 24 +; GFX10-NEXT: s_and_b32 s12, s0, s5 +; GFX10-NEXT: s_bfe_u32 s0, s0, s7 +; GFX10-NEXT: s_and_b32 s14, s1, s5 +; GFX10-NEXT: s_bfe_u32 s1, s1, s7 +; GFX10-NEXT: s_lshl_b32 s13, s13, 8 +; GFX10-NEXT: s_lshl_b32 s15, s15, 8 +; GFX10-NEXT: s_or_b32 s12, s12, s13 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_lshl_b32 s1, s1, 16 +; GFX10-NEXT: s_or_b32 s13, s14, s15 +; GFX10-NEXT: s_bfe_u32 s17, s2, s6 +; GFX10-NEXT: s_bfe_u32 s6, s3, s6 ; GFX10-NEXT: s_lshl_b32 s8, s8, 24 -; GFX10-NEXT: s_lshr_b32 s13, s2, 16 -; GFX10-NEXT: s_lshr_b32 s10, s1, 16 -; GFX10-NEXT: s_and_b32 s9, s9, s5 -; GFX10-NEXT: s_lshr_b32 s14, s2, 24 +; GFX10-NEXT: s_or_b32 s0, s12, s0 +; GFX10-NEXT: s_lshl_b32 s9, s9, 24 +; GFX10-NEXT: s_or_b32 s1, s13, s1 ; GFX10-NEXT: s_or_b32 s0, s0, s8 -; GFX10-NEXT: s_and_b32 s2, s2, s5 -; GFX10-NEXT: s_lshl_b32 s7, s7, 8 -; GFX10-NEXT: s_and_b32 s8, s13, s5 -; GFX10-NEXT: s_lshr_b32 s11, s1, 24 -; GFX10-NEXT: s_and_b32 s10, s10, s5 -; GFX10-NEXT: s_or_b32 s2, s2, s7 -; GFX10-NEXT: s_lshl_b32 s7, s8, 16 -; GFX10-NEXT: s_lshr_b32 s15, s3, 8 -; GFX10-NEXT: s_and_b32 s1, s1, s5 -; GFX10-NEXT: s_lshl_b32 s9, s9, 8 -; GFX10-NEXT: s_lshr_b32 s16, s3, 16 -; GFX10-NEXT: s_or_b32 s2, s2, s7 -; GFX10-NEXT: s_and_b32 s7, s15, s5 -; GFX10-NEXT: s_lshl_b32 s6, s10, 16 ; GFX10-NEXT: s_or_b32 s1, s1, s9 -; GFX10-NEXT: s_lshr_b32 s17, s3, 24 -; GFX10-NEXT: s_or_b32 s1, s1, s6 +; GFX10-NEXT: s_lshr_b32 s10, s2, 24 +; GFX10-NEXT: s_and_b32 
s16, s2, s5 +; GFX10-NEXT: s_lshl_b32 s8, s17, 8 +; GFX10-NEXT: s_bfe_u32 s2, s2, s7 +; GFX10-NEXT: s_lshr_b32 s11, s3, 24 +; GFX10-NEXT: s_and_b32 s9, s3, s5 +; GFX10-NEXT: s_bfe_u32 s3, s3, s7 +; GFX10-NEXT: s_lshl_b32 s6, s6, 8 +; GFX10-NEXT: s_or_b32 s8, s16, s8 +; GFX10-NEXT: s_lshl_b32 s2, s2, 16 +; GFX10-NEXT: s_or_b32 s6, s9, s6 +; GFX10-NEXT: s_lshl_b32 s3, s3, 16 +; GFX10-NEXT: s_or_b32 s2, s8, s2 +; GFX10-NEXT: s_lshl_b32 s8, s10, 24 +; GFX10-NEXT: s_or_b32 s3, s6, s3 ; GFX10-NEXT: s_lshl_b32 s6, s11, 24 -; GFX10-NEXT: s_and_b32 s3, s3, s5 -; GFX10-NEXT: s_lshl_b32 s7, s7, 8 -; GFX10-NEXT: s_and_b32 s8, s16, s5 -; GFX10-NEXT: s_or_b32 s1, s1, s6 -; GFX10-NEXT: s_lshl_b32 s6, s14, 24 -; GFX10-NEXT: s_or_b32 s3, s3, s7 -; GFX10-NEXT: s_lshl_b32 s7, s8, 16 -; GFX10-NEXT: s_or_b32 s2, s2, s6 -; GFX10-NEXT: s_or_b32 s3, s3, s7 -; GFX10-NEXT: s_lshl_b32 s6, s17, 24 ; GFX10-NEXT: s_lshr_b32 s7, s4, 2 +; GFX10-NEXT: s_or_b32 s2, s2, s8 ; GFX10-NEXT: s_or_b32 s3, s3, s6 ; GFX10-NEXT: s_cmp_eq_u32 s7, 1 ; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s7, 0 @@ -5325,40 +5010,37 @@ ; GFX10-NEXT: s_mov_b32 s0, 8 ; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s7, 1 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v0 +; GFX10-NEXT: s_mov_b32 s1, 16 +; GFX10-NEXT: v_lshlrev_b32_sdwa v6, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s7, 2 -; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX10-NEXT: v_lshrrev_b32_e32 v6, 8, v1 +; GFX10-NEXT: v_and_or_b32 v6, v0, s5, v6 +; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v1 ; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo ; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s7, 3 -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX10-NEXT: v_lshlrev_b32_sdwa v6, v9, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshrrev_b32_e32 v8, 8, v2 +; GFX10-NEXT: 
v_lshlrev_b32_sdwa v9, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v2 ; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v2 -; GFX10-NEXT: v_and_or_b32 v6, v1, s5, v6 -; GFX10-NEXT: v_lshlrev_b32_sdwa v8, v9, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshrrev_b32_e32 v11, 8, v3 -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, s0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshrrev_b32_e32 v12, 24, v3 -; GFX10-NEXT: v_and_b32_sdwa v1, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v11, v10, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_and_or_b32 v9, v1, s5, v9 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v10, v10, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX10-NEXT: v_and_or_b32 v11, v2, s5, v11 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v12, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v9, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_or_b32 v4, v0, s5, v4 -; GFX10-NEXT: v_and_b32_sdwa v0, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v8, v2, s5, v8 -; GFX10-NEXT: v_and_b32_sdwa v2, v2, s5 dst_sel:WORD_1 
dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; GFX10-NEXT: v_and_or_b32 v9, v3, s5, v9 -; GFX10-NEXT: v_and_b32_sdwa v3, v3, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v11, 24, v12 -; GFX10-NEXT: v_or3_b32 v0, v4, v0, v5 +; GFX10-NEXT: v_and_or_b32 v10, v3, s5, v10 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v12, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; GFX10-NEXT: v_or3_b32 v0, v6, v0, v4 +; GFX10-NEXT: v_or3_b32 v1, v9, v1, v5 ; GFX10-NEXT: v_mov_b32_e32 v4, 0 -; GFX10-NEXT: v_or3_b32 v1, v6, v1, v7 -; GFX10-NEXT: v_or3_b32 v2, v8, v2, v10 -; GFX10-NEXT: v_or3_b32 v3, v9, v3, v11 +; GFX10-NEXT: v_or3_b32 v2, v11, v2, v7 +; GFX10-NEXT: v_or3_b32 v3, v10, v3, v8 ; GFX10-NEXT: v_mov_b32_e32 v5, 0 ; GFX10-NEXT: global_store_dwordx4 v[4:5], v[0:3], off ; GFX10-NEXT: s_endpgm @@ -5372,68 +5054,62 @@ ; GFX9-LABEL: insertelement_s_v16i8_s_v: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 -; GFX9-NEXT: s_movk_i32 s18, 0xff +; GFX9-NEXT: s_mov_b32 s13, 0x80008 +; GFX9-NEXT: s_movk_i32 s12, 0xff ; GFX9-NEXT: v_lshrrev_b32_e32 v4, 2, v0 ; GFX9-NEXT: v_and_b32_e32 v0, 3, v0 -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_lshr_b32 s5, s0, 8 -; GFX9-NEXT: s_and_b32 s5, s5, s18 -; GFX9-NEXT: s_lshr_b32 s7, s0, 16 -; GFX9-NEXT: s_lshr_b32 s8, s0, 24 -; GFX9-NEXT: s_and_b32 s0, s0, s18 +; GFX9-NEXT: s_bfe_u32 s14, s0, s13 +; GFX9-NEXT: s_and_b32 s8, s0, s12 +; GFX9-NEXT: s_lshl_b32 s14, s14, 8 +; GFX9-NEXT: s_or_b32 s8, s8, s14 +; GFX9-NEXT: s_mov_b32 s14, 0x80010 +; GFX9-NEXT: s_lshr_b32 s5, s0, 24 +; GFX9-NEXT: s_bfe_u32 s0, s0, s14 +; GFX9-NEXT: s_lshl_b32 s0, s0, 16 +; GFX9-NEXT: s_or_b32 s0, s8, s0 +; GFX9-NEXT: s_lshl_b32 s5, s5, 24 +; GFX9-NEXT: s_or_b32 s8, s0, s5 +; GFX9-NEXT: s_bfe_u32 s5, s1, s13 +; GFX9-NEXT: s_lshr_b32 s9, 
s1, 24 +; GFX9-NEXT: s_and_b32 s0, s1, s12 +; GFX9-NEXT: s_bfe_u32 s1, s1, s14 ; GFX9-NEXT: s_lshl_b32 s5, s5, 8 ; GFX9-NEXT: s_or_b32 s0, s0, s5 -; GFX9-NEXT: s_and_b32 s5, s7, s18 -; GFX9-NEXT: s_lshl_b32 s5, s5, 16 -; GFX9-NEXT: s_or_b32 s0, s0, s5 -; GFX9-NEXT: s_lshl_b32 s5, s8, 24 -; GFX9-NEXT: s_lshr_b32 s9, s1, 8 -; GFX9-NEXT: s_or_b32 s8, s0, s5 -; GFX9-NEXT: s_lshr_b32 s10, s1, 16 -; GFX9-NEXT: s_lshr_b32 s11, s1, 24 -; GFX9-NEXT: s_and_b32 s0, s1, s18 -; GFX9-NEXT: s_and_b32 s1, s9, s18 -; GFX9-NEXT: s_lshl_b32 s1, s1, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s1 -; GFX9-NEXT: s_and_b32 s1, s10, s18 ; GFX9-NEXT: s_lshl_b32 s1, s1, 16 ; GFX9-NEXT: s_or_b32 s0, s0, s1 -; GFX9-NEXT: s_lshl_b32 s1, s11, 24 -; GFX9-NEXT: s_lshr_b32 s12, s2, 8 +; GFX9-NEXT: s_lshl_b32 s1, s9, 24 ; GFX9-NEXT: s_or_b32 s9, s0, s1 -; GFX9-NEXT: s_and_b32 s1, s12, s18 -; GFX9-NEXT: s_lshr_b32 s13, s2, 16 -; GFX9-NEXT: s_and_b32 s0, s2, s18 +; GFX9-NEXT: s_bfe_u32 s1, s2, s13 +; GFX9-NEXT: s_and_b32 s0, s2, s12 ; GFX9-NEXT: s_lshl_b32 s1, s1, 8 ; GFX9-NEXT: s_or_b32 s0, s0, s1 -; GFX9-NEXT: s_and_b32 s1, s13, s18 +; GFX9-NEXT: s_bfe_u32 s1, s2, s14 ; GFX9-NEXT: s_lshl_b32 s1, s1, 16 -; GFX9-NEXT: s_lshr_b32 s14, s2, 24 +; GFX9-NEXT: s_lshr_b32 s10, s2, 24 ; GFX9-NEXT: s_or_b32 s0, s0, s1 -; GFX9-NEXT: s_lshl_b32 s1, s14, 24 -; GFX9-NEXT: s_lshr_b32 s15, s3, 8 +; GFX9-NEXT: s_lshl_b32 s1, s10, 24 ; GFX9-NEXT: s_or_b32 s10, s0, s1 -; GFX9-NEXT: s_and_b32 s1, s15, s18 -; GFX9-NEXT: s_lshr_b32 s16, s3, 16 -; GFX9-NEXT: s_and_b32 s0, s3, s18 +; GFX9-NEXT: s_bfe_u32 s1, s3, s13 +; GFX9-NEXT: s_and_b32 s0, s3, s12 ; GFX9-NEXT: s_lshl_b32 s1, s1, 8 ; GFX9-NEXT: s_or_b32 s0, s0, s1 -; GFX9-NEXT: s_and_b32 s1, s16, s18 -; GFX9-NEXT: s_lshr_b32 s17, s3, 24 +; GFX9-NEXT: s_bfe_u32 s1, s3, s14 +; GFX9-NEXT: s_lshr_b32 s11, s3, 24 ; GFX9-NEXT: s_lshl_b32 s1, s1, 16 ; GFX9-NEXT: s_or_b32 s0, s0, s1 -; GFX9-NEXT: s_lshl_b32 s1, s17, 24 +; GFX9-NEXT: s_lshl_b32 s1, s11, 24 ; GFX9-NEXT: v_mov_b32_e32 v1, s8 
; GFX9-NEXT: v_mov_b32_e32 v2, s9 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4 ; GFX9-NEXT: s_or_b32 s11, s0, s1 ; GFX9-NEXT: v_mov_b32_e32 v3, s10 ; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc ; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v4 ; GFX9-NEXT: v_lshlrev_b32_e32 v0, 3, v0 -; GFX9-NEXT: s_and_b32 s4, s4, s18 +; GFX9-NEXT: s_and_b32 s4, s4, s12 ; GFX9-NEXT: v_lshlrev_b32_e64 v2, v0, s4 -; GFX9-NEXT: v_lshlrev_b32_e64 v0, v0, s18 +; GFX9-NEXT: v_lshlrev_b32_e64 v0, v0, s12 ; GFX9-NEXT: v_mov_b32_e32 v5, s11 ; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v3, s[0:1] ; GFX9-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v4 @@ -5443,41 +5119,39 @@ ; GFX9-NEXT: v_mov_b32_e32 v0, s8 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v4 ; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v5, s[4:5] +; GFX9-NEXT: s_mov_b32 s6, 8 ; GFX9-NEXT: v_mov_b32_e32 v1, s9 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v8, s6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: s_mov_b32 s7, 16 ; GFX9-NEXT: v_mov_b32_e32 v2, s10 ; GFX9-NEXT: v_mov_b32_e32 v3, s11 -; GFX9-NEXT: s_mov_b32 s6, 8 -; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, v0 +; GFX9-NEXT: v_and_or_b32 v8, v0, s12, v8 ; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc +; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX9-NEXT: v_or3_b32 v0, v8, v0, v4 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[0:1] ; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v5, s[2:3] -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s6, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_or_b32 v4, v0, s18, v4 -; GFX9-NEXT: v_and_b32_sdwa v0, v0, s18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; 
GFX9-NEXT: v_and_or_b32 v4, v1, s12, v4 +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s7, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX9-NEXT: v_or3_b32 v0, v4, v0, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 8, v1 +; GFX9-NEXT: v_or3_b32 v1, v4, v1, v5 ; GFX9-NEXT: v_mov_b32_e32 v4, 8 -; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v4, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX9-NEXT: v_and_or_b32 v5, v1, s18, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v8, 8, v2 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v7 -; GFX9-NEXT: v_or3_b32 v1, v5, v1, v6 -; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v10, 8, v3 -; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_or_b32 v5, v2, s18, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v11, 24, v3 -; GFX9-NEXT: v_and_b32_sdwa v2, v2, s18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v9 +; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v2 +; GFX9-NEXT: v_mov_b32_e32 v8, 16 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v5, v2, s12, v5 +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v3 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6 ; GFX9-NEXT: v_or3_b32 v2, v5, v2, v6 -; GFX9-NEXT: v_and_or_b32 v4, v3, s18, v4 -; GFX9-NEXT: v_and_b32_sdwa v3, v3, s18 
dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v11 +; GFX9-NEXT: v_and_or_b32 v4, v3, s12, v4 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v7 ; GFX9-NEXT: v_or3_b32 v3, v4, v3, v5 ; GFX9-NEXT: v_mov_b32_e32 v4, 0 ; GFX9-NEXT: v_mov_b32_e32 v5, 0 @@ -5487,68 +5161,62 @@ ; GFX8-LABEL: insertelement_s_v16i8_s_v: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 -; GFX8-NEXT: s_movk_i32 s18, 0xff +; GFX8-NEXT: s_mov_b32 s13, 0x80008 +; GFX8-NEXT: s_movk_i32 s12, 0xff +; GFX8-NEXT: s_mov_b32 s14, 0x80010 ; GFX8-NEXT: v_lshrrev_b32_e32 v4, 2, v0 -; GFX8-NEXT: v_and_b32_e32 v0, 3, v0 -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_lshr_b32 s5, s0, 8 -; GFX8-NEXT: s_and_b32 s5, s5, s18 -; GFX8-NEXT: s_lshr_b32 s6, s0, 16 -; GFX8-NEXT: s_lshr_b32 s7, s0, 24 -; GFX8-NEXT: s_and_b32 s0, s0, s18 +; GFX8-NEXT: s_bfe_u32 s9, s0, s13 +; GFX8-NEXT: s_lshr_b32 s5, s0, 24 +; GFX8-NEXT: s_and_b32 s8, s0, s12 +; GFX8-NEXT: s_bfe_u32 s0, s0, s14 +; GFX8-NEXT: s_lshl_b32 s9, s9, 8 +; GFX8-NEXT: s_or_b32 s8, s8, s9 +; GFX8-NEXT: s_lshl_b32 s0, s0, 16 +; GFX8-NEXT: s_or_b32 s0, s8, s0 +; GFX8-NEXT: s_lshl_b32 s5, s5, 24 +; GFX8-NEXT: s_or_b32 s8, s0, s5 +; GFX8-NEXT: s_bfe_u32 s5, s1, s13 +; GFX8-NEXT: s_lshr_b32 s6, s1, 24 +; GFX8-NEXT: s_and_b32 s0, s1, s12 +; GFX8-NEXT: s_bfe_u32 s1, s1, s14 ; GFX8-NEXT: s_lshl_b32 s5, s5, 8 ; GFX8-NEXT: s_or_b32 s0, s0, s5 -; GFX8-NEXT: s_and_b32 s5, s6, s18 -; GFX8-NEXT: s_lshl_b32 s5, s5, 16 -; GFX8-NEXT: s_or_b32 s0, s0, s5 -; GFX8-NEXT: s_lshl_b32 s5, s7, 24 -; GFX8-NEXT: s_lshr_b32 s9, s1, 8 -; GFX8-NEXT: s_or_b32 s8, s0, s5 -; GFX8-NEXT: s_lshr_b32 s10, s1, 16 -; GFX8-NEXT: s_lshr_b32 s11, s1, 24 -; GFX8-NEXT: s_and_b32 s0, s1, s18 -; GFX8-NEXT: s_and_b32 s1, s9, s18 -; GFX8-NEXT: s_lshl_b32 s1, s1, 8 -; GFX8-NEXT: s_or_b32 
s0, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s10, s18 ; GFX8-NEXT: s_lshl_b32 s1, s1, 16 ; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_lshl_b32 s1, s11, 24 -; GFX8-NEXT: s_lshr_b32 s12, s2, 8 +; GFX8-NEXT: s_lshl_b32 s1, s6, 24 ; GFX8-NEXT: s_or_b32 s9, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s12, s18 -; GFX8-NEXT: s_lshr_b32 s13, s2, 16 -; GFX8-NEXT: s_and_b32 s0, s2, s18 +; GFX8-NEXT: s_bfe_u32 s1, s2, s13 +; GFX8-NEXT: s_and_b32 s0, s2, s12 ; GFX8-NEXT: s_lshl_b32 s1, s1, 8 ; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s13, s18 +; GFX8-NEXT: s_bfe_u32 s1, s2, s14 ; GFX8-NEXT: s_lshl_b32 s1, s1, 16 -; GFX8-NEXT: s_lshr_b32 s14, s2, 24 +; GFX8-NEXT: s_lshr_b32 s7, s2, 24 ; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_lshl_b32 s1, s14, 24 -; GFX8-NEXT: s_lshr_b32 s15, s3, 8 +; GFX8-NEXT: s_lshl_b32 s1, s7, 24 ; GFX8-NEXT: s_or_b32 s10, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s15, s18 -; GFX8-NEXT: s_lshr_b32 s16, s3, 16 -; GFX8-NEXT: s_and_b32 s0, s3, s18 +; GFX8-NEXT: s_bfe_u32 s1, s3, s13 +; GFX8-NEXT: s_and_b32 s0, s3, s12 ; GFX8-NEXT: s_lshl_b32 s1, s1, 8 ; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s16, s18 -; GFX8-NEXT: s_lshr_b32 s17, s3, 24 +; GFX8-NEXT: s_bfe_u32 s1, s3, s14 +; GFX8-NEXT: s_lshr_b32 s11, s3, 24 ; GFX8-NEXT: s_lshl_b32 s1, s1, 16 +; GFX8-NEXT: v_and_b32_e32 v0, 3, v0 ; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_lshl_b32 s1, s17, 24 +; GFX8-NEXT: s_lshl_b32 s1, s11, 24 ; GFX8-NEXT: v_mov_b32_e32 v1, s8 ; GFX8-NEXT: v_mov_b32_e32 v2, s9 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4 ; GFX8-NEXT: s_or_b32 s11, s0, s1 ; GFX8-NEXT: v_mov_b32_e32 v3, s10 ; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v4 ; GFX8-NEXT: v_lshlrev_b32_e32 v0, 3, v0 -; GFX8-NEXT: s_and_b32 s4, s4, s18 +; GFX8-NEXT: s_and_b32 s4, s4, s12 ; GFX8-NEXT: v_lshlrev_b32_e64 v2, v0, s4 -; GFX8-NEXT: v_lshlrev_b32_e64 v0, v0, s18 +; GFX8-NEXT: v_lshlrev_b32_e64 v0, v0, s12 ; GFX8-NEXT: v_mov_b32_e32 v5, s11 ; 
GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v3, s[0:1] ; GFX8-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v4 @@ -5559,45 +5227,42 @@ ; GFX8-NEXT: v_mov_b32_e32 v0, s8 ; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v4 ; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v5, s[4:5] -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX8-NEXT: v_mov_b32_e32 v12, 8 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v12, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX8-NEXT: v_mov_b32_e32 v8, 8 +; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_mov_b32_e32 v10, 16 ; GFX8-NEXT: v_mov_b32_e32 v1, s9 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX8-NEXT: v_or_b32_sdwa v9, v0, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v10, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc +; GFX8-NEXT: v_or_b32_e32 v0, v9, v0 +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v4 ; GFX8-NEXT: v_mov_b32_e32 v2, s10 ; GFX8-NEXT: v_mov_b32_e32 v3, s11 -; GFX8-NEXT: v_mov_b32_e32 v12, s18 -; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc +; GFX8-NEXT: v_or_b32_e32 v0, v0, v4 +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[0:1] ; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v5, s[2:3] -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v0, v0, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, v4, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX8-NEXT: v_or_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v10, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 
+; GFX8-NEXT: v_or_b32_e32 v1, v4, v1 ; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v5 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX8-NEXT: v_lshrrev_b32_e32 v6, 8, v1 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v4 ; GFX8-NEXT: v_mov_b32_e32 v4, 8 -; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX8-NEXT: v_or_b32_sdwa v5, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v1, v1, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v1, v5, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v7 -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 8, v2 -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 8, v3 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v5 -; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v9, 24, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_mov_b32_e32 v8, 16 +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 24, v2 ; GFX8-NEXT: v_or_b32_sdwa v5, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v2, v2, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v3 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v3 ; GFX8-NEXT: v_or_b32_sdwa v4, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v3, v3, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; 
GFX8-NEXT: v_lshlrev_b32_sdwa v3, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_e32 v2, v5, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v9 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v6 ; GFX8-NEXT: v_or_b32_e32 v3, v4, v3 -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v11 +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v7 ; GFX8-NEXT: v_or_b32_e32 v2, v2, v5 ; GFX8-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX8-NEXT: v_mov_b32_e32 v4, 0 @@ -5608,68 +5273,62 @@ ; GFX7-LABEL: insertelement_s_v16i8_s_v: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 -; GFX7-NEXT: s_movk_i32 s18, 0xff +; GFX7-NEXT: s_mov_b32 s13, 0x80008 +; GFX7-NEXT: s_movk_i32 s12, 0xff +; GFX7-NEXT: s_mov_b32 s14, 0x80010 ; GFX7-NEXT: v_lshrrev_b32_e32 v4, 2, v0 -; GFX7-NEXT: v_and_b32_e32 v0, 3, v0 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_lshr_b32 s5, s0, 8 -; GFX7-NEXT: s_and_b32 s5, s5, s18 -; GFX7-NEXT: s_lshr_b32 s6, s0, 16 -; GFX7-NEXT: s_lshr_b32 s7, s0, 24 -; GFX7-NEXT: s_and_b32 s0, s0, s18 +; GFX7-NEXT: s_bfe_u32 s9, s0, s13 +; GFX7-NEXT: s_lshr_b32 s5, s0, 24 +; GFX7-NEXT: s_and_b32 s8, s0, s12 +; GFX7-NEXT: s_bfe_u32 s0, s0, s14 +; GFX7-NEXT: s_lshl_b32 s9, s9, 8 +; GFX7-NEXT: s_or_b32 s8, s8, s9 +; GFX7-NEXT: s_lshl_b32 s0, s0, 16 +; GFX7-NEXT: s_or_b32 s0, s8, s0 +; GFX7-NEXT: s_lshl_b32 s5, s5, 24 +; GFX7-NEXT: s_or_b32 s8, s0, s5 +; GFX7-NEXT: s_bfe_u32 s5, s1, s13 +; GFX7-NEXT: s_lshr_b32 s6, s1, 24 +; GFX7-NEXT: s_and_b32 s0, s1, s12 +; GFX7-NEXT: s_bfe_u32 s1, s1, s14 ; GFX7-NEXT: s_lshl_b32 s5, s5, 8 ; GFX7-NEXT: s_or_b32 s0, s0, s5 -; GFX7-NEXT: s_and_b32 s5, s6, s18 -; GFX7-NEXT: s_lshl_b32 s5, s5, 16 -; GFX7-NEXT: s_or_b32 s0, s0, s5 -; GFX7-NEXT: s_lshl_b32 s5, s7, 24 -; GFX7-NEXT: s_lshr_b32 s9, s1, 8 -; GFX7-NEXT: s_or_b32 s8, s0, s5 -; GFX7-NEXT: s_lshr_b32 s10, s1, 16 -; GFX7-NEXT: s_lshr_b32 s11, s1, 24 -; GFX7-NEXT: s_and_b32 s0, s1, s18 -; GFX7-NEXT: s_and_b32 s1, s9, s18 -; 
GFX7-NEXT: s_lshl_b32 s1, s1, 8 -; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_and_b32 s1, s10, s18 ; GFX7-NEXT: s_lshl_b32 s1, s1, 16 ; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_lshl_b32 s1, s11, 24 -; GFX7-NEXT: s_lshr_b32 s12, s2, 8 +; GFX7-NEXT: s_lshl_b32 s1, s6, 24 ; GFX7-NEXT: s_or_b32 s9, s0, s1 -; GFX7-NEXT: s_and_b32 s1, s12, s18 -; GFX7-NEXT: s_lshr_b32 s13, s2, 16 -; GFX7-NEXT: s_and_b32 s0, s2, s18 +; GFX7-NEXT: s_bfe_u32 s1, s2, s13 +; GFX7-NEXT: s_and_b32 s0, s2, s12 ; GFX7-NEXT: s_lshl_b32 s1, s1, 8 ; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_and_b32 s1, s13, s18 +; GFX7-NEXT: s_bfe_u32 s1, s2, s14 ; GFX7-NEXT: s_lshl_b32 s1, s1, 16 -; GFX7-NEXT: s_lshr_b32 s14, s2, 24 +; GFX7-NEXT: s_lshr_b32 s7, s2, 24 ; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_lshl_b32 s1, s14, 24 -; GFX7-NEXT: s_lshr_b32 s15, s3, 8 +; GFX7-NEXT: s_lshl_b32 s1, s7, 24 ; GFX7-NEXT: s_or_b32 s10, s0, s1 -; GFX7-NEXT: s_and_b32 s1, s15, s18 -; GFX7-NEXT: s_lshr_b32 s16, s3, 16 -; GFX7-NEXT: s_and_b32 s0, s3, s18 +; GFX7-NEXT: s_bfe_u32 s1, s3, s13 +; GFX7-NEXT: s_and_b32 s0, s3, s12 ; GFX7-NEXT: s_lshl_b32 s1, s1, 8 ; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_and_b32 s1, s16, s18 -; GFX7-NEXT: s_lshr_b32 s17, s3, 24 +; GFX7-NEXT: s_bfe_u32 s1, s3, s14 +; GFX7-NEXT: s_lshr_b32 s11, s3, 24 ; GFX7-NEXT: s_lshl_b32 s1, s1, 16 +; GFX7-NEXT: v_and_b32_e32 v0, 3, v0 ; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_lshl_b32 s1, s17, 24 +; GFX7-NEXT: s_lshl_b32 s1, s11, 24 ; GFX7-NEXT: v_mov_b32_e32 v1, s8 ; GFX7-NEXT: v_mov_b32_e32 v2, s9 +; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4 ; GFX7-NEXT: s_or_b32 s11, s0, s1 ; GFX7-NEXT: v_mov_b32_e32 v3, s10 ; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc ; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v4 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, 3, v0 -; GFX7-NEXT: s_and_b32 s4, s4, s18 +; GFX7-NEXT: s_and_b32 s4, s4, s12 ; GFX7-NEXT: v_lshl_b32_e32 v2, s4, v0 -; GFX7-NEXT: v_lshl_b32_e32 v0, s18, v0 +; GFX7-NEXT: v_lshl_b32_e32 v0, s12, v0 
; GFX7-NEXT: v_mov_b32_e32 v5, s11 ; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v3, s[0:1] ; GFX7-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v4 @@ -5680,59 +5339,51 @@ ; GFX7-NEXT: v_mov_b32_e32 v0, s8 ; GFX7-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v4 ; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, v5, s[4:5] -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 8, v0 +; GFX7-NEXT: v_bfe_u32 v9, v0, 8, 8 ; GFX7-NEXT: v_mov_b32_e32 v1, s9 +; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v8, s12, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc +; GFX7-NEXT: v_or_b32_e32 v8, v8, v9 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 ; GFX7-NEXT: v_mov_b32_e32 v2, s10 ; GFX7-NEXT: v_mov_b32_e32 v3, s11 -; GFX7-NEXT: v_and_b32_e32 v4, s18, v4 +; GFX7-NEXT: v_or_b32_e32 v0, v8, v0 +; GFX7-NEXT: v_bfe_u32 v8, v1, 8, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 ; GFX7-NEXT: v_cndmask_b32_e64 v3, v3, v5, s[2:3] ; GFX7-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[0:1] -; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s18, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s18, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v6 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s18, v7 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s18, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s18, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v9 -; GFX7-NEXT: v_lshrrev_b32_e32 v10, 8, v2 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 
24, v1 +; GFX7-NEXT: v_and_b32_e32 v4, s12, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 8, v8 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v1, v4, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v5 +; GFX7-NEXT: v_bfe_u32 v5, v2, 8, 8 ; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s18, v10 -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v12, 24, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s18, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s18, v11 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v12 -; GFX7-NEXT: v_lshrrev_b32_e32 v13, 8, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v2 +; GFX7-NEXT: v_and_b32_e32 v4, s12, v2 +; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_bfe_u32 v5, v3, 8, 8 +; GFX7-NEXT: v_or_b32_e32 v2, v4, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v6 ; GFX7-NEXT: v_or_b32_e32 v2, v2, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s18, v13 -; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v15, 24, v3 -; GFX7-NEXT: v_and_b32_e32 v3, s18, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s18, v14 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v15 +; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v3 +; GFX7-NEXT: v_and_b32_e32 v4, s12, v3 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v4, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v7 ; GFX7-NEXT: s_mov_b64 s[0:1], 0 ; 
GFX7-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX7-NEXT: s_mov_b32 s2, -1 @@ -5743,68 +5394,64 @@ ; GFX10-LABEL: insertelement_s_v16i8_s_v: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 +; GFX10-NEXT: s_mov_b32 s6, 0x80008 ; GFX10-NEXT: s_movk_i32 s5, 0xff +; GFX10-NEXT: s_mov_b32 s7, 0x80010 ; GFX10-NEXT: v_lshrrev_b32_e32 v4, 2, v0 ; GFX10-NEXT: v_and_b32_e32 v0, 3, v0 -; GFX10-NEXT: v_mov_b32_e32 v9, 8 +; GFX10-NEXT: v_mov_b32_e32 v10, 8 +; GFX10-NEXT: v_mov_b32_e32 v12, 16 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v4 ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, s5 -; GFX10-NEXT: v_xor_b32_e32 v2, -1, v2 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s6, s0, 8 -; GFX10-NEXT: s_lshr_b32 s7, s0, 16 -; GFX10-NEXT: s_lshr_b32 s9, s1, 8 -; GFX10-NEXT: s_and_b32 s6, s6, s5 +; GFX10-NEXT: s_bfe_u32 s13, s0, s6 ; GFX10-NEXT: s_lshr_b32 s8, s0, 24 -; GFX10-NEXT: s_lshr_b32 s10, s1, 16 -; GFX10-NEXT: s_and_b32 s7, s7, s5 -; GFX10-NEXT: s_and_b32 s9, s9, s5 -; GFX10-NEXT: s_and_b32 s0, s0, s5 -; GFX10-NEXT: s_lshl_b32 s6, s6, 8 -; GFX10-NEXT: s_lshr_b32 s11, s1, 24 -; GFX10-NEXT: s_and_b32 s1, s1, s5 -; GFX10-NEXT: s_lshl_b32 s9, s9, 8 -; GFX10-NEXT: s_and_b32 s10, s10, s5 -; GFX10-NEXT: s_or_b32 s0, s0, s6 -; GFX10-NEXT: s_lshl_b32 s7, s7, 16 -; GFX10-NEXT: s_lshr_b32 s12, s2, 8 +; GFX10-NEXT: s_and_b32 s12, s0, s5 +; GFX10-NEXT: s_bfe_u32 s0, s0, s7 +; GFX10-NEXT: s_lshl_b32 s13, s13, 8 +; GFX10-NEXT: s_bfe_u32 s15, s1, s6 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_or_b32 s12, s12, s13 +; GFX10-NEXT: s_lshr_b32 s9, s1, 24 +; GFX10-NEXT: s_and_b32 s14, s1, s5 +; GFX10-NEXT: s_bfe_u32 s1, s1, s7 +; GFX10-NEXT: s_lshl_b32 s15, s15, 8 +; GFX10-NEXT: s_bfe_u32 s17, s2, s6 ; GFX10-NEXT: s_lshl_b32 s8, s8, 24 -; GFX10-NEXT: s_or_b32 s0, s0, s7 -; GFX10-NEXT: s_lshl_b32 s6, s10, 16 -; GFX10-NEXT: s_or_b32 s1, s1, s9 -; GFX10-NEXT: s_lshr_b32 s13, s2, 16 +; GFX10-NEXT: s_or_b32 s0, s12, s0 +; GFX10-NEXT: 
s_lshl_b32 s1, s1, 16 +; GFX10-NEXT: s_or_b32 s13, s14, s15 ; GFX10-NEXT: s_or_b32 s8, s0, s8 -; GFX10-NEXT: s_or_b32 s0, s1, s6 -; GFX10-NEXT: s_and_b32 s6, s12, s5 -; GFX10-NEXT: s_lshr_b32 s14, s2, 24 -; GFX10-NEXT: s_and_b32 s2, s2, s5 -; GFX10-NEXT: s_lshl_b32 s6, s6, 8 -; GFX10-NEXT: s_and_b32 s7, s13, s5 -; GFX10-NEXT: s_lshl_b32 s1, s11, 24 -; GFX10-NEXT: s_or_b32 s2, s2, s6 -; GFX10-NEXT: s_lshr_b32 s15, s3, 8 -; GFX10-NEXT: s_lshl_b32 s6, s7, 16 -; GFX10-NEXT: s_or_b32 s9, s0, s1 -; GFX10-NEXT: s_or_b32 s0, s2, s6 -; GFX10-NEXT: s_and_b32 s2, s15, s5 -; GFX10-NEXT: s_lshl_b32 s1, s14, 24 -; GFX10-NEXT: s_lshr_b32 s16, s3, 16 +; GFX10-NEXT: s_lshr_b32 s10, s2, 24 +; GFX10-NEXT: s_and_b32 s16, s2, s5 +; GFX10-NEXT: s_lshl_b32 s0, s17, 8 +; GFX10-NEXT: s_bfe_u32 s2, s2, s7 +; GFX10-NEXT: s_lshl_b32 s9, s9, 24 +; GFX10-NEXT: s_or_b32 s1, s13, s1 +; GFX10-NEXT: s_or_b32 s0, s16, s0 +; GFX10-NEXT: s_lshl_b32 s2, s2, 16 +; GFX10-NEXT: s_or_b32 s9, s1, s9 +; GFX10-NEXT: s_or_b32 s0, s0, s2 +; GFX10-NEXT: s_bfe_u32 s2, s3, s6 +; GFX10-NEXT: s_lshl_b32 s1, s10, 24 ; GFX10-NEXT: v_mov_b32_e32 v1, s9 -; GFX10-NEXT: s_lshr_b32 s17, s3, 24 ; GFX10-NEXT: s_or_b32 s10, s0, s1 -; GFX10-NEXT: s_and_b32 s1, s16, s5 -; GFX10-NEXT: s_and_b32 s3, s3, s5 +; GFX10-NEXT: s_bfe_u32 s1, s3, s7 +; GFX10-NEXT: s_and_b32 s6, s3, s5 ; GFX10-NEXT: s_lshl_b32 s2, s2, 8 ; GFX10-NEXT: s_lshl_b32 s1, s1, 16 -; GFX10-NEXT: s_or_b32 s0, s3, s2 +; GFX10-NEXT: s_or_b32 s0, s6, s2 ; GFX10-NEXT: v_cndmask_b32_e32 v1, s8, v1, vcc_lo ; GFX10-NEXT: s_or_b32 s1, s0, s1 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 2, v4 -; GFX10-NEXT: s_lshl_b32 s2, s17, 24 +; GFX10-NEXT: s_lshr_b32 s11, s3, 24 +; GFX10-NEXT: v_xor_b32_e32 v2, -1, v2 +; GFX10-NEXT: s_lshl_b32 s2, s11, 24 +; GFX10-NEXT: s_mov_b32 s3, 8 ; GFX10-NEXT: s_or_b32 s11, s1, s2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 3, v4 ; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s10, s0 +; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 3, v4 ; GFX10-NEXT: s_and_b32 s2, s4, s5 ; 
GFX10-NEXT: v_lshlrev_b32_e64 v0, v0, s2 ; GFX10-NEXT: v_cmp_eq_u32_e64 s2, 0, v4 @@ -5818,36 +5465,32 @@ ; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v5, s0 ; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v5, s1 -; GFX10-NEXT: s_mov_b32 s2, 8 -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v6, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v8, 8, v2 -; GFX10-NEXT: v_lshrrev_b32_e32 v11, 8, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, s2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v2 -; GFX10-NEXT: v_lshlrev_b32_sdwa v6, v9, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v8, v9, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v9, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshrrev_b32_e32 v12, 24, v3 -; GFX10-NEXT: v_and_or_b32 v4, v0, s5, v4 -; GFX10-NEXT: v_and_b32_sdwa v0, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX10-NEXT: s_mov_b32 s2, 16 +; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v6, s3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v9, s3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v11, v10, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v2 +; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v10, v10, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_and_or_b32 v6, v0, s5, v6 +; GFX10-NEXT: v_and_or_b32 v9, v1, s5, v9 +; GFX10-NEXT: 
v_lshlrev_b32_sdwa v0, s2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX10-NEXT: v_and_or_b32 v6, v1, s5, v6 -; GFX10-NEXT: v_and_or_b32 v8, v2, s5, v8 -; GFX10-NEXT: v_and_or_b32 v9, v3, s5, v9 -; GFX10-NEXT: v_and_b32_sdwa v1, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX10-NEXT: v_and_or_b32 v11, v2, s5, v11 +; GFX10-NEXT: v_and_or_b32 v10, v3, s5, v10 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v12, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX10-NEXT: v_and_b32_sdwa v2, v2, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; GFX10-NEXT: v_and_b32_sdwa v3, v3, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v11, 24, v12 -; GFX10-NEXT: v_or3_b32 v0, v4, v0, v5 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v12, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; GFX10-NEXT: v_or3_b32 v0, v6, v0, v4 +; GFX10-NEXT: v_or3_b32 v1, v9, v1, v5 ; GFX10-NEXT: v_mov_b32_e32 v4, 0 -; GFX10-NEXT: v_or3_b32 v1, v6, v1, v7 -; GFX10-NEXT: v_or3_b32 v2, v8, v2, v10 -; GFX10-NEXT: v_or3_b32 v3, v9, v3, v11 +; GFX10-NEXT: v_or3_b32 v2, v11, v2, v7 +; GFX10-NEXT: v_or3_b32 v3, v10, v3, v8 ; GFX10-NEXT: v_mov_b32_e32 v5, 0 ; GFX10-NEXT: global_store_dwordx4 v[4:5], v[0:3], off ; GFX10-NEXT: s_endpgm @@ -5861,67 +5504,61 @@ ; GFX9-LABEL: insertelement_s_v16i8_v_v: ; GFX9: ; %bb.0: ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 -; GFX9-NEXT: s_movk_i32 s17, 0xff +; GFX9-NEXT: s_mov_b32 s12, 0x80008 +; GFX9-NEXT: s_movk_i32 s10, 0xff ; GFX9-NEXT: v_lshrrev_b32_e32 v4, 2, v1 ; 
GFX9-NEXT: v_and_b32_e32 v1, 3, v1 -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4 ; GFX9-NEXT: s_waitcnt lgkmcnt(0) -; GFX9-NEXT: s_lshr_b32 s4, s0, 8 -; GFX9-NEXT: s_and_b32 s4, s4, s17 -; GFX9-NEXT: s_lshr_b32 s5, s0, 16 -; GFX9-NEXT: s_lshr_b32 s6, s0, 24 -; GFX9-NEXT: s_and_b32 s0, s0, s17 -; GFX9-NEXT: s_lshl_b32 s4, s4, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s4 -; GFX9-NEXT: s_and_b32 s4, s5, s17 -; GFX9-NEXT: s_lshl_b32 s4, s4, 16 -; GFX9-NEXT: s_or_b32 s0, s0, s4 -; GFX9-NEXT: s_lshl_b32 s4, s6, 24 -; GFX9-NEXT: s_lshr_b32 s7, s1, 8 +; GFX9-NEXT: s_bfe_u32 s13, s0, s12 +; GFX9-NEXT: s_and_b32 s11, s0, s10 +; GFX9-NEXT: s_lshl_b32 s13, s13, 8 +; GFX9-NEXT: s_or_b32 s11, s11, s13 +; GFX9-NEXT: s_mov_b32 s13, 0x80010 +; GFX9-NEXT: s_lshr_b32 s4, s0, 24 +; GFX9-NEXT: s_bfe_u32 s0, s0, s13 +; GFX9-NEXT: s_lshl_b32 s0, s0, 16 +; GFX9-NEXT: s_or_b32 s0, s11, s0 +; GFX9-NEXT: s_lshl_b32 s4, s4, 24 +; GFX9-NEXT: s_bfe_u32 s11, s1, s12 ; GFX9-NEXT: s_or_b32 s4, s0, s4 -; GFX9-NEXT: s_lshr_b32 s9, s1, 16 -; GFX9-NEXT: s_lshr_b32 s10, s1, 24 -; GFX9-NEXT: s_and_b32 s0, s1, s17 -; GFX9-NEXT: s_and_b32 s1, s7, s17 -; GFX9-NEXT: s_lshl_b32 s1, s1, 8 -; GFX9-NEXT: s_or_b32 s0, s0, s1 -; GFX9-NEXT: s_and_b32 s1, s9, s17 +; GFX9-NEXT: s_lshr_b32 s5, s1, 24 +; GFX9-NEXT: s_and_b32 s0, s1, s10 +; GFX9-NEXT: s_bfe_u32 s1, s1, s13 +; GFX9-NEXT: s_lshl_b32 s11, s11, 8 +; GFX9-NEXT: s_or_b32 s0, s0, s11 ; GFX9-NEXT: s_lshl_b32 s1, s1, 16 ; GFX9-NEXT: s_or_b32 s0, s0, s1 -; GFX9-NEXT: s_lshl_b32 s1, s10, 24 -; GFX9-NEXT: s_lshr_b32 s11, s2, 8 +; GFX9-NEXT: s_lshl_b32 s1, s5, 24 ; GFX9-NEXT: s_or_b32 s5, s0, s1 -; GFX9-NEXT: s_and_b32 s1, s11, s17 -; GFX9-NEXT: s_lshr_b32 s12, s2, 16 -; GFX9-NEXT: s_and_b32 s0, s2, s17 +; GFX9-NEXT: s_bfe_u32 s1, s2, s12 +; GFX9-NEXT: s_and_b32 s0, s2, s10 ; GFX9-NEXT: s_lshl_b32 s1, s1, 8 ; GFX9-NEXT: s_or_b32 s0, s0, s1 -; GFX9-NEXT: s_and_b32 s1, s12, s17 +; GFX9-NEXT: s_bfe_u32 s1, s2, s13 ; GFX9-NEXT: s_lshl_b32 s1, s1, 16 -; GFX9-NEXT: s_lshr_b32 s13, 
s2, 24 +; GFX9-NEXT: s_lshr_b32 s6, s2, 24 ; GFX9-NEXT: s_or_b32 s0, s0, s1 -; GFX9-NEXT: s_lshl_b32 s1, s13, 24 -; GFX9-NEXT: s_lshr_b32 s14, s3, 8 +; GFX9-NEXT: s_lshl_b32 s1, s6, 24 ; GFX9-NEXT: s_or_b32 s6, s0, s1 -; GFX9-NEXT: s_and_b32 s1, s14, s17 -; GFX9-NEXT: s_lshr_b32 s15, s3, 16 -; GFX9-NEXT: s_and_b32 s0, s3, s17 +; GFX9-NEXT: s_bfe_u32 s1, s3, s12 +; GFX9-NEXT: s_and_b32 s0, s3, s10 ; GFX9-NEXT: s_lshl_b32 s1, s1, 8 ; GFX9-NEXT: s_or_b32 s0, s0, s1 -; GFX9-NEXT: s_and_b32 s1, s15, s17 -; GFX9-NEXT: s_lshr_b32 s16, s3, 24 +; GFX9-NEXT: s_bfe_u32 s1, s3, s13 +; GFX9-NEXT: s_lshr_b32 s7, s3, 24 ; GFX9-NEXT: s_lshl_b32 s1, s1, 16 ; GFX9-NEXT: s_or_b32 s0, s0, s1 -; GFX9-NEXT: s_lshl_b32 s1, s16, 24 +; GFX9-NEXT: s_lshl_b32 s1, s7, 24 ; GFX9-NEXT: v_mov_b32_e32 v2, s4 ; GFX9-NEXT: v_mov_b32_e32 v3, s5 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4 ; GFX9-NEXT: s_or_b32 s7, s0, s1 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 3, v1 ; GFX9-NEXT: v_mov_b32_e32 v5, s6 ; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc ; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v4 ; GFX9-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_e64 v1, v1, s17 +; GFX9-NEXT: v_lshlrev_b32_e64 v1, v1, s10 ; GFX9-NEXT: v_mov_b32_e32 v6, s7 ; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[0:1] ; GFX9-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v4 @@ -5935,37 +5572,35 @@ ; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v4 ; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v5, s[4:5] ; GFX9-NEXT: s_mov_b32 s8, 8 -; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, v0 +; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX9-NEXT: v_lshlrev_b32_sdwa v8, s8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: s_mov_b32 s9, 16 +; GFX9-NEXT: v_and_or_b32 v8, v0, s10, v8 ; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc +; GFX9-NEXT: v_lshlrev_b32_sdwa v0, s9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v4, 
24, v4 +; GFX9-NEXT: v_or3_b32 v0, v8, v0, v4 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[0:1] ; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v5, s[2:3] -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v4, s8, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_or_b32 v4, v0, s17, v4 -; GFX9-NEXT: v_and_b32_sdwa v0, v0, s17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX9-NEXT: v_and_or_b32 v4, v1, s10, v4 +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX9-NEXT: v_or3_b32 v0, v4, v0, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 8, v1 +; GFX9-NEXT: v_or3_b32 v1, v4, v1, v5 ; GFX9-NEXT: v_mov_b32_e32 v4, 8 -; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v4, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX9-NEXT: v_and_or_b32 v5, v1, s17, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v8, 8, v2 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v7 -; GFX9-NEXT: v_or3_b32 v1, v5, v1, v6 -; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v10, 8, v3 -; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_or_b32 v5, v2, s17, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v11, 24, v3 -; GFX9-NEXT: v_and_b32_sdwa v2, v2, s17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v9 +; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v4, v2 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v2 +; GFX9-NEXT: v_mov_b32_e32 v8, 16 +; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v5, v2, s10, v5 +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v3 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6 ; GFX9-NEXT: v_or3_b32 v2, v5, v2, v6 -; GFX9-NEXT: v_and_or_b32 v4, v3, s17, v4 -; GFX9-NEXT: v_and_b32_sdwa v3, v3, s17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v11 +; GFX9-NEXT: v_and_or_b32 v4, v3, s10, v4 +; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v5, 24, v7 ; GFX9-NEXT: v_or3_b32 v3, v4, v3, v5 ; GFX9-NEXT: v_mov_b32_e32 v4, 0 ; GFX9-NEXT: v_mov_b32_e32 v5, 0 @@ -5975,67 +5610,61 @@ ; GFX8-LABEL: insertelement_s_v16i8_v_v: ; GFX8: ; %bb.0: ; GFX8-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 -; GFX8-NEXT: s_movk_i32 s16, 0xff +; GFX8-NEXT: s_mov_b32 s10, 0x80008 +; GFX8-NEXT: s_movk_i32 s8, 0xff ; GFX8-NEXT: v_lshrrev_b32_e32 v4, 2, v1 ; GFX8-NEXT: v_and_b32_e32 v1, 3, v1 -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4 ; GFX8-NEXT: s_waitcnt lgkmcnt(0) -; GFX8-NEXT: s_lshr_b32 s4, s0, 8 -; GFX8-NEXT: s_and_b32 s4, s4, s16 -; GFX8-NEXT: s_lshr_b32 s5, s0, 16 -; GFX8-NEXT: s_lshr_b32 s6, s0, 24 -; GFX8-NEXT: s_and_b32 s0, s0, s16 -; GFX8-NEXT: s_lshl_b32 s4, s4, 8 -; GFX8-NEXT: s_or_b32 s0, s0, s4 -; GFX8-NEXT: s_and_b32 s4, s5, s16 -; GFX8-NEXT: s_lshl_b32 s4, s4, 16 -; GFX8-NEXT: s_or_b32 s0, s0, s4 -; GFX8-NEXT: s_lshl_b32 s4, s6, 24 -; GFX8-NEXT: s_lshr_b32 s7, s1, 8 +; GFX8-NEXT: s_bfe_u32 s11, s0, s10 +; GFX8-NEXT: s_and_b32 s9, s0, s8 +; GFX8-NEXT: s_lshl_b32 s11, s11, 8 +; GFX8-NEXT: s_or_b32 s9, s9, s11 +; GFX8-NEXT: s_mov_b32 
s11, 0x80010 +; GFX8-NEXT: s_lshr_b32 s4, s0, 24 +; GFX8-NEXT: s_bfe_u32 s0, s0, s11 +; GFX8-NEXT: s_lshl_b32 s0, s0, 16 +; GFX8-NEXT: s_or_b32 s0, s9, s0 +; GFX8-NEXT: s_lshl_b32 s4, s4, 24 +; GFX8-NEXT: s_bfe_u32 s9, s1, s10 ; GFX8-NEXT: s_or_b32 s4, s0, s4 -; GFX8-NEXT: s_lshr_b32 s8, s1, 16 -; GFX8-NEXT: s_lshr_b32 s9, s1, 24 -; GFX8-NEXT: s_and_b32 s0, s1, s16 -; GFX8-NEXT: s_and_b32 s1, s7, s16 -; GFX8-NEXT: s_lshl_b32 s1, s1, 8 -; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s8, s16 +; GFX8-NEXT: s_lshr_b32 s5, s1, 24 +; GFX8-NEXT: s_and_b32 s0, s1, s8 +; GFX8-NEXT: s_bfe_u32 s1, s1, s11 +; GFX8-NEXT: s_lshl_b32 s9, s9, 8 +; GFX8-NEXT: s_or_b32 s0, s0, s9 ; GFX8-NEXT: s_lshl_b32 s1, s1, 16 ; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_lshl_b32 s1, s9, 24 -; GFX8-NEXT: s_lshr_b32 s10, s2, 8 +; GFX8-NEXT: s_lshl_b32 s1, s5, 24 ; GFX8-NEXT: s_or_b32 s5, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s10, s16 -; GFX8-NEXT: s_lshr_b32 s11, s2, 16 -; GFX8-NEXT: s_and_b32 s0, s2, s16 +; GFX8-NEXT: s_bfe_u32 s1, s2, s10 +; GFX8-NEXT: s_and_b32 s0, s2, s8 ; GFX8-NEXT: s_lshl_b32 s1, s1, 8 ; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s11, s16 +; GFX8-NEXT: s_bfe_u32 s1, s2, s11 ; GFX8-NEXT: s_lshl_b32 s1, s1, 16 -; GFX8-NEXT: s_lshr_b32 s12, s2, 24 +; GFX8-NEXT: s_lshr_b32 s6, s2, 24 ; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_lshl_b32 s1, s12, 24 -; GFX8-NEXT: s_lshr_b32 s13, s3, 8 +; GFX8-NEXT: s_lshl_b32 s1, s6, 24 ; GFX8-NEXT: s_or_b32 s6, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s13, s16 -; GFX8-NEXT: s_lshr_b32 s14, s3, 16 -; GFX8-NEXT: s_and_b32 s0, s3, s16 +; GFX8-NEXT: s_bfe_u32 s1, s3, s10 +; GFX8-NEXT: s_and_b32 s0, s3, s8 ; GFX8-NEXT: s_lshl_b32 s1, s1, 8 ; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_and_b32 s1, s14, s16 -; GFX8-NEXT: s_lshr_b32 s15, s3, 24 +; GFX8-NEXT: s_bfe_u32 s1, s3, s11 +; GFX8-NEXT: s_lshr_b32 s7, s3, 24 ; GFX8-NEXT: s_lshl_b32 s1, s1, 16 ; GFX8-NEXT: s_or_b32 s0, s0, s1 -; GFX8-NEXT: s_lshl_b32 s1, s15, 24 +; 
GFX8-NEXT: s_lshl_b32 s1, s7, 24 ; GFX8-NEXT: v_mov_b32_e32 v2, s4 ; GFX8-NEXT: v_mov_b32_e32 v3, s5 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4 ; GFX8-NEXT: s_or_b32 s7, s0, s1 ; GFX8-NEXT: v_lshlrev_b32_e32 v1, 3, v1 ; GFX8-NEXT: v_mov_b32_e32 v5, s6 ; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v4 ; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_e64 v1, v1, s16 +; GFX8-NEXT: v_lshlrev_b32_e64 v1, v1, s8 ; GFX8-NEXT: v_mov_b32_e32 v6, s7 ; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[0:1] ; GFX8-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v4 @@ -6049,42 +5678,39 @@ ; GFX8-NEXT: v_mov_b32_e32 v3, s7 ; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v4 ; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v5, s[4:5] -; GFX8-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX8-NEXT: v_mov_b32_e32 v12, 8 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v12, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_mov_b32_e32 v12, s16 +; GFX8-NEXT: v_mov_b32_e32 v8, 8 +; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_mov_b32_e32 v10, 16 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX8-NEXT: v_or_b32_sdwa v9, v0, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v10, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc +; GFX8-NEXT: v_or_b32_e32 v0, v9, v0 +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v4 +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[0:1] ; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v5, s[2:3] -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX8-NEXT: v_or_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 
src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v0, v0, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, v4, v0 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX8-NEXT: v_or_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v10, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_e32 v1, v4, v1 ; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v5 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX8-NEXT: v_lshrrev_b32_e32 v6, 8, v1 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v4 ; GFX8-NEXT: v_mov_b32_e32 v4, 8 -; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX8-NEXT: v_or_b32_sdwa v5, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v1, v1, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v1, v5, v1 -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v7 -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 8, v2 -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 8, v3 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v5 -; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v9, 24, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_mov_b32_e32 v8, 16 +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 24, v2 ; GFX8-NEXT: v_or_b32_sdwa v5, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v2, v2, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 
src1_sel:DWORD -; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v3 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v3 ; GFX8-NEXT: v_or_b32_sdwa v4, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v3, v3, v12 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_e32 v2, v5, v2 -; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v9 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v6 ; GFX8-NEXT: v_or_b32_e32 v3, v4, v3 -; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v11 +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v7 ; GFX8-NEXT: v_or_b32_e32 v2, v2, v5 ; GFX8-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX8-NEXT: v_mov_b32_e32 v4, 0 @@ -6095,68 +5721,62 @@ ; GFX7-LABEL: insertelement_s_v16i8_v_v: ; GFX7: ; %bb.0: ; GFX7-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 -; GFX7-NEXT: s_movk_i32 s16, 0xff +; GFX7-NEXT: s_mov_b32 s10, 0x80008 +; GFX7-NEXT: s_movk_i32 s8, 0xff ; GFX7-NEXT: v_lshrrev_b32_e32 v4, 2, v1 ; GFX7-NEXT: v_and_b32_e32 v1, 3, v1 -; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v4 ; GFX7-NEXT: s_waitcnt lgkmcnt(0) -; GFX7-NEXT: s_lshr_b32 s4, s0, 8 -; GFX7-NEXT: s_and_b32 s4, s4, s16 -; GFX7-NEXT: s_lshr_b32 s5, s0, 16 -; GFX7-NEXT: s_lshr_b32 s6, s0, 24 -; GFX7-NEXT: s_and_b32 s0, s0, s16 -; GFX7-NEXT: s_lshl_b32 s4, s4, 8 -; GFX7-NEXT: s_or_b32 s0, s0, s4 -; GFX7-NEXT: s_and_b32 s4, s5, s16 -; GFX7-NEXT: s_lshl_b32 s4, s4, 16 -; GFX7-NEXT: s_or_b32 s0, s0, s4 -; GFX7-NEXT: s_lshl_b32 s4, s6, 24 -; GFX7-NEXT: s_lshr_b32 s7, s1, 8 +; GFX7-NEXT: s_bfe_u32 s11, s0, s10 +; GFX7-NEXT: s_and_b32 s9, s0, s8 +; GFX7-NEXT: s_lshl_b32 s11, s11, 8 +; GFX7-NEXT: s_or_b32 s9, s9, s11 +; GFX7-NEXT: s_mov_b32 s11, 0x80010 +; GFX7-NEXT: s_lshr_b32 s4, s0, 24 +; GFX7-NEXT: s_bfe_u32 s0, s0, s11 +; GFX7-NEXT: s_lshl_b32 s0, s0, 16 +; 
GFX7-NEXT: s_or_b32 s0, s9, s0 +; GFX7-NEXT: s_lshl_b32 s4, s4, 24 +; GFX7-NEXT: s_bfe_u32 s9, s1, s10 ; GFX7-NEXT: s_or_b32 s4, s0, s4 -; GFX7-NEXT: s_lshr_b32 s8, s1, 16 -; GFX7-NEXT: s_lshr_b32 s9, s1, 24 -; GFX7-NEXT: s_and_b32 s0, s1, s16 -; GFX7-NEXT: s_and_b32 s1, s7, s16 -; GFX7-NEXT: s_lshl_b32 s1, s1, 8 -; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_and_b32 s1, s8, s16 +; GFX7-NEXT: s_lshr_b32 s5, s1, 24 +; GFX7-NEXT: s_and_b32 s0, s1, s8 +; GFX7-NEXT: s_bfe_u32 s1, s1, s11 +; GFX7-NEXT: s_lshl_b32 s9, s9, 8 +; GFX7-NEXT: s_or_b32 s0, s0, s9 ; GFX7-NEXT: s_lshl_b32 s1, s1, 16 ; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_lshl_b32 s1, s9, 24 -; GFX7-NEXT: s_lshr_b32 s10, s2, 8 +; GFX7-NEXT: s_lshl_b32 s1, s5, 24 ; GFX7-NEXT: s_or_b32 s5, s0, s1 -; GFX7-NEXT: s_and_b32 s1, s10, s16 -; GFX7-NEXT: s_lshr_b32 s11, s2, 16 -; GFX7-NEXT: s_and_b32 s0, s2, s16 +; GFX7-NEXT: s_bfe_u32 s1, s2, s10 +; GFX7-NEXT: s_and_b32 s0, s2, s8 ; GFX7-NEXT: s_lshl_b32 s1, s1, 8 ; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_and_b32 s1, s11, s16 +; GFX7-NEXT: s_bfe_u32 s1, s2, s11 ; GFX7-NEXT: s_lshl_b32 s1, s1, 16 -; GFX7-NEXT: s_lshr_b32 s12, s2, 24 +; GFX7-NEXT: s_lshr_b32 s6, s2, 24 ; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_lshl_b32 s1, s12, 24 -; GFX7-NEXT: s_lshr_b32 s13, s3, 8 +; GFX7-NEXT: s_lshl_b32 s1, s6, 24 ; GFX7-NEXT: s_or_b32 s6, s0, s1 -; GFX7-NEXT: s_and_b32 s1, s13, s16 -; GFX7-NEXT: s_lshr_b32 s14, s3, 16 -; GFX7-NEXT: s_and_b32 s0, s3, s16 +; GFX7-NEXT: s_bfe_u32 s1, s3, s10 +; GFX7-NEXT: s_and_b32 s0, s3, s8 ; GFX7-NEXT: s_lshl_b32 s1, s1, 8 ; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_and_b32 s1, s14, s16 -; GFX7-NEXT: s_lshr_b32 s15, s3, 24 +; GFX7-NEXT: s_bfe_u32 s1, s3, s11 +; GFX7-NEXT: s_lshr_b32 s7, s3, 24 ; GFX7-NEXT: s_lshl_b32 s1, s1, 16 ; GFX7-NEXT: s_or_b32 s0, s0, s1 -; GFX7-NEXT: s_lshl_b32 s1, s15, 24 +; GFX7-NEXT: s_lshl_b32 s1, s7, 24 ; GFX7-NEXT: v_mov_b32_e32 v2, s4 ; GFX7-NEXT: v_mov_b32_e32 v3, s5 +; GFX7-NEXT: 
v_cmp_eq_u32_e32 vcc, 1, v4 ; GFX7-NEXT: s_or_b32 s7, s0, s1 ; GFX7-NEXT: v_mov_b32_e32 v5, s6 ; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc ; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v4 ; GFX7-NEXT: v_lshlrev_b32_e32 v1, 3, v1 -; GFX7-NEXT: v_and_b32_e32 v0, s16, v0 +; GFX7-NEXT: v_and_b32_e32 v0, s8, v0 ; GFX7-NEXT: v_lshlrev_b32_e32 v0, v1, v0 -; GFX7-NEXT: v_lshl_b32_e32 v1, s16, v1 +; GFX7-NEXT: v_lshl_b32_e32 v1, s8, v1 ; GFX7-NEXT: v_mov_b32_e32 v6, s7 ; GFX7-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[0:1] ; GFX7-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v4 @@ -6170,56 +5790,48 @@ ; GFX7-NEXT: v_mov_b32_e32 v3, s7 ; GFX7-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v4 ; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, v5, s[4:5] -; GFX7-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX7-NEXT: v_and_b32_e32 v4, s16, v4 +; GFX7-NEXT: v_bfe_u32 v9, v0, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX7-NEXT: v_and_b32_e32 v8, s8, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc +; GFX7-NEXT: v_or_b32_e32 v8, v8, v9 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v0, v8, v0 +; GFX7-NEXT: v_bfe_u32 v8, v1, 8, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v4 ; GFX7-NEXT: v_cndmask_b32_e64 v3, v3, v5, s[2:3] ; GFX7-NEXT: v_cndmask_b32_e64 v2, v2, v5, s[0:1] -; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX7-NEXT: v_and_b32_e32 v0, s16, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s16, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v6 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s16, v7 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s16, v1 
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s16, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v9 -; GFX7-NEXT: v_lshrrev_b32_e32 v10, 8, v2 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX7-NEXT: v_and_b32_e32 v4, s8, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 8, v8 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v8 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v1, v4, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v5 +; GFX7-NEXT: v_bfe_u32 v5, v2, 8, 8 ; GFX7-NEXT: v_or_b32_e32 v1, v1, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s16, v10 -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v12, 24, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s16, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s16, v11 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v12 -; GFX7-NEXT: v_lshrrev_b32_e32 v13, 8, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v2 +; GFX7-NEXT: v_and_b32_e32 v4, s8, v2 +; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 +; GFX7-NEXT: v_bfe_u32 v5, v3, 8, 8 +; GFX7-NEXT: v_or_b32_e32 v2, v4, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v6 ; GFX7-NEXT: v_or_b32_e32 v2, v2, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s16, v13 -; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v15, 24, v3 -; GFX7-NEXT: v_and_b32_e32 v3, s16, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s16, v14 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v15 +; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v3 
+; GFX7-NEXT: v_and_b32_e32 v4, s8, v3 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX7-NEXT: v_or_b32_e32 v4, v4, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_or_b32_e32 v3, v4, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v7 ; GFX7-NEXT: s_mov_b64 s[0:1], 0 ; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX7-NEXT: s_mov_b32 s2, -1 @@ -6230,70 +5842,66 @@ ; GFX10-LABEL: insertelement_s_v16i8_v_v: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x0 +; GFX10-NEXT: s_mov_b32 s7, 0x80008 ; GFX10-NEXT: s_movk_i32 s8, 0xff +; GFX10-NEXT: s_mov_b32 s9, 0x80010 ; GFX10-NEXT: v_lshrrev_b32_e32 v4, 2, v1 ; GFX10-NEXT: v_and_b32_e32 v1, 3, v1 -; GFX10-NEXT: v_mov_b32_e32 v9, 8 +; GFX10-NEXT: v_mov_b32_e32 v10, 8 +; GFX10-NEXT: v_mov_b32_e32 v12, 16 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v4 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 3, v1 ; GFX10-NEXT: v_lshlrev_b32_e64 v3, v1, s8 ; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_xor_b32_e32 v1, -1, v3 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_lshr_b32 s4, s0, 8 -; GFX10-NEXT: s_lshr_b32 s5, s0, 16 -; GFX10-NEXT: s_lshr_b32 s7, s1, 8 -; GFX10-NEXT: s_and_b32 s4, s4, s8 -; GFX10-NEXT: s_lshr_b32 s6, s0, 24 -; GFX10-NEXT: s_lshr_b32 s9, s1, 16 -; GFX10-NEXT: s_and_b32 s5, s5, s8 -; GFX10-NEXT: s_and_b32 s7, s7, s8 -; GFX10-NEXT: s_and_b32 s0, s0, s8 -; GFX10-NEXT: s_lshl_b32 s4, s4, 8 -; GFX10-NEXT: s_and_b32 s9, s9, s8 -; GFX10-NEXT: s_lshr_b32 s10, s1, 24 -; GFX10-NEXT: s_and_b32 s1, s1, s8 -; GFX10-NEXT: s_lshl_b32 s7, s7, 8 -; GFX10-NEXT: s_lshl_b32 s5, s5, 16 -; GFX10-NEXT: s_or_b32 s0, s0, s4 -; GFX10-NEXT: s_lshr_b32 s11, s2, 8 -; GFX10-NEXT: s_lshl_b32 s6, s6, 24 -; GFX10-NEXT: s_or_b32 s0, s0, s5 -; GFX10-NEXT: s_lshl_b32 s9, s9, 16 -; GFX10-NEXT: s_or_b32 s1, s1, s7 -; GFX10-NEXT: s_lshr_b32 s12, s2, 16 -; GFX10-NEXT: s_or_b32 s4, s0, s6 -; GFX10-NEXT: s_or_b32 s0, s1, s9 -; 
GFX10-NEXT: s_and_b32 s1, s11, s8 -; GFX10-NEXT: s_lshr_b32 s13, s2, 24 -; GFX10-NEXT: s_and_b32 s2, s2, s8 -; GFX10-NEXT: s_lshl_b32 s1, s1, 8 -; GFX10-NEXT: s_and_b32 s5, s12, s8 -; GFX10-NEXT: s_or_b32 s1, s2, s1 -; GFX10-NEXT: s_lshl_b32 s2, s5, 16 -; GFX10-NEXT: s_lshl_b32 s5, s10, 24 -; GFX10-NEXT: s_lshr_b32 s14, s3, 8 -; GFX10-NEXT: s_or_b32 s5, s0, s5 -; GFX10-NEXT: s_and_b32 s0, s14, s8 -; GFX10-NEXT: s_or_b32 s1, s1, s2 -; GFX10-NEXT: s_lshl_b32 s2, s13, 24 -; GFX10-NEXT: s_lshr_b32 s15, s3, 16 -; GFX10-NEXT: s_or_b32 s6, s1, s2 +; GFX10-NEXT: s_bfe_u32 s12, s0, s7 +; GFX10-NEXT: s_lshr_b32 s4, s0, 24 +; GFX10-NEXT: s_and_b32 s11, s0, s8 +; GFX10-NEXT: s_bfe_u32 s0, s0, s9 +; GFX10-NEXT: s_lshl_b32 s12, s12, 8 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_or_b32 s11, s11, s12 +; GFX10-NEXT: s_bfe_u32 s16, s2, s7 +; GFX10-NEXT: s_lshl_b32 s4, s4, 24 +; GFX10-NEXT: s_or_b32 s0, s11, s0 +; GFX10-NEXT: s_bfe_u32 s14, s1, s7 +; GFX10-NEXT: s_or_b32 s4, s0, s4 +; GFX10-NEXT: s_bfe_u32 s0, s2, s9 +; GFX10-NEXT: s_and_b32 s15, s2, s8 +; GFX10-NEXT: s_lshl_b32 s16, s16, 8 +; GFX10-NEXT: s_lshr_b32 s5, s1, 24 +; GFX10-NEXT: s_and_b32 s13, s1, s8 +; GFX10-NEXT: s_bfe_u32 s1, s1, s9 +; GFX10-NEXT: s_lshl_b32 s14, s14, 8 +; GFX10-NEXT: s_lshr_b32 s6, s2, 24 +; GFX10-NEXT: s_or_b32 s2, s15, s16 +; GFX10-NEXT: s_lshl_b32 s0, s0, 16 +; GFX10-NEXT: s_lshl_b32 s1, s1, 16 +; GFX10-NEXT: s_or_b32 s12, s13, s14 +; GFX10-NEXT: s_or_b32 s0, s2, s0 +; GFX10-NEXT: s_lshl_b32 s2, s6, 24 +; GFX10-NEXT: s_or_b32 s1, s12, s1 +; GFX10-NEXT: s_lshl_b32 s5, s5, 24 +; GFX10-NEXT: s_or_b32 s6, s0, s2 +; GFX10-NEXT: s_bfe_u32 s0, s3, s7 +; GFX10-NEXT: s_or_b32 s5, s1, s5 ; GFX10-NEXT: s_and_b32 s1, s3, s8 ; GFX10-NEXT: s_lshl_b32 s0, s0, 8 ; GFX10-NEXT: v_mov_b32_e32 v2, s5 ; GFX10-NEXT: s_or_b32 s0, s1, s0 -; GFX10-NEXT: s_and_b32 s1, s15, s8 -; GFX10-NEXT: s_lshr_b32 s16, s3, 24 +; GFX10-NEXT: s_bfe_u32 s1, s3, s9 +; GFX10-NEXT: s_lshr_b32 s10, s3, 24 ; GFX10-NEXT: s_lshl_b32 
s1, s1, 16 ; GFX10-NEXT: v_cndmask_b32_e32 v2, s4, v2, vcc_lo ; GFX10-NEXT: s_or_b32 s1, s0, s1 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 2, v4 -; GFX10-NEXT: s_lshl_b32 s2, s16, 24 +; GFX10-NEXT: s_lshl_b32 s2, s10, 24 +; GFX10-NEXT: v_xor_b32_e32 v1, -1, v3 ; GFX10-NEXT: s_or_b32 s7, s1, s2 ; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 3, v4 ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s6, s0 ; GFX10-NEXT: v_cmp_eq_u32_e64 s2, 0, v4 +; GFX10-NEXT: s_mov_b32 s3, 8 ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s7, s1 ; GFX10-NEXT: v_and_or_b32 v5, v2, v1, v0 ; GFX10-NEXT: v_mov_b32_e32 v0, s4 @@ -6304,36 +5912,32 @@ ; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v5, s0 ; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v5, s1 -; GFX10-NEXT: s_mov_b32 s2, 8 -; GFX10-NEXT: v_lshrrev_b32_e32 v4, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v6, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v8, 8, v2 -; GFX10-NEXT: v_lshrrev_b32_e32 v11, 8, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v4, s2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v2 -; GFX10-NEXT: v_lshlrev_b32_sdwa v6, v9, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v8, v9, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v9, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshrrev_b32_e32 v12, 24, v3 -; GFX10-NEXT: v_and_or_b32 v4, v0, s8, v4 -; GFX10-NEXT: v_and_b32_sdwa v0, v0, s8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX10-NEXT: s_mov_b32 s2, 16 +; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v6, s3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v9, s3, v1 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v11, v10, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v2 +; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v10, v10, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_and_or_b32 v6, v0, s8, v6 +; GFX10-NEXT: v_and_or_b32 v9, v1, s8, v9 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, s2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 -; GFX10-NEXT: v_and_or_b32 v6, v1, s8, v6 -; GFX10-NEXT: v_and_or_b32 v8, v2, s8, v8 -; GFX10-NEXT: v_and_or_b32 v9, v3, s8, v9 -; GFX10-NEXT: v_and_b32_sdwa v1, v1, s8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX10-NEXT: v_and_or_b32 v11, v2, s8, v11 +; GFX10-NEXT: v_and_or_b32 v10, v3, s8, v10 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v12, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX10-NEXT: v_and_b32_sdwa v2, v2, s8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; GFX10-NEXT: v_and_b32_sdwa v3, v3, s8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v11, 24, v12 -; GFX10-NEXT: v_or3_b32 v0, v4, v0, v5 +; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v12, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; GFX10-NEXT: v_or3_b32 v0, v6, v0, v4 +; GFX10-NEXT: v_or3_b32 v1, v9, v1, v5 ; GFX10-NEXT: v_mov_b32_e32 v4, 0 -; GFX10-NEXT: v_or3_b32 v1, v6, v1, v7 -; GFX10-NEXT: v_or3_b32 v2, v8, v2, v10 -; GFX10-NEXT: v_or3_b32 v3, v9, 
v3, v11 +; GFX10-NEXT: v_or3_b32 v2, v11, v2, v7 +; GFX10-NEXT: v_or3_b32 v3, v10, v3, v8 ; GFX10-NEXT: v_mov_b32_e32 v5, 0 ; GFX10-NEXT: global_store_dwordx4 v[4:5], v[0:3], off ; GFX10-NEXT: s_endpgm @@ -6348,85 +5952,79 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: global_load_dwordx4 v[3:6], v[0:1], off ; GFX9-NEXT: s_mov_b32 s0, 8 +; GFX9-NEXT: s_mov_b32 s1, 16 ; GFX9-NEXT: v_mov_b32_e32 v0, 8 ; GFX9-NEXT: s_movk_i32 s6, 0xff -; GFX9-NEXT: v_lshrrev_b32_e32 v1, 2, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 2, v2 +; GFX9-NEXT: v_mov_b32_e32 v1, 16 ; GFX9-NEXT: v_and_b32_e32 v2, 3, v2 -; GFX9-NEXT: s_and_b32 s1, s2, s6 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v7 +; GFX9-NEXT: s_and_b32 s2, s2, s6 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 3, v2 -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1 -; GFX9-NEXT: v_lshlrev_b32_e64 v7, v2, s1 +; GFX9-NEXT: v_lshlrev_b32_e64 v8, v2, s2 ; GFX9-NEXT: v_lshlrev_b32_e64 v2, v2, s6 -; GFX9-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v1 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v7 ; GFX9-NEXT: v_xor_b32_e32 v2, -1, v2 -; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v1 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v7 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v8, 8, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v10, 8, v4 ; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v11, 24, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v12, 8, v5 -; GFX9-NEXT: v_lshlrev_b32_sdwa v8, s0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v10, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v13, 24, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v14, 8, v6 -; GFX9-NEXT: v_and_b32_sdwa v16, v3, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v17, v4, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshrrev_b32_e32 v10, 24, v4 +; GFX9-NEXT: v_lshlrev_b32_sdwa v13, s0, v3 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v15, s0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v11, 24, v5 +; GFX9-NEXT: v_lshlrev_b32_sdwa v14, s1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v16, s1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v17, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v3, v3, s6, v13 ; GFX9-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX9-NEXT: v_and_or_b32 v3, v3, s6, v8 +; GFX9-NEXT: v_and_or_b32 v4, v4, s6, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; GFX9-NEXT: v_lshrrev_b32_e32 v12, 24, v6 +; GFX9-NEXT: v_lshlrev_b32_sdwa v18, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v19, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v5, v5, s6, v17 ; GFX9-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX9-NEXT: v_and_or_b32 v4, v4, s6, v10 -; GFX9-NEXT: v_lshlrev_b32_sdwa v12, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 24, v6 -; GFX9-NEXT: v_and_b32_sdwa v18, v5, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; GFX9-NEXT: v_and_or_b32 v5, v5, s6, v12 -; GFX9-NEXT: v_lshlrev_b32_sdwa v14, v0, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or3_b32 v3, v3, v16, v9 -; GFX9-NEXT: v_or3_b32 v4, v4, v17, v11 -; GFX9-NEXT: v_and_b32_sdwa v19, v6, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v15, 24, v15 -; GFX9-NEXT: v_and_or_b32 v6, v6, s6, v14 -; GFX9-NEXT: v_or3_b32 v5, v5, v18, v13 -; GFX9-NEXT: v_cndmask_b32_e32 v8, v3, v4, vcc -; 
GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v1 -; GFX9-NEXT: v_or3_b32 v6, v6, v19, v15 -; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v5, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v6, s[2:3] -; GFX9-NEXT: v_and_or_b32 v2, v8, v2, v7 -; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, v2, s[4:5] -; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v2, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v4, v5, v2, s[0:1] +; GFX9-NEXT: v_or3_b32 v3, v3, v14, v9 +; GFX9-NEXT: v_or3_b32 v4, v4, v16, v10 +; GFX9-NEXT: v_and_or_b32 v13, v6, s6, v19 +; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v12, 24, v12 +; GFX9-NEXT: v_or3_b32 v5, v5, v18, v11 +; GFX9-NEXT: v_cndmask_b32_e32 v9, v3, v4, vcc +; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v7 +; GFX9-NEXT: v_or3_b32 v6, v13, v6, v12 +; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v5, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v6, s[2:3] +; GFX9-NEXT: v_and_or_b32 v2, v9, v2, v8 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v5, v5, v2, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v2, s[4:5] ; GFX9-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[2:3] -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v7, 8, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v9, 8, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v11, 8, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v8, 24, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v10, 24, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v12, 24, v2 -; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v7, v0, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v9, v0, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v13, 
v1, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v14, v3, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v15, v4, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v1, v1, s6, v5 -; GFX9-NEXT: v_and_b32_sdwa v16, v2, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX9-NEXT: v_and_or_b32 v5, v2, s6, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; GFX9-NEXT: v_and_or_b32 v4, v4, s6, v9 -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 24, v12 -; GFX9-NEXT: v_and_or_b32 v3, v3, s6, v7 -; GFX9-NEXT: v_or3_b32 v0, v1, v13, v6 -; GFX9-NEXT: v_or3_b32 v1, v3, v14, v8 -; GFX9-NEXT: v_or3_b32 v2, v4, v15, v10 -; GFX9-NEXT: v_or3_b32 v3, v5, v16, v11 +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v4 +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 24, v5 +; GFX9-NEXT: v_lshlrev_b32_sdwa v10, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v12, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v14, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v13, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v15, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v11, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v16, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v1, v3, s6, v10 +; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v6 +; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v7 +; 
GFX9-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_e32 v7, 24, v8 +; GFX9-NEXT: v_and_or_b32 v4, v4, s6, v12 +; GFX9-NEXT: v_and_or_b32 v5, v5, s6, v14 +; GFX9-NEXT: v_and_or_b32 v8, v2, s6, v0 +; GFX9-NEXT: v_or3_b32 v0, v1, v11, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; GFX9-NEXT: v_or3_b32 v1, v4, v13, v6 +; GFX9-NEXT: v_or3_b32 v2, v5, v15, v7 ; GFX9-NEXT: v_mov_b32_e32 v4, 0 +; GFX9-NEXT: v_or3_b32 v3, v8, v16, v9 ; GFX9-NEXT: v_mov_b32_e32 v5, 0 ; GFX9-NEXT: global_store_dwordx4 v[4:5], v[0:3], off ; GFX9-NEXT: s_endpgm @@ -6434,96 +6032,89 @@ ; GFX8-LABEL: insertelement_v_v16i8_s_v: ; GFX8: ; %bb.0: ; GFX8-NEXT: flat_load_dwordx4 v[3:6], v[0:1] -; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v7, s0 -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 2, v2 +; GFX8-NEXT: v_mov_b32_e32 v7, 8 +; GFX8-NEXT: v_mov_b32_e32 v1, 16 +; GFX8-NEXT: v_mov_b32_e32 v8, 16 +; GFX8-NEXT: v_lshrrev_b32_e32 v9, 2, v2 ; GFX8-NEXT: v_and_b32_e32 v2, 3, v2 +; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: s_and_b32 s1, s2, s0 ; GFX8-NEXT: v_lshlrev_b32_e32 v2, 3, v2 -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8 -; GFX8-NEXT: v_lshlrev_b32_e64 v9, v2, s1 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v9 +; GFX8-NEXT: v_lshlrev_b32_e64 v10, v2, s1 ; GFX8-NEXT: v_lshlrev_b32_e64 v2, v2, s0 -; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v8 -; GFX8-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v8 +; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v9 +; GFX8-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v9 ; GFX8-NEXT: v_xor_b32_e32 v2, -1, v2 -; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v8 +; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v9 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v12, 8, v4 -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 8, v3 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: 
v_lshrrev_b32_e32 v14, 8, v5 -; GFX8-NEXT: v_lshlrev_b32_sdwa v12, v1, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v15, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v3 -; GFX8-NEXT: v_lshrrev_b32_e32 v16, 8, v6 -; GFX8-NEXT: v_lshrrev_b32_e32 v13, 24, v4 -; GFX8-NEXT: v_and_b32_sdwa v18, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v3, v4, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v19, v4, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_sdwa v14, v1, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v15, 24, v5 -; GFX8-NEXT: v_lshlrev_b32_e32 v12, 24, v13 -; GFX8-NEXT: v_or_b32_e32 v3, v3, v19 -; GFX8-NEXT: v_and_b32_sdwa v10, v5, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v4, v5, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_sdwa v16, v1, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v18 -; GFX8-NEXT: v_lshrrev_b32_e32 v17, 24, v6 -; GFX8-NEXT: v_or_b32_sdwa v5, v6, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v6, v6, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v13, 24, v15 -; GFX8-NEXT: v_or_b32_e32 v4, v4, v10 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v11 -; GFX8-NEXT: v_or_b32_e32 v3, v3, 
v12 -; GFX8-NEXT: v_or_b32_e32 v5, v5, v6 -; GFX8-NEXT: v_lshlrev_b32_e32 v14, 24, v17 -; GFX8-NEXT: v_or_b32_e32 v4, v4, v13 -; GFX8-NEXT: v_cndmask_b32_e32 v6, v0, v3, vcc -; GFX8-NEXT: v_or_b32_e32 v5, v5, v14 -; GFX8-NEXT: v_cndmask_b32_e64 v6, v6, v4, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v6, v6, v5, s[2:3] -; GFX8-NEXT: v_and_b32_e32 v2, v6, v2 -; GFX8-NEXT: v_or_b32_e32 v2, v2, v9 -; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v2, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e64 v2, v5, v2, s[2:3] -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 8, v3 -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 8, v4 -; GFX8-NEXT: v_lshrrev_b32_e32 v12, 8, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v8, v1, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v9, 24, v3 -; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v4 -; GFX8-NEXT: v_lshrrev_b32_e32 v13, 24, v2 -; GFX8-NEXT: v_and_b32_sdwa v15, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v16, v4, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v14, v0, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v7, v2, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; 
GFX8-NEXT: v_or_b32_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v2, v3, v15 -; GFX8-NEXT: v_or_b32_e32 v3, v4, v16 -; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX8-NEXT: v_or_b32_e32 v4, v1, v7 +; GFX8-NEXT: v_lshrrev_b32_e32 v12, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_sdwa v16, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v3, v3, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v17, v7, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v13, 24, v5 +; GFX8-NEXT: v_lshlrev_b32_sdwa v18, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v19, v7, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_or_b32_sdwa v4, v5, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX8-NEXT: v_lshlrev_b32_e32 v12, 24, v12 ; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX8-NEXT: v_lshlrev_b32_e32 v12, 24, v13 -; GFX8-NEXT: v_or_b32_e32 v1, v2, v9 -; GFX8-NEXT: v_or_b32_e32 v2, v3, v11 -; GFX8-NEXT: v_or_b32_e32 v3, v4, v12 -; GFX8-NEXT: v_mov_b32_e32 v4, 0 +; GFX8-NEXT: v_or_b32_e32 v3, v3, v16 +; GFX8-NEXT: v_lshrrev_b32_e32 v14, 24, v6 +; GFX8-NEXT: v_or_b32_sdwa v5, v6, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v6, v8, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_e32 v1, v4, v18 +; GFX8-NEXT: v_lshlrev_b32_e32 v13, 24, v13 +; GFX8-NEXT: v_or_b32_e32 v3, v3, v11 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v12 +; GFX8-NEXT: v_or_b32_e32 v4, v5, v6 +; GFX8-NEXT: v_lshlrev_b32_e32 v14, 24, v14 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v13 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v3, v0, vcc +; GFX8-NEXT: v_or_b32_e32 v4, v4, v14 +; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, v1, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, v4, s[2:3] +; GFX8-NEXT: v_and_b32_e32 v2, v5, v2 +; GFX8-NEXT: v_or_b32_e32 v2, v2, v10 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v2, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v2, v4, v2, s[2:3] +; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v7, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v12, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v11, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v3, v3, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v13, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v14, v7, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v9, 24, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v7, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v15, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v8, v8, v2 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v2, v2, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v7, 24, v9 +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX8-NEXT: v_or_b32_e32 v3, v3, v11 +; GFX8-NEXT: v_or_b32_e32 v9, v0, v13 +; GFX8-NEXT: v_or_b32_e32 v10, v1, v15 ; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v14 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v6 +; GFX8-NEXT: v_or_b32_e32 v8, v2, v8 +; GFX8-NEXT: v_or_b32_e32 v0, v3, v4 +; GFX8-NEXT: v_or_b32_e32 v1, v9, v5 +; GFX8-NEXT: v_mov_b32_e32 v4, 0 +; GFX8-NEXT: v_or_b32_e32 v2, v10, v6 +; GFX8-NEXT: v_or_b32_e32 v3, v8, v7 ; GFX8-NEXT: v_mov_b32_e32 v5, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NEXT: s_endpgm @@ -6548,110 +6139,94 @@ ; GFX7-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v17 ; GFX7-NEXT: s_mov_b32 s10, -1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 8, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 16, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 8, v5 -; GFX7-NEXT: v_and_b32_e32 v0, s6, v0 -; GFX7-NEXT: v_and_b32_e32 v8, s6, v8 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v10, 24, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v14, 8, v6 -; GFX7-NEXT: v_and_b32_e32 v1, s6, v1 -; GFX7-NEXT: v_and_b32_e32 v9, s6, v9 -; GFX7-NEXT: v_and_b32_e32 v11, s6, v11 -; GFX7-NEXT: v_and_b32_e32 v3, s6, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; GFX7-NEXT: v_and_b32_e32 v4, s6, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v8, 8, v8 -; GFX7-NEXT: v_lshrrev_b32_e32 v13, 24, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v6 -; GFX7-NEXT: v_and_b32_e32 v12, s6, v12 -; GFX7-NEXT: v_and_b32_e32 v14, 
s6, v14 -; GFX7-NEXT: v_or_b32_e32 v0, v3, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_and_b32_e32 v5, s6, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; GFX7-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; GFX7-NEXT: v_or_b32_e32 v3, v4, v8 -; GFX7-NEXT: v_lshrrev_b32_e32 v16, 24, v6 -; GFX7-NEXT: v_and_b32_e32 v15, s6, v15 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX7-NEXT: v_and_b32_e32 v6, s6, v6 -; GFX7-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; GFX7-NEXT: v_or_b32_e32 v1, v3, v9 -; GFX7-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v11 +; GFX7-NEXT: v_bfe_u32 v10, v3, 8, 8 +; GFX7-NEXT: v_bfe_u32 v12, v4, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v4 +; GFX7-NEXT: v_bfe_u32 v14, v5, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v9, s6, v3 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v11, s6, v4 +; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v10, 8, v10 +; GFX7-NEXT: v_lshlrev_b32_e32 v12, 8, v12 +; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v5 +; GFX7-NEXT: v_bfe_u32 v16, v6, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v13, s6, v5 +; GFX7-NEXT: v_bfe_u32 v5, v5, 16, 8 +; GFX7-NEXT: v_or_b32_e32 v9, v9, v10 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX7-NEXT: v_or_b32_e32 v10, v11, v12 ; GFX7-NEXT: v_lshlrev_b32_e32 v14, 8, v14 -; GFX7-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; GFX7-NEXT: v_or_b32_e32 v3, v4, v12 -; GFX7-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; GFX7-NEXT: v_or_b32_e32 v5, v6, v14 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v7 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v10 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v15 -; GFX7-NEXT: v_lshlrev_b32_e32 v16, 24, v16 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v13 +; GFX7-NEXT: v_lshrrev_b32_e32 v8, 24, v6 +; GFX7-NEXT: v_and_b32_e32 v15, s6, v6 +; GFX7-NEXT: v_bfe_u32 v6, v6, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v16, 8, v16 +; GFX7-NEXT: 
v_lshlrev_b32_e32 v0, 24, v0 +; GFX7-NEXT: v_or_b32_e32 v3, v9, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX7-NEXT: v_or_b32_e32 v4, v10, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX7-NEXT: v_or_b32_e32 v11, v13, v14 +; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX7-NEXT: v_or_b32_e32 v12, v15, v16 +; GFX7-NEXT: v_or_b32_e32 v5, v11, v5 +; GFX7-NEXT: v_or_b32_e32 v0, v3, v0 +; GFX7-NEXT: v_or_b32_e32 v1, v4, v1 +; GFX7-NEXT: v_or_b32_e32 v3, v5, v7 ; GFX7-NEXT: v_cndmask_b32_e32 v5, v0, v1, vcc -; GFX7-NEXT: v_or_b32_e32 v4, v4, v16 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; GFX7-NEXT: v_or_b32_e32 v6, v12, v6 +; GFX7-NEXT: v_or_b32_e32 v4, v6, v8 ; GFX7-NEXT: v_cndmask_b32_e64 v5, v5, v3, s[0:1] ; GFX7-NEXT: v_cndmask_b32_e64 v5, v5, v4, s[2:3] ; GFX7-NEXT: v_and_b32_e32 v2, v5, v2 ; GFX7-NEXT: v_or_b32_e32 v2, v2, v18 -; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[4:5] ; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc +; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[4:5] ; GFX7-NEXT: v_cndmask_b32_e64 v3, v3, v2, s[0:1] +; GFX7-NEXT: v_bfe_u32 v9, v0, 8, 8 +; GFX7-NEXT: v_bfe_u32 v11, v1, 8, 8 ; GFX7-NEXT: v_cndmask_b32_e64 v4, v4, v2, s[2:3] -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX7-NEXT: v_and_b32_e32 v2, s6, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 8, v1 -; GFX7-NEXT: v_and_b32_e32 v0, s6, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s6, v7 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, s6, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s6, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v9 -; GFX7-NEXT: v_lshrrev_b32_e32 v10, 8, v3 -; 
GFX7-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v12, 24, v3 -; GFX7-NEXT: v_and_b32_e32 v2, s6, v3 -; GFX7-NEXT: v_and_b32_e32 v3, s6, v10 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 -; GFX7-NEXT: v_and_b32_e32 v3, s6, v11 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX7-NEXT: v_bfe_u32 v13, v3, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v8, s6, v0 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v10, s6, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; GFX7-NEXT: v_lshlrev_b32_e32 v11, 8, v11 +; GFX7-NEXT: v_or_b32_e32 v8, v8, v9 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v3 +; GFX7-NEXT: v_and_b32_e32 v12, s6, v3 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v9, v10, v11 +; GFX7-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX7-NEXT: v_or_b32_e32 v0, v8, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX7-NEXT: v_or_b32_e32 v1, v9, v1 ; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_or_b32_e32 v10, v12, v13 +; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v5 +; GFX7-NEXT: v_bfe_u32 v5, v4, 8, 8 +; GFX7-NEXT: v_or_b32_e32 v2, v10, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v6 ; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v12 -; GFX7-NEXT: v_lshrrev_b32_e32 v13, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v15, 24, v4 +; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v4 ; GFX7-NEXT: v_and_b32_e32 v3, s6, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s6, v13 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_and_b32_e32 v5, s6, v5 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s6, v14 -; GFX7-NEXT: 
v_lshlrev_b32_e32 v5, 16, v5 +; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX7-NEXT: v_or_b32_e32 v3, v3, v5 ; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v15 -; GFX7-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v6 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v7 ; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 ; GFX7-NEXT: s_endpgm @@ -6659,86 +6234,80 @@ ; GFX10-LABEL: insertelement_v_v16i8_s_v: ; GFX10: ; %bb.0: ; GFX10-NEXT: global_load_dwordx4 v[3:6], v[0:1], off -; GFX10-NEXT: v_mov_b32_e32 v1, 8 ; GFX10-NEXT: s_mov_b32 s0, 8 +; GFX10-NEXT: v_mov_b32_e32 v1, 8 +; GFX10-NEXT: s_mov_b32 s1, 16 ; GFX10-NEXT: s_movk_i32 s3, 0xff ; GFX10-NEXT: v_and_b32_e32 v0, 3, v2 +; GFX10-NEXT: v_mov_b32_e32 v7, 16 ; GFX10-NEXT: v_lshrrev_b32_e32 v2, 2, v2 -; GFX10-NEXT: s_and_b32 s1, s2, s3 ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v2 -; GFX10-NEXT: v_cmp_eq_u32_e64 s2, 0, v2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 8, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v9, 8, v4 ; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v4 -; GFX10-NEXT: v_lshrrev_b32_e32 v11, 8, v5 -; GFX10-NEXT: v_lshlrev_b32_sdwa v7, s0, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v1, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v15, v3, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_b32_sdwa v16, v4, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshrrev_b32_e32 v12, 24, v5 -; GFX10-NEXT: v_and_or_b32 v3, v3, s3, v7 +; GFX10-NEXT: v_lshrrev_b32_e32 v9, 24, v4 +; GFX10-NEXT: v_lshlrev_b32_sdwa v12, 
s0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v14, s0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v5 +; GFX10-NEXT: v_lshlrev_b32_sdwa v13, s1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v15, s1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v3, v3, s3, v12 ; GFX10-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; GFX10-NEXT: v_lshrrev_b32_e32 v13, 8, v6 -; GFX10-NEXT: v_and_or_b32 v4, v4, s3, v9 +; GFX10-NEXT: v_lshlrev_b32_sdwa v16, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_and_or_b32 v4, v4, s3, v14 +; GFX10-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; GFX10-NEXT: v_lshlrev_b32_sdwa v17, v7, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshrrev_b32_e32 v11, 24, v6 +; GFX10-NEXT: v_or3_b32 v3, v3, v13, v8 +; GFX10-NEXT: v_lshlrev_b32_sdwa v18, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_or3_b32 v4, v4, v15, v9 +; GFX10-NEXT: v_and_or_b32 v5, v5, s3, v16 ; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; GFX10-NEXT: v_lshlrev_b32_sdwa v11, v1, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v17, v5, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshrrev_b32_e32 v14, 24, v6 -; GFX10-NEXT: v_or3_b32 v3, v3, v15, v8 -; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v12 -; GFX10-NEXT: v_and_or_b32 v5, v5, s3, v11 -; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v1, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_or3_b32 v4, v4, v16, v10 -; GFX10-NEXT: v_and_b32_sdwa v18, v6, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v8, 24, v14 -; GFX10-NEXT: 
v_or3_b32 v5, v5, v17, v7 -; GFX10-NEXT: v_and_or_b32 v6, v6, s3, v9 -; GFX10-NEXT: v_cndmask_b32_e32 v7, v3, v4, vcc_lo +; GFX10-NEXT: v_lshlrev_b32_sdwa v12, v7, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v6, v6, s3, v18 +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 24, v11 +; GFX10-NEXT: v_cndmask_b32_e32 v9, v3, v4, vcc_lo +; GFX10-NEXT: v_or3_b32 v5, v5, v17, v10 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 2, v2 -; GFX10-NEXT: v_lshlrev_b32_e64 v9, v0, s3 +; GFX10-NEXT: s_and_b32 s1, s2, s3 +; GFX10-NEXT: v_lshlrev_b32_e64 v10, v0, s3 +; GFX10-NEXT: v_or3_b32 v6, v6, v12, v8 ; GFX10-NEXT: v_lshlrev_b32_e64 v0, v0, s1 -; GFX10-NEXT: v_or3_b32 v6, v6, v18, v8 +; GFX10-NEXT: v_cndmask_b32_e64 v8, v9, v5, s0 ; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 3, v2 -; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, v5, s0 -; GFX10-NEXT: v_xor_b32_e32 v8, -1, v9 -; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, v6, s1 -; GFX10-NEXT: v_and_or_b32 v0, v7, v8, v0 +; GFX10-NEXT: v_xor_b32_e32 v9, -1, v10 +; GFX10-NEXT: v_cmp_eq_u32_e64 s2, 0, v2 +; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, v6, s1 +; GFX10-NEXT: v_and_or_b32 v0, v8, v9, v0 ; GFX10-NEXT: v_cndmask_b32_e64 v2, v3, v0, s2 ; GFX10-NEXT: v_cndmask_b32_e32 v3, v4, v0, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v4, v5, v0, s0 ; GFX10-NEXT: v_cndmask_b32_e64 v0, v6, v0, s1 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v2 -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 8, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v9, 8, v4 -; GFX10-NEXT: v_lshrrev_b32_e32 v11, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v2 -; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v4 -; GFX10-NEXT: v_lshrrev_b32_e32 v12, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v7, v1, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v1, v9 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v13, v2, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_b32_sdwa v14, v3, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_b32_sdwa v15, v4, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v2, v2, s3, v5 -; GFX10-NEXT: v_and_b32_sdwa v16, v0, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX10-NEXT: v_and_or_b32 v5, v0, s3, v1 +; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v10, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v4 +; GFX10-NEXT: v_lshlrev_b32_sdwa v14, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v3 +; GFX10-NEXT: v_lshrrev_b32_e32 v9, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v12, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v11, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v15, v7, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v2, v2, s3, v10 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX10-NEXT: v_and_or_b32 v4, v4, s3, v14 ; GFX10-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; GFX10-NEXT: v_and_or_b32 v3, v3, s3, v7 -; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; GFX10-NEXT: v_lshlrev_b32_e32 v11, 24, v12 -; GFX10-NEXT: v_and_or_b32 v4, v4, s3, v9 -; GFX10-NEXT: v_or3_b32 v0, v2, v13, v6 -; GFX10-NEXT: v_or3_b32 v1, v3, v14, 
v8 -; GFX10-NEXT: v_or3_b32 v3, v5, v16, v11 -; GFX10-NEXT: v_or3_b32 v2, v4, v15, v10 +; GFX10-NEXT: v_lshlrev_b32_sdwa v13, v7, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v7, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v10, v0, s3, v1 +; GFX10-NEXT: v_and_or_b32 v3, v3, s3, v12 +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; GFX10-NEXT: v_or3_b32 v0, v2, v11, v5 +; GFX10-NEXT: v_or3_b32 v2, v4, v15, v8 ; GFX10-NEXT: v_mov_b32_e32 v4, 0 +; GFX10-NEXT: v_or3_b32 v1, v3, v13, v6 +; GFX10-NEXT: v_or3_b32 v3, v10, v7, v9 ; GFX10-NEXT: v_mov_b32_e32 v5, 0 ; GFX10-NEXT: global_store_dwordx4 v[4:5], v[0:3], off ; GFX10-NEXT: s_endpgm @@ -6752,85 +6321,79 @@ ; GFX9-LABEL: insertelement_v_v16i8_v_s: ; GFX9: ; %bb.0: ; GFX9-NEXT: global_load_dwordx4 v[3:6], v[0:1], off -; GFX9-NEXT: s_and_b32 s1, s2, 3 -; GFX9-NEXT: s_lshl_b32 s1, s1, 3 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: s_mov_b32 s0, 8 +; GFX9-NEXT: s_mov_b32 s1, 16 ; GFX9-NEXT: v_mov_b32_e32 v0, 8 ; GFX9-NEXT: s_movk_i32 s6, 0xff +; GFX9-NEXT: v_mov_b32_e32 v1, 16 ; GFX9-NEXT: s_lshr_b32 s4, s2, 2 -; GFX9-NEXT: s_lshl_b32 s1, s6, s1 +; GFX9-NEXT: s_and_b32 s2, s2, 3 +; GFX9-NEXT: s_lshl_b32 s2, s2, 3 ; GFX9-NEXT: v_cmp_eq_u32_e64 vcc, s4, 1 -; GFX9-NEXT: s_not_b32 s5, s1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX9-NEXT: s_lshl_b32 s2, s6, s2 +; GFX9-NEXT: s_not_b32 s5, s2 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[2:3], s4, 3 ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v2, 8, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v8, 8, v4 -; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v3 -; GFX9-NEXT: 
v_lshrrev_b32_e32 v9, 24, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v10, 8, v5 -; GFX9-NEXT: v_lshlrev_b32_sdwa v8, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v11, 24, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v12, 8, v6 -; GFX9-NEXT: v_and_b32_sdwa v14, v3, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v2, v3, s6, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 24, v4 +; GFX9-NEXT: v_lshlrev_b32_sdwa v11, s0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v13, s0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v5 +; GFX9-NEXT: v_lshlrev_b32_sdwa v12, s1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v14, s1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v15, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v3, v3, s6, v11 ; GFX9-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX9-NEXT: v_and_b32_sdwa v15, v4, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_and_or_b32 v4, v4, s6, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; GFX9-NEXT: v_lshrrev_b32_e32 v10, 24, v6 +; GFX9-NEXT: v_lshlrev_b32_sdwa v16, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v17, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v5, v5, s6, v15 ; GFX9-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX9-NEXT: v_and_or_b32 v3, v4, s6, v8 -; GFX9-NEXT: v_lshlrev_b32_sdwa v10, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v13, 24, v6 -; GFX9-NEXT: v_and_b32_sdwa v16, v5, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 
src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX9-NEXT: v_and_or_b32 v4, v5, s6, v10 -; GFX9-NEXT: v_lshlrev_b32_sdwa v12, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_or3_b32 v2, v2, v14, v7 -; GFX9-NEXT: v_or3_b32 v3, v3, v15, v9 -; GFX9-NEXT: v_and_b32_sdwa v17, v6, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v5, v6, s6, v12 -; GFX9-NEXT: v_lshlrev_b32_e32 v13, 24, v13 -; GFX9-NEXT: v_or3_b32 v4, v4, v16, v11 -; GFX9-NEXT: v_cndmask_b32_e32 v6, v2, v3, vcc +; GFX9-NEXT: v_or3_b32 v3, v3, v12, v7 +; GFX9-NEXT: v_or3_b32 v4, v4, v14, v8 +; GFX9-NEXT: v_lshlrev_b32_sdwa v18, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v6, v6, s6, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; GFX9-NEXT: v_or3_b32 v5, v5, v16, v9 +; GFX9-NEXT: v_cndmask_b32_e32 v7, v3, v4, vcc ; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], s4, 2 -; GFX9-NEXT: v_or3_b32 v5, v5, v17, v13 -; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v4, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v5, s[2:3] -; GFX9-NEXT: v_and_or_b32 v1, v6, s5, v1 +; GFX9-NEXT: v_or3_b32 v6, v6, v18, v10 +; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v5, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, v6, s[2:3] +; GFX9-NEXT: v_and_or_b32 v2, v7, s5, v2 ; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], s4, 0 -; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v1, vcc -; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v1, s[0:1] -; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v1, s[4:5] -; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, v1, s[2:3] -; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v7, 8, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v9, 8, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v11, 8, v1 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v2 -; GFX9-NEXT: v_lshrrev_b32_e32 v8, 24, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v10, 24, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v12, 24, v1 -; GFX9-NEXT: v_lshlrev_b32_sdwa v5, v0, v5 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v7, v0, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v9, v0, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v13, v2, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v14, v3, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v15, v4, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v2, v2, s6, v5 -; GFX9-NEXT: v_and_b32_sdwa v16, v1, s6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX9-NEXT: v_and_or_b32 v5, v1, s6, v0 -; GFX9-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; GFX9-NEXT: v_and_or_b32 v3, v3, s6, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 24, v12 -; GFX9-NEXT: v_and_or_b32 v4, v4, s6, v9 -; GFX9-NEXT: v_or3_b32 v0, v2, v13, v6 -; GFX9-NEXT: v_or3_b32 v1, v3, v14, v8 -; GFX9-NEXT: v_or3_b32 v2, v4, v15, v10 -; GFX9-NEXT: v_or3_b32 v3, v5, v16, v11 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc +; GFX9-NEXT: v_cndmask_b32_e64 v5, v5, v2, s[0:1] +; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, v2, s[4:5] +; GFX9-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[2:3] +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v4 +; GFX9-NEXT: v_lshrrev_b32_e32 v8, 24, v5 +; GFX9-NEXT: v_lshlrev_b32_sdwa v10, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v12, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v14, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v13, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v15, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v11, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v16, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v1, v3, s6, v10 +; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v6 +; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v7 +; GFX9-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_e32 v7, 24, v8 +; GFX9-NEXT: v_and_or_b32 v4, v4, s6, v12 +; GFX9-NEXT: v_and_or_b32 v5, v5, s6, v14 +; GFX9-NEXT: v_and_or_b32 v8, v2, s6, v0 +; GFX9-NEXT: v_or3_b32 v0, v1, v11, v3 +; GFX9-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; GFX9-NEXT: v_or3_b32 v1, v4, v13, v6 +; GFX9-NEXT: v_or3_b32 v2, v5, v15, v7 ; GFX9-NEXT: v_mov_b32_e32 v4, 0 +; GFX9-NEXT: v_or3_b32 v3, v8, v16, v9 ; GFX9-NEXT: v_mov_b32_e32 v5, 0 ; GFX9-NEXT: global_store_dwordx4 v[4:5], v[0:3], off ; GFX9-NEXT: s_endpgm @@ -6839,95 +6402,88 @@ ; GFX8: ; %bb.0: ; GFX8-NEXT: flat_load_dwordx4 v[3:6], v[0:1] ; GFX8-NEXT: s_and_b32 s1, s2, 3 -; GFX8-NEXT: s_lshl_b32 s1, s1, 3 -; GFX8-NEXT: v_mov_b32_e32 v8, s1 -; GFX8-NEXT: s_movk_i32 s0, 0xff -; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX8-NEXT: v_mov_b32_e32 v0, 8 -; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v7, s0 +; GFX8-NEXT: s_lshl_b32 s1, s1, 3 +; GFX8-NEXT: v_mov_b32_e32 v7, 8 +; GFX8-NEXT: v_mov_b32_e32 v1, 16 +; GFX8-NEXT: v_mov_b32_e32 v9, s1 +; GFX8-NEXT: v_mov_b32_e32 v8, 16 +; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v9, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 ; GFX8-NEXT: 
s_lshr_b32 s4, s2, 2 +; GFX8-NEXT: s_movk_i32 s0, 0xff ; GFX8-NEXT: s_lshl_b32 s0, s0, s1 ; GFX8-NEXT: v_cmp_eq_u32_e64 vcc, s4, 1 ; GFX8-NEXT: s_not_b32 s5, s0 ; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], s4, 2 ; GFX8-NEXT: v_cmp_eq_u32_e64 s[2:3], s4, 3 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 8, v3 +; GFX8-NEXT: v_lshlrev_b32_sdwa v13, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 ; GFX8-NEXT: v_lshrrev_b32_e32 v9, 24, v3 -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 8, v4 -; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_e32 v8, 24, v9 -; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v4 -; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v12, 8, v5 +; GFX8-NEXT: v_lshlrev_b32_sdwa v14, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v3, v3, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshrrev_b32_e32 v10, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v15, v7, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v17, v7, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v5 +; GFX8-NEXT: v_lshlrev_b32_sdwa v16, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v10 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v1 +; GFX8-NEXT: v_or_b32_sdwa v5, v5, v15 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshrrev_b32_e32 v12, 24, v6 +; GFX8-NEXT: v_lshlrev_b32_sdwa v18, v8, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; GFX8-NEXT: v_or_b32_e32 v3, v3, v14 +; GFX8-NEXT: v_or_b32_sdwa v6, v6, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD ; GFX8-NEXT: v_lshlrev_b32_e32 v10, 24, v11 -; GFX8-NEXT: v_lshrrev_b32_e32 v13, 24, v5 -; GFX8-NEXT: v_lshlrev_b32_sdwa v11, v1, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v14, 8, v6 -; GFX8-NEXT: v_and_b32_sdwa v16, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v17, v4, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v3, v4, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, v0, v16 -; GFX8-NEXT: v_or_b32_e32 v3, v3, v17 -; GFX8-NEXT: v_lshlrev_b32_e32 v12, 24, v13 -; GFX8-NEXT: v_and_b32_sdwa v18, v5, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v4, v5, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_sdwa v13, v1, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_or_b32_e32 v4, v4, v18 -; GFX8-NEXT: v_lshrrev_b32_e32 v15, 24, v6 -; GFX8-NEXT: v_and_b32_sdwa v19, v6, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v5, v6, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v0, v0, v8 -; GFX8-NEXT: v_or_b32_e32 v3, v3, v10 -; GFX8-NEXT: v_lshlrev_b32_e32 v14, 24, v15 -; GFX8-NEXT: v_or_b32_e32 v5, v5, v19 -; GFX8-NEXT: 
v_or_b32_e32 v4, v4, v12 -; GFX8-NEXT: v_cndmask_b32_e32 v6, v0, v3, vcc -; GFX8-NEXT: v_or_b32_e32 v5, v5, v14 -; GFX8-NEXT: v_cndmask_b32_e64 v6, v6, v4, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v6, v6, v5, s[2:3] -; GFX8-NEXT: v_and_b32_e32 v6, s5, v6 -; GFX8-NEXT: v_or_b32_e32 v2, v6, v2 +; GFX8-NEXT: v_or_b32_e32 v1, v5, v16 +; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v12 +; GFX8-NEXT: v_or_b32_e32 v5, v6, v18 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v4 +; GFX8-NEXT: v_or_b32_e32 v3, v3, v9 +; GFX8-NEXT: v_or_b32_e32 v4, v5, v11 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v10 +; GFX8-NEXT: v_cndmask_b32_e32 v5, v3, v0, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, v1, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, v4, s[2:3] +; GFX8-NEXT: v_and_b32_e32 v5, s5, v5 +; GFX8-NEXT: v_or_b32_e32 v2, v5, v2 ; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], s4, 0 -; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v2, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e64 v2, v5, v2, s[2:3] -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v0 -; GFX8-NEXT: v_lshrrev_b32_e32 v8, 8, v3 -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 8, v4 -; GFX8-NEXT: v_lshrrev_b32_e32 v12, 8, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v8, v1, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v9, 24, v3 -; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v4 -; GFX8-NEXT: v_lshrrev_b32_e32 v13, 24, v2 -; GFX8-NEXT: v_and_b32_sdwa v15, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v16, v4, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 
src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v14, v0, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v7, v2, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v3, v3, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX8-NEXT: v_or_b32_sdwa v0, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_e32 v2, v3, v15 -; GFX8-NEXT: v_or_b32_e32 v3, v4, v16 -; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX8-NEXT: v_or_b32_e32 v4, v1, v7 -; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX8-NEXT: v_lshlrev_b32_e32 v12, 24, v13 -; GFX8-NEXT: v_or_b32_e32 v1, v2, v9 -; GFX8-NEXT: v_or_b32_e32 v2, v3, v11 -; GFX8-NEXT: v_or_b32_e32 v3, v4, v12 -; GFX8-NEXT: v_mov_b32_e32 v4, 0 +; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, v2, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v2, v4, v2, s[2:3] +; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v7, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v12, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v11, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v3, v3, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v13, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v14, v7, v1 dst_sel:DWORD 
dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v9, 24, v2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v7, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v15, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_lshlrev_b32_sdwa v8, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v2, v2, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v7, 24, v9 +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX8-NEXT: v_or_b32_e32 v3, v3, v11 +; GFX8-NEXT: v_or_b32_e32 v9, v0, v13 +; GFX8-NEXT: v_or_b32_e32 v10, v1, v15 ; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v14 -; GFX8-NEXT: v_or_b32_e32 v0, v0, v6 +; GFX8-NEXT: v_or_b32_e32 v8, v2, v8 +; GFX8-NEXT: v_or_b32_e32 v0, v3, v4 +; GFX8-NEXT: v_or_b32_e32 v1, v9, v5 +; GFX8-NEXT: v_mov_b32_e32 v4, 0 +; GFX8-NEXT: v_or_b32_e32 v2, v10, v6 +; GFX8-NEXT: v_or_b32_e32 v3, v8, v7 ; GFX8-NEXT: v_mov_b32_e32 v5, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NEXT: s_endpgm @@ -6951,111 +6507,95 @@ ; GFX7-NEXT: v_cmp_eq_u32_e64 s[2:3], s4, 3 ; GFX7-NEXT: s_mov_b32 s10, -1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 8, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 8, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 16, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 8, v5 -; GFX7-NEXT: v_and_b32_e32 v1, s6, v1 -; GFX7-NEXT: v_and_b32_e32 v8, s6, v8 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v10, 24, v4 -; 
GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v14, 8, v6 -; GFX7-NEXT: v_and_b32_e32 v2, s6, v2 -; GFX7-NEXT: v_and_b32_e32 v9, s6, v9 -; GFX7-NEXT: v_and_b32_e32 v11, s6, v11 -; GFX7-NEXT: v_and_b32_e32 v3, s6, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_and_b32_e32 v4, s6, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v8, 8, v8 -; GFX7-NEXT: v_lshrrev_b32_e32 v13, 24, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v6 -; GFX7-NEXT: v_and_b32_e32 v12, s6, v12 -; GFX7-NEXT: v_and_b32_e32 v14, s6, v14 -; GFX7-NEXT: v_or_b32_e32 v1, v3, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_and_b32_e32 v5, s6, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v9 -; GFX7-NEXT: v_lshlrev_b32_e32 v11, 8, v11 -; GFX7-NEXT: v_or_b32_e32 v3, v4, v8 -; GFX7-NEXT: v_lshrrev_b32_e32 v16, 24, v6 -; GFX7-NEXT: v_and_b32_e32 v15, s6, v15 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX7-NEXT: v_and_b32_e32 v6, s6, v6 -; GFX7-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; GFX7-NEXT: v_or_b32_e32 v2, v3, v9 -; GFX7-NEXT: v_lshlrev_b32_e32 v12, 16, v12 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v11 +; GFX7-NEXT: v_bfe_u32 v10, v3, 8, 8 +; GFX7-NEXT: v_bfe_u32 v12, v4, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v3 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v4 +; GFX7-NEXT: v_bfe_u32 v14, v5, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v9, s6, v3 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v11, s6, v4 +; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v10, 8, v10 +; GFX7-NEXT: v_lshlrev_b32_e32 v12, 8, v12 +; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v5 +; GFX7-NEXT: v_bfe_u32 v16, v6, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v13, s6, v5 +; GFX7-NEXT: v_bfe_u32 v5, v5, 16, 8 +; GFX7-NEXT: v_or_b32_e32 v9, v9, v10 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX7-NEXT: v_or_b32_e32 v10, v11, v12 ; GFX7-NEXT: v_lshlrev_b32_e32 v14, 8, v14 -; GFX7-NEXT: v_lshlrev_b32_e32 v13, 
24, v13 -; GFX7-NEXT: v_or_b32_e32 v3, v4, v12 -; GFX7-NEXT: v_lshlrev_b32_e32 v15, 16, v15 -; GFX7-NEXT: v_or_b32_e32 v5, v6, v14 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v7 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v10 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v15 -; GFX7-NEXT: v_lshlrev_b32_e32 v16, 24, v16 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v13 +; GFX7-NEXT: v_lshrrev_b32_e32 v8, 24, v6 +; GFX7-NEXT: v_and_b32_e32 v15, s6, v6 +; GFX7-NEXT: v_bfe_u32 v6, v6, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v16, 8, v16 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX7-NEXT: v_or_b32_e32 v3, v9, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX7-NEXT: v_or_b32_e32 v4, v10, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX7-NEXT: v_or_b32_e32 v11, v13, v14 +; GFX7-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX7-NEXT: v_or_b32_e32 v12, v15, v16 +; GFX7-NEXT: v_or_b32_e32 v5, v11, v5 +; GFX7-NEXT: v_or_b32_e32 v1, v3, v1 +; GFX7-NEXT: v_or_b32_e32 v2, v4, v2 +; GFX7-NEXT: v_or_b32_e32 v3, v5, v7 ; GFX7-NEXT: v_cndmask_b32_e32 v5, v1, v2, vcc -; GFX7-NEXT: v_or_b32_e32 v4, v4, v16 +; GFX7-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; GFX7-NEXT: v_or_b32_e32 v6, v12, v6 +; GFX7-NEXT: v_or_b32_e32 v4, v6, v8 ; GFX7-NEXT: v_cndmask_b32_e64 v5, v5, v3, s[0:1] ; GFX7-NEXT: v_cndmask_b32_e64 v5, v5, v4, s[2:3] ; GFX7-NEXT: v_and_b32_e32 v5, s5, v5 ; GFX7-NEXT: v_or_b32_e32 v0, v5, v0 ; GFX7-NEXT: v_cmp_eq_u32_e64 s[4:5], s4, 0 -; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v0, s[4:5] ; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc +; GFX7-NEXT: v_cndmask_b32_e64 v1, v1, v0, s[4:5] ; GFX7-NEXT: v_cndmask_b32_e64 v3, v3, v0, s[0:1] +; GFX7-NEXT: v_bfe_u32 v9, v1, 8, 8 +; GFX7-NEXT: v_bfe_u32 v11, v2, 8, 8 ; GFX7-NEXT: v_cndmask_b32_e64 v4, v4, v0, s[2:3] -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v1 -; GFX7-NEXT: v_and_b32_e32 v0, s6, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 8, v2 -; GFX7-NEXT: 
v_and_b32_e32 v1, s6, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 -; GFX7-NEXT: v_and_b32_e32 v1, s6, v7 -; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 24, v2 -; GFX7-NEXT: v_and_b32_e32 v2, s6, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 8, v1 -; GFX7-NEXT: v_or_b32_e32 v1, v2, v1 -; GFX7-NEXT: v_and_b32_e32 v2, s6, v8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v1 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v2 +; GFX7-NEXT: v_bfe_u32 v13, v3, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v8, s6, v1 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v10, s6, v2 +; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v9, 8, v9 +; GFX7-NEXT: v_lshlrev_b32_e32 v11, 8, v11 +; GFX7-NEXT: v_or_b32_e32 v8, v8, v9 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v3 +; GFX7-NEXT: v_and_b32_e32 v12, s6, v3 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 ; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v9 -; GFX7-NEXT: v_lshrrev_b32_e32 v10, 8, v3 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v12, 24, v3 -; GFX7-NEXT: v_and_b32_e32 v2, s6, v3 -; GFX7-NEXT: v_and_b32_e32 v3, s6, v10 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 -; GFX7-NEXT: v_and_b32_e32 v3, s6, v11 +; GFX7-NEXT: v_or_b32_e32 v9, v10, v11 +; GFX7-NEXT: v_lshlrev_b32_e32 v13, 8, v13 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 +; GFX7-NEXT: v_or_b32_e32 v1, v8, v1 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5 ; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 +; GFX7-NEXT: v_or_b32_e32 v10, v12, v13 +; GFX7-NEXT: v_or_b32_e32 v2, v9, v2 +; GFX7-NEXT: v_or_b32_e32 v0, v1, v0 +; GFX7-NEXT: v_or_b32_e32 v1, v2, v5 +; GFX7-NEXT: v_bfe_u32 v5, v4, 8, 8 +; GFX7-NEXT: v_or_b32_e32 v2, v10, v3 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v6 ; GFX7-NEXT: v_or_b32_e32 v2, 
v2, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v12 -; GFX7-NEXT: v_lshrrev_b32_e32 v13, 8, v4 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v15, 24, v4 +; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v4 ; GFX7-NEXT: v_and_b32_e32 v3, s6, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s6, v13 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_and_b32_e32 v5, s6, v5 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_and_b32_e32 v4, s6, v14 -; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX7-NEXT: v_or_b32_e32 v3, v3, v5 ; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v15 -; GFX7-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v6 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v7 ; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 ; GFX7-NEXT: s_endpgm @@ -7063,85 +6603,79 @@ ; GFX10-LABEL: insertelement_v_v16i8_v_s: ; GFX10: ; %bb.0: ; GFX10-NEXT: global_load_dwordx4 v[3:6], v[0:1], off -; GFX10-NEXT: v_mov_b32_e32 v0, 8 ; GFX10-NEXT: s_mov_b32 s0, 8 +; GFX10-NEXT: v_mov_b32_e32 v0, 8 +; GFX10-NEXT: s_mov_b32 s1, 16 ; GFX10-NEXT: s_movk_i32 s3, 0xff +; GFX10-NEXT: v_mov_b32_e32 v1, 16 ; GFX10-NEXT: s_lshr_b32 s4, s2, 2 -; GFX10-NEXT: s_and_b32 s1, s2, 3 ; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_lo, s4, 1 -; GFX10-NEXT: s_lshl_b32 s2, s1, 3 -; GFX10-NEXT: v_cmp_eq_u32_e64 s1, s4, 3 -; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: s_lshl_b32 s2, s3, s2 -; GFX10-NEXT: s_not_b32 s2, s2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v1, 8, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v8, 8, v4 ; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v9, 24, v4 -; GFX10-NEXT: 
v_lshrrev_b32_e32 v10, 8, v5 -; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v8, v0, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshrrev_b32_e32 v11, 24, v5 -; GFX10-NEXT: v_lshrrev_b32_e32 v12, 8, v6 -; GFX10-NEXT: v_and_b32_sdwa v14, v3, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v1, v3, s3, v1 +; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v4 +; GFX10-NEXT: v_lshlrev_b32_sdwa v11, s0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v13, s0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v9, 24, v5 +; GFX10-NEXT: v_lshlrev_b32_sdwa v12, s1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v14, s1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v3, v3, s3, v11 ; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX10-NEXT: v_and_or_b32 v3, v4, s3, v8 -; GFX10-NEXT: v_and_b32_sdwa v15, v4, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX10-NEXT: v_lshlrev_b32_sdwa v15, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_and_or_b32 v4, v4, s3, v13 +; GFX10-NEXT: v_lshlrev_b32_e32 v8, 24, v8 +; GFX10-NEXT: v_lshlrev_b32_sdwa v16, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v6 +; GFX10-NEXT: v_or3_b32 v3, v3, v12, v7 +; GFX10-NEXT: v_lshlrev_b32_sdwa v17, v0, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_or3_b32 v4, v4, v14, v8 +; GFX10-NEXT: v_and_or_b32 v5, v5, s3, v15 ; GFX10-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX10-NEXT: v_lshlrev_b32_sdwa v10, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v16, v5, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshrrev_b32_e32 v13, 24, v6 -; GFX10-NEXT: v_or3_b32 v1, v1, v14, v7 -; GFX10-NEXT: v_lshlrev_b32_e32 v4, 24, v11 -; GFX10-NEXT: v_and_or_b32 v5, v5, s3, v10 -; GFX10-NEXT: v_lshlrev_b32_sdwa v8, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_or3_b32 v3, v3, v15, v9 -; GFX10-NEXT: v_and_b32_sdwa v17, v6, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v13 -; GFX10-NEXT: v_or3_b32 v4, v5, v16, v4 -; GFX10-NEXT: v_and_or_b32 v6, v6, s3, v8 -; GFX10-NEXT: v_cndmask_b32_e32 v5, v1, v3, vcc_lo +; GFX10-NEXT: v_lshlrev_b32_sdwa v11, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v6, v6, s3, v17 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v10 +; GFX10-NEXT: v_cndmask_b32_e32 v8, v3, v4, vcc_lo +; GFX10-NEXT: v_or3_b32 v5, v5, v16, v9 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, s4, 2 -; GFX10-NEXT: v_or3_b32 v6, v6, v17, v7 -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v4, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v6, s1 -; GFX10-NEXT: v_and_or_b32 v2, v5, s2, v2 +; GFX10-NEXT: s_and_b32 s1, s2, 3 +; GFX10-NEXT: v_or3_b32 v6, v6, v11, v7 +; GFX10-NEXT: s_lshl_b32 s2, s1, 3 +; GFX10-NEXT: v_cmp_eq_u32_e64 s1, s4, 3 +; GFX10-NEXT: v_cndmask_b32_e64 v7, v8, v5, s0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX10-NEXT: s_lshl_b32 s2, s3, s2 +; GFX10-NEXT: s_not_b32 s2, s2 +; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, v6, s1 +; GFX10-NEXT: v_and_or_b32 v2, v7, s2, v2 ; GFX10-NEXT: v_cmp_eq_u32_e64 s2, s4, 0 -; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v2, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, v2, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, v2, s2 +; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc_lo +; 
GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v2, s0 +; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, v2, s2 ; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, v2, s1 -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 8, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v9, 8, v4 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v11, 8, v2 -; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v4 -; GFX10-NEXT: v_lshrrev_b32_e32 v12, 24, v2 -; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v7, v0, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v0, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v13, v1, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_b32_sdwa v14, v3, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_b32_sdwa v15, v4, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v1, v1, s3, v5 -; GFX10-NEXT: v_and_b32_sdwa v16, v2, s3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX10-NEXT: v_and_or_b32 v5, v2, s3, v0 -; GFX10-NEXT: v_lshlrev_b32_e32 v8, 24, v8 -; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; GFX10-NEXT: v_and_or_b32 v4, v4, s3, v9 -; GFX10-NEXT: v_lshlrev_b32_e32 v11, 24, v12 -; GFX10-NEXT: v_and_or_b32 v3, v3, s3, v7 -; GFX10-NEXT: v_or3_b32 v0, v1, v13, v6 -; GFX10-NEXT: v_or3_b32 v2, v4, v15, v10 -; GFX10-NEXT: v_or3_b32 v1, v3, v14, v8 -; GFX10-NEXT: v_or3_b32 v3, v5, v16, v11 +; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v4 +; GFX10-NEXT: v_lshrrev_b32_e32 v8, 24, v5 +; 
GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v3 +; GFX10-NEXT: v_lshlrev_b32_sdwa v10, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v12, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v14, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v9, 24, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v13, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v15, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v11, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v16, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v1, v3, s3, v10 +; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v6 +; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v8 +; GFX10-NEXT: v_and_or_b32 v4, v4, s3, v12 +; GFX10-NEXT: v_and_or_b32 v5, v5, s3, v14 +; GFX10-NEXT: v_and_or_b32 v8, v2, s3, v0 +; GFX10-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; GFX10-NEXT: v_or3_b32 v0, v1, v11, v3 +; GFX10-NEXT: v_or3_b32 v1, v4, v13, v6 +; GFX10-NEXT: v_or3_b32 v2, v5, v15, v7 ; GFX10-NEXT: v_mov_b32_e32 v4, 0 +; GFX10-NEXT: v_or3_b32 v3, v8, v16, v9 ; GFX10-NEXT: v_mov_b32_e32 v5, 0 ; GFX10-NEXT: global_store_dwordx4 v[4:5], v[0:3], off ; GFX10-NEXT: s_endpgm @@ -7156,85 +6690,79 @@ ; GFX9: ; %bb.0: ; GFX9-NEXT: global_load_dwordx4 v[4:7], v[0:1], off ; GFX9-NEXT: s_mov_b32 s0, 8 +; GFX9-NEXT: s_mov_b32 s1, 16 ; GFX9-NEXT: v_mov_b32_e32 v1, 8 -; GFX9-NEXT: s_movk_i32 s1, 0xff -; GFX9-NEXT: v_lshrrev_b32_e32 v8, 2, v3 +; GFX9-NEXT: s_movk_i32 s2, 0xff +; GFX9-NEXT: v_mov_b32_e32 v8, 16 ; GFX9-NEXT: 
v_mov_b32_e32 v0, 0xff +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v4 +; GFX9-NEXT: v_lshrrev_b32_e32 v10, 24, v5 +; GFX9-NEXT: v_lshlrev_b32_sdwa v13, s0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v15, s0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v11, 24, v6 +; GFX9-NEXT: v_lshlrev_b32_sdwa v14, s1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v16, s1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v5, v5, s2, v15 +; GFX9-NEXT: v_lshrrev_b32_e32 v15, 2, v3 +; GFX9-NEXT: v_and_or_b32 v4, v4, s2, v13 +; GFX9-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; GFX9-NEXT: v_lshlrev_b32_sdwa v17, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v12, 24, v7 ; GFX9-NEXT: v_and_b32_e32 v3, 3, v3 -; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v8 +; GFX9-NEXT: v_lshlrev_b32_sdwa v18, v8, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v19, v1, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v6, v6, v0, v17 +; GFX9-NEXT: v_lshlrev_b32_e32 v11, 24, v11 +; GFX9-NEXT: v_or3_b32 v4, v4, v14, v9 +; GFX9-NEXT: v_or3_b32 v5, v5, v16, v10 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v15 +; GFX9-NEXT: v_and_or_b32 v13, v7, v0, v19 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 3, v3 +; GFX9-NEXT: v_lshlrev_b32_sdwa v7, v8, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v12, 24, v12 +; GFX9-NEXT: v_or3_b32 v6, v6, v18, v11 +; GFX9-NEXT: v_cndmask_b32_e32 v9, v4, v5, vcc +; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v15 ; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_0 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, v3, v0 -; GFX9-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v8 -; GFX9-NEXT: v_xor_b32_e32 v3, -1, v3 -; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v8 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: v_lshrrev_b32_e32 v9, 8, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v11, 8, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v10, 24, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v13, 8, v6 -; GFX9-NEXT: v_lshrrev_b32_e32 v15, 8, v7 -; GFX9-NEXT: v_lshrrev_b32_e32 v12, 24, v5 -; GFX9-NEXT: v_lshlrev_b32_sdwa v9, s0, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v11, v1, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_b32_sdwa v17, v4, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v18, v5, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v5, v5, s1, v11 -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 24, v12 -; GFX9-NEXT: v_lshrrev_b32_e32 v14, 24, v6 -; GFX9-NEXT: v_lshrrev_b32_e32 v16, 24, v7 -; GFX9-NEXT: v_lshlrev_b32_sdwa v13, v1, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v15, v1, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_and_or_b32 v4, v4, s1, v9 -; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10 -; GFX9-NEXT: v_and_b32_sdwa v19, v6, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v6, v6, s1, v13 -; GFX9-NEXT: v_and_or_b32 v9, v7, v0, v15 -; GFX9-NEXT: v_lshlrev_b32_e32 v12, 24, v14 -; GFX9-NEXT: v_and_b32_sdwa v7, v7, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v13, 24, v16 -; GFX9-NEXT: v_or3_b32 v4, v4, v17, v10 -; GFX9-NEXT: v_or3_b32 v5, v5, v18, v11 -; GFX9-NEXT: v_or3_b32 v7, v9, v7, v13 -; GFX9-NEXT: v_or3_b32 v6, v6, v19, v12 -; GFX9-NEXT: v_cndmask_b32_e32 
v9, v4, v5, vcc -; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v8 +; GFX9-NEXT: v_or3_b32 v7, v13, v7, v12 ; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v6, s[0:1] +; GFX9-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v15 +; GFX9-NEXT: v_xor_b32_e32 v3, -1, v3 ; GFX9-NEXT: v_cndmask_b32_e64 v9, v9, v7, s[2:3] ; GFX9-NEXT: v_and_or_b32 v2, v9, v3, v2 +; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v15 ; GFX9-NEXT: v_cndmask_b32_e64 v3, v4, v2, s[4:5] ; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v2, vcc ; GFX9-NEXT: v_cndmask_b32_e64 v5, v6, v2, s[0:1] ; GFX9-NEXT: v_cndmask_b32_e64 v2, v7, v2, s[2:3] -; GFX9-NEXT: v_lshrrev_b32_e32 v8, 8, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v10, 8, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 8, v3 -; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v11, 24, v5 -; GFX9-NEXT: v_lshrrev_b32_e32 v12, 8, v2 -; GFX9-NEXT: v_lshlrev_b32_sdwa v8, v1, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_sdwa v10, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v3 -; GFX9-NEXT: v_and_b32_sdwa v15, v4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_b32_sdwa v16, v5, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_sdwa v6, v1, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshrrev_b32_e32 v13, 24, v2 -; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX9-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX9-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX9-NEXT: v_and_or_b32 v4, v4, v0, v8 -; GFX9-NEXT: v_and_or_b32 v5, v5, v0, v10 -; GFX9-NEXT: v_and_b32_sdwa v14, v3, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_and_or_b32 v3, v3, v0, v6 -; GFX9-NEXT: v_and_or_b32 v6, v2, v0, v1 -; GFX9-NEXT: v_and_b32_sdwa v17, v2, v0 
dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v4 +; GFX9-NEXT: v_lshrrev_b32_e32 v9, 24, v5 +; GFX9-NEXT: v_lshlrev_b32_sdwa v13, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v15, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v3 +; GFX9-NEXT: v_lshrrev_b32_e32 v10, 24, v2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v11, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v14, v8, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v16, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX9-NEXT: v_and_or_b32 v4, v4, v0, v13 ; GFX9-NEXT: v_lshlrev_b32_e32 v7, 24, v7 -; GFX9-NEXT: v_lshlrev_b32_e32 v12, 24, v13 -; GFX9-NEXT: v_or3_b32 v1, v4, v15, v9 -; GFX9-NEXT: v_or3_b32 v2, v5, v16, v11 +; GFX9-NEXT: v_and_or_b32 v5, v5, v0, v15 +; GFX9-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; GFX9-NEXT: v_lshlrev_b32_sdwa v12, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_and_or_b32 v3, v3, v0, v11 +; GFX9-NEXT: v_and_or_b32 v11, v2, v0, v1 +; GFX9-NEXT: v_lshlrev_b32_sdwa v8, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX9-NEXT: v_lshlrev_b32_e32 v6, 24, v6 +; GFX9-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; GFX9-NEXT: v_or3_b32 v1, v4, v14, v7 +; GFX9-NEXT: v_or3_b32 v2, v5, v16, v9 ; GFX9-NEXT: v_mov_b32_e32 v4, 0 -; GFX9-NEXT: v_or3_b32 v0, v3, v14, v7 -; GFX9-NEXT: v_or3_b32 v3, v6, v17, v12 +; GFX9-NEXT: v_or3_b32 v0, v3, v12, v6 +; GFX9-NEXT: v_or3_b32 v3, v11, v8, v10 ; GFX9-NEXT: v_mov_b32_e32 v5, 0 ; GFX9-NEXT: global_store_dwordx4 v[4:5], v[0:3], off ; GFX9-NEXT: s_endpgm @@ 
-7242,96 +6770,88 @@ ; GFX8-LABEL: insertelement_v_v16i8_v_v: ; GFX8: ; %bb.0: ; GFX8-NEXT: flat_load_dwordx4 v[4:7], v[0:1] -; GFX8-NEXT: s_movk_i32 s0, 0xff -; GFX8-NEXT: v_mov_b32_e32 v1, 8 -; GFX8-NEXT: v_mov_b32_e32 v8, 8 -; GFX8-NEXT: v_mov_b32_e32 v9, s0 -; GFX8-NEXT: v_mov_b32_e32 v0, 0xff -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 2, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v11, 2, v3 ; GFX8-NEXT: v_and_b32_e32 v3, 3, v3 -; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v10 +; GFX8-NEXT: v_mov_b32_e32 v1, 8 +; GFX8-NEXT: v_mov_b32_e32 v9, 8 +; GFX8-NEXT: v_mov_b32_e32 v8, 16 ; GFX8-NEXT: v_lshlrev_b32_e32 v3, 3, v3 -; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v10 +; GFX8-NEXT: v_mov_b32_e32 v0, 0xff +; GFX8-NEXT: v_lshlrev_b32_e32 v0, v3, v0 +; GFX8-NEXT: v_mov_b32_e32 v10, 16 ; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_e32 v3, v3, v0 -; GFX8-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v10 -; GFX8-NEXT: v_xor_b32_e32 v3, -1, v3 -; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v10 +; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v11 +; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 2, v11 +; GFX8-NEXT: v_cmp_eq_u32_e64 s[2:3], 3, v11 +; GFX8-NEXT: v_xor_b32_e32 v0, -1, v0 +; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v11 ; GFX8-NEXT: s_waitcnt vmcnt(0) -; GFX8-NEXT: v_lshrrev_b32_e32 v13, 8, v5 -; GFX8-NEXT: v_lshrrev_b32_e32 v11, 8, v4 -; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v15, 8, v6 -; GFX8-NEXT: v_lshlrev_b32_sdwa v13, v8, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v12, 24, v4 -; GFX8-NEXT: v_lshrrev_b32_e32 v17, 8, v7 -; GFX8-NEXT: v_and_b32_sdwa v19, v4, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshrrev_b32_e32 v14, 
24, v5 -; GFX8-NEXT: v_or_b32_sdwa v4, v5, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v9, v5, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_sdwa v15, v8, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v16, 24, v6 -; GFX8-NEXT: v_lshlrev_b32_e32 v13, 24, v14 -; GFX8-NEXT: v_or_b32_e32 v4, v4, v9 -; GFX8-NEXT: v_and_b32_sdwa v11, v6, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v5, v6, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_sdwa v17, v8, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v15, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v4 +; GFX8-NEXT: v_lshrrev_b32_e32 v12, 24, v5 +; GFX8-NEXT: v_lshlrev_b32_sdwa v16, v8, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v4, v4, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v8, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v1, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v17, v9, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v19, v9, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v13, 24, v6 +; GFX8-NEXT: v_lshrrev_b32_e32 v14, 24, v7 +; GFX8-NEXT: v_lshlrev_b32_sdwa v18, v10, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v5, v6, 
v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_or_b32_sdwa v6, v7, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v7, v10, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3 +; GFX8-NEXT: v_or_b32_e32 v4, v4, v16 ; GFX8-NEXT: v_lshlrev_b32_e32 v12, 24, v12 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v19 -; GFX8-NEXT: v_lshrrev_b32_e32 v18, 24, v7 -; GFX8-NEXT: v_or_b32_sdwa v6, v7, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v7, v7, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_lshlrev_b32_e32 v14, 24, v16 -; GFX8-NEXT: v_or_b32_e32 v5, v5, v11 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v12 -; GFX8-NEXT: v_or_b32_e32 v4, v4, v13 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v8 +; GFX8-NEXT: v_lshlrev_b32_e32 v13, 24, v13 +; GFX8-NEXT: v_lshlrev_b32_e32 v14, 24, v14 +; GFX8-NEXT: v_or_b32_e32 v5, v5, v18 +; GFX8-NEXT: v_or_b32_e32 v3, v4, v3 ; GFX8-NEXT: v_or_b32_e32 v6, v6, v7 -; GFX8-NEXT: v_lshlrev_b32_e32 v15, 24, v18 -; GFX8-NEXT: v_or_b32_e32 v5, v5, v14 -; GFX8-NEXT: v_cndmask_b32_e32 v7, v1, v4, vcc -; GFX8-NEXT: v_or_b32_e32 v6, v6, v15 -; GFX8-NEXT: v_cndmask_b32_e64 v7, v7, v5, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v7, v7, v6, s[2:3] -; GFX8-NEXT: v_and_b32_e32 v3, v7, v3 -; GFX8-NEXT: v_or_b32_e32 v2, v3, v2 -; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v2, vcc -; GFX8-NEXT: v_cndmask_b32_e64 v4, v5, v2, s[0:1] -; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v2, s[4:5] -; GFX8-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[2:3] -; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v1 -; GFX8-NEXT: v_lshrrev_b32_e32 v7, 8, v3 -; GFX8-NEXT: v_lshrrev_b32_e32 v10, 8, v4 -; GFX8-NEXT: v_lshrrev_b32_e32 v12, 8, v2 -; GFX8-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v7, v8, v7 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v8, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshlrev_b32_sdwa v8, v8, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX8-NEXT: v_lshrrev_b32_e32 v6, 24, v1 -; GFX8-NEXT: v_lshrrev_b32_e32 v9, 24, v3 -; GFX8-NEXT: v_lshrrev_b32_e32 v11, 24, v4 -; GFX8-NEXT: v_lshrrev_b32_e32 v13, 24, v2 -; GFX8-NEXT: v_and_b32_sdwa v14, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v1, v1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v15, v3, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v16, v4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v3, v3, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_or_b32_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD -; GFX8-NEXT: v_and_b32_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX8-NEXT: v_or_b32_e32 v1, v1, v12 +; GFX8-NEXT: v_or_b32_e32 v4, v5, v13 +; GFX8-NEXT: v_or_b32_e32 v5, v6, v14 +; GFX8-NEXT: v_cndmask_b32_e32 v6, v3, v1, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v6, v6, v4, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v6, v6, v5, s[2:3] +; GFX8-NEXT: v_and_b32_e32 v0, v6, v0 +; GFX8-NEXT: v_or_b32_e32 v0, v0, v2 +; GFX8-NEXT: v_cndmask_b32_e64 v2, v3, v0, s[4:5] +; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v0, vcc +; GFX8-NEXT: v_cndmask_b32_e64 v3, v4, v0, s[0:1] +; GFX8-NEXT: v_cndmask_b32_e64 v0, v5, v0, s[2:3] +; GFX8-NEXT: v_lshlrev_b32_sdwa v8, v9, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v12, v9, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; 
GFX8-NEXT: v_lshrrev_b32_e32 v4, 24, v2 +; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v11, v10, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX8-NEXT: v_or_b32_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v13, v10, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v1, v1, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v14, v9, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshlrev_b32_sdwa v9, v9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX8-NEXT: v_lshrrev_b32_e32 v6, 24, v3 +; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v0 +; GFX8-NEXT: v_lshlrev_b32_sdwa v15, v10, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v3, v3, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_sdwa v10, v10, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX8-NEXT: v_or_b32_sdwa v0, v0, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD +; GFX8-NEXT: v_lshlrev_b32_e32 v4, 24, v4 +; GFX8-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v13 +; GFX8-NEXT: v_or_b32_e32 v2, v2, v11 +; GFX8-NEXT: v_or_b32_e32 v8, v0, v10 ; GFX8-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX8-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX8-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX8-NEXT: v_lshlrev_b32_e32 v12, 24, v13 -; GFX8-NEXT: v_or_b32_e32 v1, v1, v14 -; GFX8-NEXT: v_or_b32_e32 v5, v2, v0 -; GFX8-NEXT: v_or_b32_e32 v4, v4, v16 +; GFX8-NEXT: v_lshlrev_b32_e32 v7, 24, v7 ; GFX8-NEXT: v_or_b32_e32 v3, v3, v15 -; GFX8-NEXT: v_or_b32_e32 v0, v1, v6 -; GFX8-NEXT: v_or_b32_e32 v1, v3, v9 -; GFX8-NEXT: v_or_b32_e32 v2, v4, v11 -; GFX8-NEXT: v_or_b32_e32 v3, v5, v12 
+; GFX8-NEXT: v_or_b32_e32 v0, v2, v4 +; GFX8-NEXT: v_or_b32_e32 v1, v1, v5 ; GFX8-NEXT: v_mov_b32_e32 v4, 0 +; GFX8-NEXT: v_or_b32_e32 v2, v3, v6 +; GFX8-NEXT: v_or_b32_e32 v3, v8, v7 ; GFX8-NEXT: v_mov_b32_e32 v5, 0 ; GFX8-NEXT: flat_store_dwordx4 v[4:5], v[0:3] ; GFX8-NEXT: s_endpgm @@ -7356,111 +6876,95 @@ ; GFX7-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v19 ; GFX7-NEXT: s_mov_b32 s10, -1 ; GFX7-NEXT: s_waitcnt vmcnt(0) -; GFX7-NEXT: v_lshrrev_b32_e32 v0, 8, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v10, 8, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v13, 8, v6 -; GFX7-NEXT: v_and_b32_e32 v0, s0, v0 -; GFX7-NEXT: v_and_b32_e32 v10, s0, v10 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 24, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v12, 24, v5 -; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v6 -; GFX7-NEXT: v_lshrrev_b32_e32 v16, 8, v7 -; GFX7-NEXT: v_and_b32_e32 v1, s0, v1 -; GFX7-NEXT: v_and_b32_e32 v11, s0, v11 -; GFX7-NEXT: v_and_b32_e32 v13, s0, v13 -; GFX7-NEXT: v_and_b32_e32 v4, s0, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v0, 8, v0 -; GFX7-NEXT: v_and_b32_e32 v5, s0, v5 -; GFX7-NEXT: v_lshlrev_b32_e32 v10, 8, v10 -; GFX7-NEXT: v_lshrrev_b32_e32 v15, 24, v6 -; GFX7-NEXT: v_lshrrev_b32_e32 v17, 16, v7 -; GFX7-NEXT: v_and_b32_e32 v14, v14, v8 -; GFX7-NEXT: v_and_b32_e32 v16, v16, v8 -; GFX7-NEXT: v_and_b32_e32 v6, s0, v6 -; GFX7-NEXT: v_or_b32_e32 v0, v4, v0 -; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v11 -; GFX7-NEXT: v_lshlrev_b32_e32 v13, 8, v13 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v10 -; GFX7-NEXT: v_lshrrev_b32_e32 v18, 24, v7 -; GFX7-NEXT: v_and_b32_e32 v17, v17, v8 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v1 -; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX7-NEXT: v_and_b32_e32 v7, v7, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v12, 24, v12 -; GFX7-NEXT: v_or_b32_e32 v1, v4, v11 -; GFX7-NEXT: v_lshlrev_b32_e32 v14, 16, v14 -; GFX7-NEXT: v_or_b32_e32 v5, v6, v13 +; GFX7-NEXT: v_bfe_u32 v12, v4, 
8, 8 +; GFX7-NEXT: v_bfe_u32 v14, v5, 8, 8 +; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v4 +; GFX7-NEXT: v_lshrrev_b32_e32 v1, 24, v5 +; GFX7-NEXT: v_bfe_u32 v16, v6, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v11, s0, v4 +; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 8 +; GFX7-NEXT: v_and_b32_e32 v13, s0, v5 +; GFX7-NEXT: v_bfe_u32 v5, v5, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v12, 8, v12 +; GFX7-NEXT: v_lshlrev_b32_e32 v14, 8, v14 +; GFX7-NEXT: v_lshrrev_b32_e32 v9, 24, v6 +; GFX7-NEXT: v_bfe_u32 v18, v7, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v15, v6, v8 +; GFX7-NEXT: v_bfe_u32 v6, v6, 16, 8 +; GFX7-NEXT: v_or_b32_e32 v11, v11, v12 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX7-NEXT: v_or_b32_e32 v12, v13, v14 ; GFX7-NEXT: v_lshlrev_b32_e32 v16, 8, v16 -; GFX7-NEXT: v_lshlrev_b32_e32 v15, 24, v15 -; GFX7-NEXT: v_or_b32_e32 v4, v5, v14 -; GFX7-NEXT: v_lshlrev_b32_e32 v17, 16, v17 -; GFX7-NEXT: v_or_b32_e32 v6, v7, v16 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v9 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v12 -; GFX7-NEXT: v_or_b32_e32 v5, v6, v17 -; GFX7-NEXT: v_lshlrev_b32_e32 v18, 24, v18 -; GFX7-NEXT: v_or_b32_e32 v4, v4, v15 +; GFX7-NEXT: v_lshrrev_b32_e32 v10, 24, v7 +; GFX7-NEXT: v_and_b32_e32 v17, v7, v8 +; GFX7-NEXT: v_bfe_u32 v7, v7, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v18, 8, v18 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 24, v0 +; GFX7-NEXT: v_or_b32_e32 v4, v11, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 24, v1 +; GFX7-NEXT: v_or_b32_e32 v5, v12, v5 +; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v6 +; GFX7-NEXT: v_or_b32_e32 v13, v15, v16 +; GFX7-NEXT: v_lshlrev_b32_e32 v9, 24, v9 +; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v7 +; GFX7-NEXT: v_or_b32_e32 v14, v17, v18 +; GFX7-NEXT: v_or_b32_e32 v6, v13, v6 +; GFX7-NEXT: v_or_b32_e32 v0, v4, v0 +; GFX7-NEXT: v_or_b32_e32 v1, v5, v1 +; GFX7-NEXT: v_or_b32_e32 v4, v6, v9 +; GFX7-NEXT: v_lshlrev_b32_e32 v10, 24, v10 +; GFX7-NEXT: v_or_b32_e32 v7, v14, v7 ; GFX7-NEXT: v_cndmask_b32_e32 v6, v0, v1, vcc ; GFX7-NEXT: 
v_cmp_eq_u32_e64 s[0:1], 2, v19 -; GFX7-NEXT: v_or_b32_e32 v5, v5, v18 +; GFX7-NEXT: v_or_b32_e32 v5, v7, v10 ; GFX7-NEXT: v_cndmask_b32_e64 v6, v6, v4, s[0:1] ; GFX7-NEXT: v_cndmask_b32_e64 v6, v6, v5, s[2:3] ; GFX7-NEXT: v_and_b32_e32 v3, v6, v3 ; GFX7-NEXT: v_or_b32_e32 v2, v3, v2 ; GFX7-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[4:5] +; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc +; GFX7-NEXT: v_bfe_u32 v10, v0, 8, 8 ; GFX7-NEXT: v_cndmask_b32_e64 v3, v4, v2, s[0:1] ; GFX7-NEXT: v_cndmask_b32_e64 v4, v5, v2, s[2:3] -; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc -; GFX7-NEXT: v_lshrrev_b32_e32 v2, 8, v0 -; GFX7-NEXT: v_and_b32_e32 v2, v2, v8 -; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v0 -; GFX7-NEXT: v_lshrrev_b32_e32 v7, 8, v1 -; GFX7-NEXT: v_and_b32_e32 v0, v0, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX7-NEXT: v_lshrrev_b32_e32 v2, 24, v0 +; GFX7-NEXT: v_bfe_u32 v12, v1, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v9, v0, v8 +; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v10, 8, v10 +; GFX7-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX7-NEXT: v_bfe_u32 v14, v3, 8, 8 +; GFX7-NEXT: v_and_b32_e32 v11, v1, v8 +; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0 +; GFX7-NEXT: v_or_b32_e32 v9, v9, v10 +; GFX7-NEXT: v_lshlrev_b32_e32 v12, 8, v12 +; GFX7-NEXT: v_lshrrev_b32_e32 v6, 24, v3 +; GFX7-NEXT: v_and_b32_e32 v13, v3, v8 +; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v2 +; GFX7-NEXT: v_or_b32_e32 v0, v9, v0 +; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1 +; GFX7-NEXT: v_or_b32_e32 v10, v11, v12 +; GFX7-NEXT: v_lshlrev_b32_e32 v14, 8, v14 ; GFX7-NEXT: v_or_b32_e32 v0, v0, v2 -; GFX7-NEXT: v_and_b32_e32 v2, v7, v8 -; GFX7-NEXT: v_lshrrev_b32_e32 v9, 16, v1 -; GFX7-NEXT: v_lshrrev_b32_e32 v10, 24, v1 -; GFX7-NEXT: v_and_b32_e32 v1, v1, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX7-NEXT: v_and_b32_e32 v2, v9, v8 -; 
GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX7-NEXT: v_lshlrev_b32_e32 v2, 24, v10 -; GFX7-NEXT: v_lshrrev_b32_e32 v11, 8, v3 -; GFX7-NEXT: v_or_b32_e32 v1, v1, v2 -; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v13, 24, v3 -; GFX7-NEXT: v_and_b32_e32 v2, v3, v8 -; GFX7-NEXT: v_and_b32_e32 v3, v11, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 8, v3 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 -; GFX7-NEXT: v_and_b32_e32 v3, v12, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 -; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v13 -; GFX7-NEXT: v_lshrrev_b32_e32 v14, 8, v4 +; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v3 +; GFX7-NEXT: v_or_b32_e32 v11, v13, v14 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX7-NEXT: v_or_b32_e32 v1, v10, v1 +; GFX7-NEXT: v_or_b32_e32 v1, v1, v5 +; GFX7-NEXT: v_bfe_u32 v5, v4, 8, 8 +; GFX7-NEXT: v_or_b32_e32 v2, v11, v2 +; GFX7-NEXT: v_lshlrev_b32_e32 v3, 24, v6 ; GFX7-NEXT: v_or_b32_e32 v2, v2, v3 -; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v4 -; GFX7-NEXT: v_lshrrev_b32_e32 v16, 24, v4 +; GFX7-NEXT: v_lshrrev_b32_e32 v7, 24, v4 ; GFX7-NEXT: v_and_b32_e32 v3, v4, v8 -; GFX7-NEXT: v_and_b32_e32 v4, v14, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 8, v4 -; GFX7-NEXT: v_and_b32_e32 v5, v5, v8 -; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_and_b32_e32 v4, v15, v8 -; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v5 +; GFX7-NEXT: v_bfe_u32 v4, v4, 16, 8 +; GFX7-NEXT: v_lshlrev_b32_e32 v5, 8, v5 +; GFX7-NEXT: v_or_b32_e32 v3, v3, v5 ; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4 ; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 -; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v16 -; GFX7-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v5 -; GFX7-NEXT: v_or_b32_e32 v0, v0, v6 +; GFX7-NEXT: v_lshlrev_b32_e32 v4, 24, v7 ; GFX7-NEXT: v_or_b32_e32 v3, v3, v4 ; GFX7-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 ; GFX7-NEXT: s_endpgm @@ -7468,86 +6972,80 @@ ; GFX10-LABEL: 
insertelement_v_v16i8_v_v: ; GFX10: ; %bb.0: ; GFX10-NEXT: global_load_dwordx4 v[4:7], v[0:1], off -; GFX10-NEXT: v_mov_b32_e32 v8, 8 ; GFX10-NEXT: s_mov_b32 s0, 8 -; GFX10-NEXT: s_movk_i32 s1, 0xff +; GFX10-NEXT: v_mov_b32_e32 v8, 8 +; GFX10-NEXT: s_mov_b32 s1, 16 +; GFX10-NEXT: s_movk_i32 s2, 0xff ; GFX10-NEXT: v_and_b32_e32 v0, 3, v3 +; GFX10-NEXT: v_mov_b32_e32 v9, 16 ; GFX10-NEXT: v_lshrrev_b32_e32 v3, 2, v3 ; GFX10-NEXT: v_mov_b32_e32 v1, 0xff ; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0 ; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v3 -; GFX10-NEXT: v_cmp_eq_u32_e64 s2, 0, v3 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_lshrrev_b32_e32 v9, 8, v4 -; GFX10-NEXT: v_lshrrev_b32_e32 v11, 8, v5 ; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v4 -; GFX10-NEXT: v_lshrrev_b32_e32 v12, 24, v5 -; GFX10-NEXT: v_lshrrev_b32_e32 v13, 8, v6 -; GFX10-NEXT: v_lshlrev_b32_sdwa v9, s0, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v11, v8, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v17, v4, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_b32_sdwa v18, v5, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshrrev_b32_e32 v14, 24, v6 -; GFX10-NEXT: v_and_or_b32 v4, v4, s1, v9 +; GFX10-NEXT: v_lshrrev_b32_e32 v11, 24, v5 +; GFX10-NEXT: v_lshlrev_b32_sdwa v14, s0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v16, s0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v12, 24, v6 +; GFX10-NEXT: v_lshlrev_b32_sdwa v15, s1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v17, s1, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v4, v4, s2, v14 ; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, 
v10 -; GFX10-NEXT: v_lshrrev_b32_e32 v15, 8, v7 -; GFX10-NEXT: v_and_or_b32 v5, v5, s1, v11 +; GFX10-NEXT: v_and_or_b32 v5, v5, s2, v16 +; GFX10-NEXT: v_lshlrev_b32_e32 v11, 24, v11 +; GFX10-NEXT: v_lshlrev_b32_sdwa v18, v8, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v19, v9, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshrrev_b32_e32 v13, 24, v7 +; GFX10-NEXT: v_or3_b32 v4, v4, v15, v10 +; GFX10-NEXT: v_or3_b32 v5, v5, v17, v11 +; GFX10-NEXT: v_lshlrev_b32_sdwa v20, v8, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_and_or_b32 v6, v6, v1, v18 ; GFX10-NEXT: v_lshlrev_b32_e32 v12, 24, v12 -; GFX10-NEXT: v_lshlrev_b32_sdwa v13, v8, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v19, v6, s1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshrrev_b32_e32 v16, 24, v7 -; GFX10-NEXT: v_or3_b32 v4, v4, v17, v10 -; GFX10-NEXT: v_lshlrev_b32_e32 v9, 24, v14 -; GFX10-NEXT: v_and_or_b32 v6, v6, s1, v13 -; GFX10-NEXT: v_lshlrev_b32_sdwa v11, v8, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_or3_b32 v5, v5, v18, v12 -; GFX10-NEXT: v_and_b32_sdwa v20, v7, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v16 -; GFX10-NEXT: v_or3_b32 v6, v6, v19, v9 -; GFX10-NEXT: v_and_or_b32 v7, v7, v1, v11 -; GFX10-NEXT: v_cndmask_b32_e32 v9, v4, v5, vcc_lo +; GFX10-NEXT: v_lshlrev_b32_sdwa v14, v9, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_e32 v10, 24, v13 +; GFX10-NEXT: v_and_or_b32 v7, v7, v1, v20 +; GFX10-NEXT: v_cndmask_b32_e32 v11, v4, v5, vcc_lo +; GFX10-NEXT: v_or3_b32 v6, v6, v19, v12 ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 2, v3 -; GFX10-NEXT: v_lshlrev_b32_e32 v11, v0, v1 +; GFX10-NEXT: 
v_lshlrev_b32_e32 v12, v0, v1 +; GFX10-NEXT: v_or3_b32 v7, v7, v14, v10 ; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 3, v3 -; GFX10-NEXT: v_or3_b32 v7, v7, v20, v10 ; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, v6, s0 -; GFX10-NEXT: v_xor_b32_e32 v2, -1, v11 -; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, v7, s1 -; GFX10-NEXT: v_and_or_b32 v0, v9, v2, v0 +; GFX10-NEXT: v_cndmask_b32_e64 v10, v11, v6, s0 +; GFX10-NEXT: v_xor_b32_e32 v2, -1, v12 +; GFX10-NEXT: v_cmp_eq_u32_e64 s2, 0, v3 +; GFX10-NEXT: v_cndmask_b32_e64 v10, v10, v7, s1 +; GFX10-NEXT: v_and_or_b32 v0, v10, v2, v0 ; GFX10-NEXT: v_cndmask_b32_e64 v2, v4, v0, s2 ; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v0, s0 ; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v0, vcc_lo ; GFX10-NEXT: v_cndmask_b32_e64 v0, v7, v0, s1 -; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v2 -; GFX10-NEXT: v_lshrrev_b32_e32 v10, 8, v4 -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 8, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v12, 8, v0 -; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v2 -; GFX10-NEXT: v_lshrrev_b32_e32 v9, 24, v3 -; GFX10-NEXT: v_lshrrev_b32_e32 v11, 24, v4 -; GFX10-NEXT: v_lshrrev_b32_e32 v13, 24, v0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v7, v8, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v10, v8, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_lshlrev_b32_sdwa v8, v8, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 -; GFX10-NEXT: v_and_b32_sdwa v14, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_b32_sdwa v15, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_b32_sdwa v16, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD 
-; GFX10-NEXT: v_and_or_b32 v2, v2, v1, v5 -; GFX10-NEXT: v_and_b32_sdwa v17, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v2 +; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v4 +; GFX10-NEXT: v_lshlrev_b32_sdwa v11, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v15, v8, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshrrev_b32_e32 v6, 24, v3 +; GFX10-NEXT: v_lshrrev_b32_e32 v10, 24, v0 +; GFX10-NEXT: v_lshlrev_b32_sdwa v13, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v8, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1 +; GFX10-NEXT: v_lshlrev_b32_sdwa v12, v9, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v16, v9, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v2, v2, v1, v11 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 24, v5 +; GFX10-NEXT: v_and_or_b32 v4, v4, v1, v15 +; GFX10-NEXT: v_lshlrev_b32_e32 v7, 24, v7 +; GFX10-NEXT: v_lshlrev_b32_sdwa v14, v9, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v9, v9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v8, v0, v1, v8 +; GFX10-NEXT: v_and_or_b32 v3, v3, v1, v13 ; GFX10-NEXT: v_lshlrev_b32_e32 v6, 24, v6 -; GFX10-NEXT: v_and_or_b32 v5, v0, v1, v8 -; GFX10-NEXT: v_lshlrev_b32_e32 v9, 24, v9 -; GFX10-NEXT: v_lshlrev_b32_e32 v11, 24, v11 -; GFX10-NEXT: v_lshlrev_b32_e32 v12, 24, v13 -; GFX10-NEXT: v_and_or_b32 v3, v3, v1, v7 -; GFX10-NEXT: v_and_or_b32 v4, v4, v1, v10 -; GFX10-NEXT: v_or3_b32 v0, v2, v14, v6 -; GFX10-NEXT: v_or3_b32 v1, v3, v15, v9 -; GFX10-NEXT: v_or3_b32 v2, v4, v16, v11 -; GFX10-NEXT: v_or3_b32 v3, v5, v17, v12 +; GFX10-NEXT: 
v_lshlrev_b32_e32 v10, 24, v10 +; GFX10-NEXT: v_or3_b32 v0, v2, v12, v5 +; GFX10-NEXT: v_or3_b32 v2, v4, v16, v7 ; GFX10-NEXT: v_mov_b32_e32 v4, 0 +; GFX10-NEXT: v_or3_b32 v1, v3, v14, v6 +; GFX10-NEXT: v_or3_b32 v3, v8, v9, v10 ; GFX10-NEXT: v_mov_b32_e32 v5, 0 ; GFX10-NEXT: global_store_dwordx4 v[4:5], v[0:3], off ; GFX10-NEXT: s_endpgm Index: llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sbfx.mir =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sbfx.mir @@ -0,0 +1,50 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=instruction-select -verify-machineinstrs -o - %s | FileCheck %s + +# The only instruction selection cases for G_SBFX/G_UBFX are the 64-bit +# vector versions. All other versions, scalar and 32-bit vector, are +# expanded during register bank selection. + +--- +name: sbfx_s32_vii +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + ; CHECK-LABEL: name: sbfx_s32_vii + ; CHECK: liveins: $vgpr0 + ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec + ; CHECK: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 10, implicit $exec + ; CHECK: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[COPY]], [[V_MOV_B32_e32_]], [[V_MOV_B32_e32_1]], implicit $exec + ; CHECK: S_ENDPGM 0, implicit [[V_BFE_I32_e64_]] + %0:vgpr(s32) = COPY $vgpr0 + %1:vgpr(s32) = G_CONSTANT i32 2 + %2:vgpr(s32) = G_CONSTANT i32 10 + %3:vgpr(s32) = G_SBFX %0, %1(s32), %2 + S_ENDPGM 0, implicit %3 +... 
+ +--- +name: sbfx_s32_vvv +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2 + ; CHECK-LABEL: name: sbfx_s32_vvv + ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2 + ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; CHECK: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[COPY]], [[COPY1]], [[COPY2]], implicit $exec + ; CHECK: S_ENDPGM 0, implicit [[V_BFE_I32_e64_]] + %0:vgpr(s32) = COPY $vgpr0 + %1:vgpr(s32) = COPY $vgpr1 + %2:vgpr(s32) = COPY $vgpr2 + %3:vgpr(s32) = G_SBFX %0, %1(s32), %2 + S_ENDPGM 0, implicit %3 +... Index: llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ubfx.mir =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ubfx.mir @@ -0,0 +1,78 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=instruction-select -verify-machineinstrs -o - %s | FileCheck %s + +# The only simple instruction selection for G_SBFX/G_UBFX are the 64-bit +# vector versions. All other versions are expanded during register bank +# selection. 
+ +--- +name: ubfx_s32_vii +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + ; WAVE64-LABEL: name: ubfx_s32_vii + ; WAVE64: liveins: $vgpr0 + ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; WAVE64: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec + ; WAVE64: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 10, implicit $exec + ; WAVE64: [[V_BFE_U32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_U32_e64 [[COPY]], [[V_MOV_B32_e32_]], [[V_MOV_B32_e32_1]], implicit $exec + ; WAVE64: S_ENDPGM 0, implicit [[V_BFE_U32_e64_]] + ; WAVE32-LABEL: name: ubfx_s32_vii + ; WAVE32: liveins: $vgpr0 + ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; WAVE32: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec + ; WAVE32: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 10, implicit $exec + ; WAVE32: [[V_BFE_U32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_U32_e64 [[COPY]], [[V_MOV_B32_e32_]], [[V_MOV_B32_e32_1]], implicit $exec + ; WAVE32: S_ENDPGM 0, implicit [[V_BFE_U32_e64_]] + ; CHECK-LABEL: name: ubfx_s32_vii + ; CHECK: liveins: $vgpr0 + ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec + ; CHECK: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 10, implicit $exec + ; CHECK: [[V_BFE_U32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_U32_e64 [[COPY]], [[V_MOV_B32_e32_]], [[V_MOV_B32_e32_1]], implicit $exec + ; CHECK: S_ENDPGM 0, implicit [[V_BFE_U32_e64_]] + %0:vgpr(s32) = COPY $vgpr0 + %1:vgpr(s32) = G_CONSTANT i32 2 + %2:vgpr(s32) = G_CONSTANT i32 10 + %3:vgpr(s32) = G_UBFX %0, %1(s32), %2 + S_ENDPGM 0, implicit %3 +... 
+ +--- +name: ubfx_s32_vvv +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2 + ; WAVE64-LABEL: name: ubfx_s32_vvv + ; WAVE64: liveins: $vgpr0, $vgpr1, $vgpr2 + ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; WAVE64: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; WAVE64: [[V_BFE_U32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_U32_e64 [[COPY]], [[COPY1]], [[COPY2]], implicit $exec + ; WAVE64: S_ENDPGM 0, implicit [[V_BFE_U32_e64_]] + ; WAVE32-LABEL: name: ubfx_s32_vvv + ; WAVE32: liveins: $vgpr0, $vgpr1, $vgpr2 + ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; WAVE32: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; WAVE32: [[V_BFE_U32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_U32_e64 [[COPY]], [[COPY1]], [[COPY2]], implicit $exec + ; WAVE32: S_ENDPGM 0, implicit [[V_BFE_U32_e64_]] + ; CHECK-LABEL: name: ubfx_s32_vvv + ; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2 + ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 + ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2 + ; CHECK: [[V_BFE_U32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_U32_e64 [[COPY]], [[COPY1]], [[COPY2]], implicit $exec + ; CHECK: S_ENDPGM 0, implicit [[V_BFE_U32_e64_]] + %0:vgpr(s32) = COPY $vgpr0 + %1:vgpr(s32) = COPY $vgpr1 + %2:vgpr(s32) = COPY $vgpr2 + %3:vgpr(s32) = G_UBFX %0, %1(s32), %2 + S_ENDPGM 0, implicit %3 +... 
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sbfx.mir =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sbfx.mir @@ -0,0 +1,104 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck --check-prefix=GCN %s +# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -O0 -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck --check-prefix=GCN %s +# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -O0 -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck --check-prefix=GCN %s +... +--- +name: test_sbfx_s32 +body: | + bb.0.entry: + liveins: $vgpr0, $vgpr1, $vgpr2 + + ; GCN-LABEL: name: test_sbfx_s32 + ; GCN: %copy:_(s32) = COPY $vgpr0 + ; GCN: %offset:_(s32) = COPY $vgpr1 + ; GCN: %width:_(s32) = COPY $vgpr2 + ; GCN: %sbfx:_(s32) = G_SBFX %copy, %offset(s32), %width + ; GCN: $vgpr0 = COPY %sbfx(s32) + %copy:_(s32) = COPY $vgpr0 + %offset:_(s32) = COPY $vgpr1 + %width:_(s32) = COPY $vgpr2 + %sbfx:_(s32) = G_SBFX %copy, %offset(s32), %width + $vgpr0 = COPY %sbfx(s32) +... + +--- +name: test_sbfx_s64 +body: | + bb.0.entry: + liveins: $vgpr0_vgpr1, $vgpr2, $vgpr3 + + ; GCN-LABEL: name: test_sbfx_s64 + ; GCN: %copy:_(s64) = COPY $vgpr0_vgpr1 + ; GCN: %offset:_(s32) = COPY $vgpr2 + ; GCN: %width:_(s32) = COPY $vgpr3 + ; GCN: %sbfx:_(s64) = G_SBFX %copy, %offset(s32), %width + ; GCN: $vgpr0_vgpr1 = COPY %sbfx(s64) + %copy:_(s64) = COPY $vgpr0_vgpr1 + %offset:_(s32) = COPY $vgpr2 + %width:_(s32) = COPY $vgpr3 + %sbfx:_(s64) = G_SBFX %copy, %offset(s32), %width + $vgpr0_vgpr1 = COPY %sbfx(s64) +...
+ +--- +name: test_sbfx_s8 +body: | + bb.0.entry: + liveins: $vgpr0, $vgpr1, $vgpr2 + + ; GCN-LABEL: name: test_sbfx_s8 + ; GCN: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GCN: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; GCN: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; GCN: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]] + ; GCN: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32) + ; GCN: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; GCN: [[COPY5:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; GCN: [[SBFX:%[0-9]+]]:_(s32) = G_SBFX [[COPY5]], [[AND]](s32), [[AND1]] + ; GCN: [[COPY6:%[0-9]+]]:_(s32) = COPY [[SBFX]](s32) + ; GCN: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY6]], 8 + ; GCN: $vgpr0 = COPY [[SEXT_INREG]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = COPY $vgpr2 + %copy:_(s8) = G_TRUNC %0 + %offset:_(s8) = G_TRUNC %1 + %width:_(s8) = G_TRUNC %2 + %sbfx:_(s8) = G_SBFX %copy, %offset, %width + %4:_(s32) = G_SEXT %sbfx + $vgpr0 = COPY %4 +...
+ +--- +name: test_sbfx_s16 +body: | + bb.0.entry: + liveins: $vgpr0, $vgpr1, $vgpr2 + + ; GCN-LABEL: name: test_sbfx_s16 + ; GCN: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GCN: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 + ; GCN: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; GCN: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]] + ; GCN: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32) + ; GCN: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; GCN: [[COPY5:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; GCN: [[SBFX:%[0-9]+]]:_(s32) = G_SBFX [[COPY5]], [[AND]](s32), [[AND1]] + ; GCN: [[COPY6:%[0-9]+]]:_(s32) = COPY [[SBFX]](s32) + ; GCN: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY6]], 16 + ; GCN: $vgpr0 = COPY [[SEXT_INREG]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = COPY $vgpr2 + %copy:_(s16) = G_TRUNC %0 + %offset:_(s16) = G_TRUNC %1 + %width:_(s16) = G_TRUNC %2 + %sbfx:_(s16) = G_SBFX %copy, %offset, %width + %4:_(s32) = G_SEXT %sbfx + $vgpr0 = COPY %4 +... Index: llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ubfx.mir =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ubfx.mir @@ -0,0 +1,105 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck --check-prefix=GCN %s +# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -O0 -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck --check-prefix=GCN %s +# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -O0 -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck --check-prefix=GCN %s +...
+--- +name: test_ubfx_s32 +body: | + bb.0.entry: + liveins: $vgpr0, $vgpr1, $vgpr2 + + ; GCN-LABEL: name: test_ubfx_s32 + ; GCN: %copy:_(s32) = COPY $vgpr0 + ; GCN: %offset:_(s32) = COPY $vgpr1 + ; GCN: %width:_(s32) = COPY $vgpr2 + ; GCN: %ubfx:_(s32) = G_UBFX %copy, %offset(s32), %width + ; GCN: $vgpr0 = COPY %ubfx(s32) + %copy:_(s32) = COPY $vgpr0 + %offset:_(s32) = COPY $vgpr1 + %width:_(s32) = COPY $vgpr2 + %ubfx:_(s32) = G_UBFX %copy, %offset(s32), %width + $vgpr0 = COPY %ubfx(s32) +... + +--- +name: test_ubfx_s64 +body: | + bb.0.entry: + liveins: $vgpr0_vgpr1, $vgpr2, $vgpr3 + + ; GCN-LABEL: name: test_ubfx_s64 + ; GCN: %copy:_(s64) = COPY $vgpr0_vgpr1 + ; GCN: %offset:_(s32) = COPY $vgpr2 + ; GCN: %width:_(s32) = COPY $vgpr3 + ; GCN: %ubfx:_(s64) = G_UBFX %copy, %offset(s32), %width + ; GCN: $vgpr0_vgpr1 = COPY %ubfx(s64) + %copy:_(s64) = COPY $vgpr0_vgpr1 + %offset:_(s32) = COPY $vgpr2 + %width:_(s32) = COPY $vgpr3 + %ubfx:_(s64) = G_UBFX %copy, %offset(s32), %width + $vgpr0_vgpr1 = COPY %ubfx(s64) +... 
+ +--- +name: test_ubfx_s8 +body: | + bb.0.entry: + liveins: $vgpr0, $vgpr1, $vgpr2 + + ; GCN-LABEL: name: test_ubfx_s8 + ; GCN: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GCN: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; GCN: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; GCN: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]] + ; GCN: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32) + ; GCN: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; GCN: [[COPY5:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; GCN: [[UBFX:%[0-9]+]]:_(s32) = G_UBFX [[COPY5]], [[AND]](s32), [[AND1]] + ; GCN: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UBFX]](s32) + ; GCN: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C]] + ; GCN: $vgpr0 = COPY [[AND2]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = COPY $vgpr2 + %copy:_(s8) = G_TRUNC %0 + %offset:_(s8) = G_TRUNC %1 + %width:_(s8) = G_TRUNC %2 + %ubfx:_(s8) = G_UBFX %copy, %offset, %width + %4:_(s32) = G_ZEXT %ubfx + $vgpr0 = COPY %4 +... 
+ +--- +name: test_ubfx_s16 +body: | + bb.0.entry: + liveins: $vgpr0, $vgpr1, $vgpr2 + + ; GCN-LABEL: name: test_ubfx_s16 + ; GCN: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GCN: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 + ; GCN: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; GCN: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]] + ; GCN: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32) + ; GCN: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; GCN: [[COPY5:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; GCN: [[UBFX:%[0-9]+]]:_(s32) = G_UBFX [[COPY5]], [[AND]](s32), [[AND1]] + ; GCN: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UBFX]](s32) + ; GCN: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C]] + ; GCN: $vgpr0 = COPY [[AND2]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = COPY $vgpr2 + %copy:_(s16) = G_TRUNC %0 + %offset:_(s16) = G_TRUNC %1 + %width:_(s16) = G_TRUNC %2 + %sbfx:_(s16) = G_UBFX %copy, %offset, %width + %4:_(s32) = G_ZEXT %sbfx + $vgpr0 = COPY %4 +... 
+ Index: llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ubfe.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ubfe.ll +++ llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ubfe.ll @@ -983,8 +983,7 @@ ; GFX6-NEXT: s_mov_b32 s6, -1 ; GFX6-NEXT: s_mov_b32 s7, 0xf000 ; GFX6-NEXT: s_waitcnt lgkmcnt(0) -; GFX6-NEXT: s_lshr_b32 s0, s0, 6 -; GFX6-NEXT: s_and_b32 s0, s0, 7 +; GFX6-NEXT: s_bfe_u32 s0, s0, 0x30006 ; GFX6-NEXT: v_mov_b32_e32 v0, s0 ; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; GFX6-NEXT: s_endpgm Index: llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.workitem.id.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.workitem.id.ll +++ llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.workitem.id.ll @@ -36,8 +36,7 @@ ; CO-V2-NOT: v1 ; CO-V2: {{buffer|flat}}_store_dword {{.*}}v1 -; PACKED-TID: v_lshrrev_b32_e32 [[ID:v[0-9]+]], 10, v0 -; PACKED-TID: v_and_b32_e32 [[ID]], 0x3ff, [[ID]] +; PACKED-TID: v_bfe_u32 [[ID:v[0-9]+]], v0, 10, 10 ; PACKED-TID: {{buffer|flat|global}}_store_dword {{.*}}[[ID]] ; PACKED-TID: .amdhsa_system_vgpr_workitem_id 1 define amdgpu_kernel void @test_workitem_id_y(i32 addrspace(1)* %out) #1 { @@ -55,8 +54,7 @@ ; CO-V2-NOT: v2 ; CO-V2: {{buffer|flat}}_store_dword {{.*}}v2 -; PACKED-TID: v_lshrrev_b32_e32 [[ID:v[0-9]+]], 20, v0 -; PACKED-TID: v_and_b32_e32 [[ID]], 0x3ff, [[ID]] +; PACKED-TID: v_bfe_u32 [[ID:v[0-9]+]], v0, 20, 10 ; PACKED-TID: {{buffer|flat|global}}_store_dword {{.*}}[[ID]] ; PACKED-TID: .amdhsa_system_vgpr_workitem_id 2 define amdgpu_kernel void @test_workitem_id_z(i32 addrspace(1)* %out) #1 { @@ -110,8 +108,8 @@ } ; ALL-LABEL: {{^}}test_workitem_id_y_func: -; HSA: v_lshrrev_b32_e32 v2, 10, v2 -; MESA: v_lshrrev_b32_e32 v2, 10, v2 +; HSA: v_bfe_u32 v2, v2, 10, 10 +; MESA: v_bfe_u32 v2, v2, 10, 10 define void @test_workitem_id_y_func(i32 addrspace(1)* %out) #1 { %id = call i32 
@llvm.amdgcn.workitem.id.y() store i32 %id, i32 addrspace(1)* %out @@ -119,8 +117,8 @@ } ; ALL-LABEL: {{^}}test_workitem_id_z_func: -; HSA: v_lshrrev_b32_e32 v2, 20, v2 -; MESA: v_lshrrev_b32_e32 v2, 20, v2 +; HSA: v_bfe_u32 v2, v2, 20, 10 +; MESA: v_bfe_u32 v2, v2, 20, 10 define void @test_workitem_id_z_func(i32 addrspace(1)* %out) #1 { %id = call i32 @llvm.amdgcn.workitem.id.z() store i32 %id, i32 addrspace(1)* %out Index: llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizercombiner-sbfx.mir =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizercombiner-sbfx.mir @@ -0,0 +1,151 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=amdgpu-postlegalizer-combiner -verify-machineinstrs %s -o - | FileCheck --check-prefix=GCN %s +# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -run-pass=amdgpu-postlegalizer-combiner -verify-machineinstrs %s -o - | FileCheck --check-prefix=GCN %s + +--- +name: bfe_sext_inreg_ashr_s32 +legalized: true +tracksRegLiveness: true + +body: | + bb.0.entry: + liveins: $vgpr0 + + ; GCN-LABEL: name: bfe_sext_inreg_ashr_s32 + ; GCN: liveins: $vgpr0 + ; GCN: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 + ; GCN: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GCN: [[SBFX:%[0-9]+]]:_(s32) = G_SBFX [[COPY]], [[C]](s32), [[C1]] + ; GCN: $vgpr0 = COPY [[SBFX]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = G_CONSTANT i32 4 + %2:_(s32) = G_ASHR %0, %1(s32) + %3:_(s32) = COPY %2(s32) + %4:_(s32) = G_SEXT_INREG %3, 16 + $vgpr0 = COPY %4(s32) +... 
+ +--- +name: bfe_sext_inreg_lshr_s32 +legalized: true +tracksRegLiveness: true + +body: | + bb.0.entry: + liveins: $vgpr0 + + ; GCN-LABEL: name: bfe_sext_inreg_lshr_s32 + ; GCN: liveins: $vgpr0 + ; GCN: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 + ; GCN: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GCN: [[SBFX:%[0-9]+]]:_(s32) = G_SBFX [[COPY]], [[C]](s32), [[C1]] + ; GCN: $vgpr0 = COPY [[SBFX]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = G_CONSTANT i32 4 + %2:_(s32) = G_LSHR %0, %1(s32) + %3:_(s32) = COPY %2(s32) + %4:_(s32) = G_SEXT_INREG %3, 16 + $vgpr0 = COPY %4(s32) +... + +--- +name: bfe_sext_inreg_ashr_s64 +legalized: true +tracksRegLiveness: true + +body: | + bb.0.entry: + liveins: $vgpr0_vgpr1 + + ; GCN-LABEL: name: bfe_sext_inreg_ashr_s64 + ; GCN: liveins: $vgpr0_vgpr1 + ; GCN: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 + ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 + ; GCN: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GCN: [[SBFX:%[0-9]+]]:_(s64) = G_SBFX [[COPY]], [[C]](s32), [[C1]] + ; GCN: $vgpr0_vgpr1 = COPY [[SBFX]](s64) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s32) = G_CONSTANT i32 4 + %2:_(s64) = G_ASHR %0, %1(s32) + %3:_(s64) = COPY %2(s64) + %4:_(s64) = G_SEXT_INREG %3, 16 + $vgpr0_vgpr1 = COPY %4(s64) +... + +--- +name: toobig_sext_inreg_ashr_s32 +legalized: true +tracksRegLiveness: true + +body: | + bb.0.entry: + liveins: $vgpr0 + + ; GCN-LABEL: name: toobig_sext_inreg_ashr_s32 + ; GCN: liveins: $vgpr0 + ; GCN: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GCN: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[C]](s32) + ; GCN: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ASHR]], 20 + ; GCN: $vgpr0 = COPY [[SEXT_INREG]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = G_CONSTANT i32 16 + %2:_(s32) = G_ASHR %0, %1(s32) + %3:_(s32) = COPY %2(s32) + %4:_(s32) = G_SEXT_INREG %3, 20 + $vgpr0 = COPY %4(s32) +... 
+ +--- +name: toobig_sext_inreg_ashr_s64 +legalized: true +tracksRegLiveness: true + +body: | + bb.0.entry: + liveins: $vgpr0_vgpr1 + + ; GCN-LABEL: name: toobig_sext_inreg_ashr_s64 + ; GCN: liveins: $vgpr0_vgpr1 + ; GCN: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 + ; GCN: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64) + ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31 + ; GCN: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[UV1]], [[C]](s32) + ; GCN: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 + ; GCN: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[UV1]], [[C1]](s32) + ; GCN: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[ASHR1]](s32), [[ASHR]](s32) + ; GCN: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[MV]], 32 + ; GCN: $vgpr0_vgpr1 = COPY [[SEXT_INREG]](s64) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s32) = G_CONSTANT i32 40 + %2:_(s64) = G_ASHR %0, %1(s32) + %3:_(s64) = COPY %2(s64) + %4:_(s64) = G_SEXT_INREG %3, 32 + $vgpr0_vgpr1 = COPY %4(s64) +... + +--- +name: var_sext_inreg_ashr_s32 +legalized: true +tracksRegLiveness: true + +body: | + bb.0.entry: + liveins: $vgpr0, $vgpr1 + + ; GCN-LABEL: name: var_sext_inreg_ashr_s32 + ; GCN: liveins: $vgpr0, $vgpr1 + ; GCN: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GCN: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[COPY1]](s32) + ; GCN: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ASHR]], 10 + ; GCN: $vgpr0 = COPY [[SEXT_INREG]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = G_ASHR %0, %1(s32) + %3:_(s32) = COPY %2(s32) + %4:_(s32) = G_SEXT_INREG %3, 10 + $vgpr0 = COPY %4(s32) +... 
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizercombiner-ubfx.mir =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizercombiner-ubfx.mir @@ -0,0 +1,103 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=amdgpu-postlegalizer-combiner -verify-machineinstrs %s -o - | FileCheck --check-prefix=GCN %s +# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -run-pass=amdgpu-postlegalizer-combiner -verify-machineinstrs %s -o - | FileCheck --check-prefix=GCN %s + +--- +name: bfe_and_lshr_s32 +legalized: true +tracksRegLiveness: true + +body: | + bb.0.entry: + liveins: $vgpr0 + + ; GCN-LABEL: name: bfe_and_lshr_s32 + ; GCN: liveins: $vgpr0 + ; GCN: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 + ; GCN: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 5 + ; GCN: [[UBFX:%[0-9]+]]:_(s32) = G_UBFX [[COPY]], [[C]](s32), [[C1]] + ; GCN: $vgpr0 = COPY [[UBFX]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = G_CONSTANT i32 8 + %2:_(s32) = G_LSHR %0, %1(s32) + %3:_(s32) = G_CONSTANT i32 31 + %4:_(s32) = G_AND %2, %3 + $vgpr0 = COPY %4(s32) + +... + +--- +name: bfe_and_lshr_s64 +legalized: true +tracksRegLiveness: true + +body: | + bb.0.entry: + liveins: $vgpr0_vgpr1 + + ; GCN-LABEL: name: bfe_and_lshr_s64 + ; GCN: liveins: $vgpr0_vgpr1 + ; GCN: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 + ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 + ; GCN: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10 + ; GCN: [[UBFX:%[0-9]+]]:_(s64) = G_UBFX [[COPY]], [[C]](s32), [[C1]] + ; GCN: $vgpr0_vgpr1 = COPY [[UBFX]](s64) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s32) = G_CONSTANT i32 8 + %2:_(s64) = G_LSHR %0, %1(s32) + %3:_(s64) = G_CONSTANT i64 1023 + %4:_(s64) = G_AND %2, %3 + $vgpr0_vgpr1 = COPY %4(s64) + +... 
+ +--- +name: toobig_and_lshr_s32 +legalized: true +tracksRegLiveness: true + +body: | + bb.0.entry: + liveins: $vgpr0 + + ; GCN-LABEL: name: toobig_and_lshr_s32 + ; GCN: liveins: $vgpr0 + ; GCN: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 28 + ; GCN: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32) + ; GCN: $vgpr0 = COPY [[LSHR]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = G_CONSTANT i32 28 + %2:_(s32) = G_LSHR %0, %1(s32) + %3:_(s32) = G_CONSTANT i32 511 + %4:_(s32) = G_AND %2, %3 + $vgpr0 = COPY %4(s32) + +... + +--- +name: bfe_and_ashr_s32 +legalized: true +tracksRegLiveness: true + +body: | + bb.0.entry: + liveins: $vgpr0 + + ; GCN-LABEL: name: bfe_and_ashr_s32 + ; GCN: liveins: $vgpr0 + ; GCN: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GCN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 + ; GCN: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[C]](s32) + ; GCN: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31 + ; GCN: [[AND:%[0-9]+]]:_(s32) = G_AND [[ASHR]], [[C1]] + ; GCN: $vgpr0 = COPY [[AND]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = G_CONSTANT i32 8 + %2:_(s32) = G_ASHR %0, %1(s32) + %3:_(s32) = G_CONSTANT i32 31 + %4:_(s32) = G_AND %2, %3 + $vgpr0 = COPY %4(s32) + +... Index: llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sbfx.mir =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-sbfx.mir @@ -0,0 +1,357 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect -regbankselect-fast -verify-machineinstrs -o - %s | FileCheck %s +# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect -regbankselect-greedy -verify-machineinstrs -o - %s | FileCheck %s + +... + +# Generate the 3 operand vector bitfield extract instructions for 32-bit +# operations only. 
+--- +name: test_sbfx_s32_vvv +legalized: true + +body: | + bb.0.entry: + liveins: $vgpr0, $vgpr1, $vgpr2 + + ; CHECK-LABEL: name: test_sbfx_s32_vvv + ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2 + ; CHECK: [[SBFX:%[0-9]+]]:vgpr(s32) = G_SBFX [[COPY]], [[COPY1]](s32), [[COPY2]] + ; CHECK: $vgpr0 = COPY [[SBFX]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = COPY $vgpr2 + %3:_(s32) = G_SBFX %0, %1(s32), %2 + $vgpr0 = COPY %3(s32) +... + +--- +name: test_sbfx_s32_vii +legalized: true + +body: | + bb.0.entry: + liveins: $vgpr0 + + ; CHECK-LABEL: name: test_sbfx_s32_vii + ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 10 + ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 4 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32) + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32) + ; CHECK: [[SBFX:%[0-9]+]]:vgpr(s32) = G_SBFX [[COPY]], [[COPY1]](s32), [[COPY2]] + ; CHECK: $vgpr0 = COPY [[SBFX]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = G_CONSTANT i32 10 + %2:_(s32) = G_CONSTANT i32 4 + %3:_(s32) = G_SBFX %0, %1(s32), %2 + $vgpr0 = COPY %3(s32) +... + +--- +name: test_sbfx_s32_vss +legalized: true + +body: | + bb.0.entry: + liveins: $vgpr0, $sgpr0, $sgpr1 + + ; CHECK-LABEL: name: test_sbfx_s32_vss + ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) + ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32) + ; CHECK: [[SBFX:%[0-9]+]]:vgpr(s32) = G_SBFX [[COPY]], [[COPY3]](s32), [[COPY4]] + ; CHECK: $vgpr0 = COPY [[SBFX]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $sgpr0 + %2:_(s32) = COPY $sgpr1 + %3:_(s32) = G_SBFX %0, %1(s32), %2 + $vgpr0 = COPY %3(s32) +... 
+ +# Expand to a sequence that implements the 64-bit bitfield extract using +# shifts and masks. +--- +name: test_sbfx_s64_vvv +legalized: true + +body: | + bb.0.entry: + liveins: $vgpr0_vgpr1, $vgpr2, $vgpr3 + + ; CHECK-LABEL: name: test_sbfx_s64_vvv + ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr2 + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr3 + ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s64) = G_ASHR [[COPY]], [[COPY1]](s32) + ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ASHR]](s64) + ; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 1 + ; CHECK: [[SHL:%[0-9]+]]:vgpr(s64) = G_SHL [[C]], [[COPY2]](s32) + ; CHECK: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; CHECK: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 + ; CHECK: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[UV2]], [[C1]] + ; CHECK: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[UV3]], [[C1]], [[USUBO1]] + ; CHECK: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[USUBO]] + ; CHECK: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[USUBE]] + ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) + ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s32) = COPY $vgpr2 + %2:_(s32) = COPY $vgpr3 + %3:_(s64) = G_SBFX %0, %1(s32), %2 + $vgpr0_vgpr1 = COPY %3(s64) +... 
+ +--- +name: test_sbfx_s64_vss +legalized: true + +body: | + bb.0.entry: + liveins: $vgpr0_vgpr1, $sgpr0, $sgpr1 + + ; CHECK-LABEL: name: test_sbfx_s64_vss + ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s64) = G_ASHR [[COPY]], [[COPY1]](s32) + ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ASHR]](s64) + ; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 1 + ; CHECK: [[SHL:%[0-9]+]]:vgpr(s64) = G_SHL [[C]], [[COPY2]](s32) + ; CHECK: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; CHECK: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 + ; CHECK: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[UV2]], [[C1]] + ; CHECK: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[UV3]], [[C1]], [[USUBO1]] + ; CHECK: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[USUBO]] + ; CHECK: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[USUBE]] + ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) + ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s32) = COPY $vgpr0 + %2:_(s32) = COPY $vgpr1 + %3:_(s64) = G_SBFX %0, %1(s32), %2 + $vgpr0_vgpr1 = COPY %3(s64) +... + +# If the offset and width are constants, use the 32-bit bitfield extract, +# and merge to create a 64-bit result. 
+--- +name: test_sbfx_s64_vii_small +legalized: true + +body: | + bb.0.entry: + liveins: $vgpr0_vgpr1 + + ; CHECK-LABEL: name: test_sbfx_s64_vii_small + ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 31 + ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 4 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32) + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32) + ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s64) = G_ASHR [[COPY]], [[COPY1]](s32) + ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ASHR]](s64) + ; CHECK: [[C2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 + ; CHECK: [[SBFX:%[0-9]+]]:vgpr(s32) = G_SBFX [[UV]], [[C2]](s32), [[COPY2]] + ; CHECK: [[C3:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 31 + ; CHECK: [[ASHR1:%[0-9]+]]:vgpr(s32) = G_ASHR [[SBFX]], [[C3]](s32) + ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[SBFX]](s32), [[ASHR1]](s32) + ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s32) = G_CONSTANT i32 31 + %2:_(s32) = G_CONSTANT i32 4 + %3:_(s64) = G_SBFX %0, %1(s32), %2 + $vgpr0_vgpr1 = COPY %3(s64) +... 
+ +--- +name: test_sbfx_s64_vii_big +legalized: true + +body: | + bb.0.entry: + liveins: $vgpr0_vgpr1 + + ; CHECK-LABEL: name: test_sbfx_s64_vii_big + ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 8 + ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 40 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32) + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32) + ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s64) = G_ASHR [[COPY]], [[COPY1]](s32) + ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ASHR]](s64) + ; CHECK: [[C2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 + ; CHECK: [[C3:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 8 + ; CHECK: [[SBFX:%[0-9]+]]:vgpr(s32) = G_SBFX [[UV1]], [[C2]](s32), [[C3]] + ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[UV]](s32), [[SBFX]](s32) + ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s32) = G_CONSTANT i32 8 + %2:_(s32) = G_CONSTANT i32 40 + %3:_(s64) = G_SBFX %0, %1(s32), %2 + $vgpr0_vgpr1 = COPY %3(s64) +... 
+ +--- +name: test_sbfx_s64_svv +legalized: true + +body: | + bb.0.entry: + liveins: $sgpr0_sgpr1, $vgpr0, $vgpr1 + + ; CHECK-LABEL: name: test_sbfx_s64_svv + ; CHECK: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s64) = COPY [[COPY]](s64) + ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s64) = G_ASHR [[COPY3]], [[COPY1]](s32) + ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ASHR]](s64) + ; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 1 + ; CHECK: [[SHL:%[0-9]+]]:vgpr(s64) = G_SHL [[C]], [[COPY2]](s32) + ; CHECK: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; CHECK: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 + ; CHECK: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[UV2]], [[C1]] + ; CHECK: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[UV3]], [[C1]], [[USUBO1]] + ; CHECK: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[USUBO]] + ; CHECK: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[USUBE]] + ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) + ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64) + %0:_(s64) = COPY $sgpr0_sgpr1 + %1:_(s32) = COPY $vgpr0 + %2:_(s32) = COPY $vgpr1 + %3:_(s64) = G_SBFX %0, %1(s32), %2 + $vgpr0_vgpr1 = COPY %3(s64) +... + +# Expand to a sequence that combines the offset and width for the two operand +# version of the 32-bit instruction. 
+--- +name: test_sbfx_s32_svv +legalized: true + +body: | + bb.0.entry: + liveins: $sgpr0, $vgpr0, $vgpr1 + + ; CHECK-LABEL: name: test_sbfx_s32_svv + ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32) + ; CHECK: [[SBFX:%[0-9]+]]:vgpr(s32) = G_SBFX [[COPY3]], [[COPY1]](s32), [[COPY2]] + ; CHECK: $vgpr0 = COPY [[SBFX]](s32) + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $vgpr0 + %2:_(s32) = COPY $vgpr1 + %3:_(s32) = G_SBFX %0, %1(s32), %2 + $vgpr0 = COPY %3(s32) +... + +--- +name: test_sbfx_s32_sss +legalized: true + +body: | + bb.0.entry: + liveins: $sgpr0, $sgpr1, $sgpr2 + + ; CHECK-LABEL: name: test_sbfx_s32_sss + ; CHECK: [[COPY:%[0-9]+]]:sreg_32(s32) = COPY $sgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63 + ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY1]], [[C]] + ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16 + ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY2]], [[C1]](s32) + ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]] + ; CHECK: [[S_BFE_I32_:%[0-9]+]]:sreg_32(s32) = S_BFE_I32 [[COPY]](s32), [[OR]](s32), implicit-def $scc + ; CHECK: $sgpr0 = COPY [[S_BFE_I32_]](s32) + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $sgpr1 + %2:_(s32) = COPY $sgpr2 + %3:_(s32) = G_SBFX %0, %1(s32), %2 + $sgpr0 = COPY %3(s32) +...
+ +--- +name: test_sbfx_s32_sii +legalized: true + +body: | + bb.0.entry: + liveins: $sgpr0 + + ; CHECK-LABEL: name: test_sbfx_s32_sii + ; CHECK: [[COPY:%[0-9]+]]:sreg_32(s32) = COPY $sgpr0 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1 + ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 10 + ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63 + ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[C]], [[C2]] + ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16 + ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[C1]], [[C3]](s32) + ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]] + ; CHECK: [[S_BFE_I32_:%[0-9]+]]:sreg_32(s32) = S_BFE_I32 [[COPY]](s32), [[OR]](s32), implicit-def $scc + ; CHECK: $sgpr0 = COPY [[S_BFE_I32_]](s32) + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = G_CONSTANT i32 1 + %2:_(s32) = G_CONSTANT i32 10 + %3:_(s32) = G_SBFX %0, %1(s32), %2 + $sgpr0 = COPY %3(s32) +... + +# Expand to a sequence that combines the offset and width for the two operand +# version of the 64-bit scalar instruction. +--- +name: test_sbfx_s64_sss +legalized: true + +body: | + bb.0.entry: + liveins: $sgpr0_sgpr1, $sgpr0, $sgpr1 + + ; CHECK-LABEL: name: test_sbfx_s64_sss + ; CHECK: [[COPY:%[0-9]+]]:sreg_64(s64) = COPY $sgpr0_sgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63 + ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY1]], [[C]] + ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16 + ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY2]], [[C1]](s32) + ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]] + ; CHECK: [[S_BFE_I64_:%[0-9]+]]:sreg_64(s64) = S_BFE_I64 [[COPY]](s64), [[OR]](s32), implicit-def $scc + ; CHECK: $sgpr0_sgpr1 = COPY [[S_BFE_I64_]](s64) + %0:_(s64) = COPY $sgpr0_sgpr1 + %1:_(s32) = COPY $sgpr0 + %2:_(s32) = COPY $sgpr1 + %3:_(s64) = G_SBFX %0, %1(s32), %2 + $sgpr0_sgpr1 = COPY %3(s64) +... 
+ +--- +name: test_sbfx_s64_sii +legalized: true + +body: | + bb.0.entry: + liveins: $sgpr0_sgpr1 + + ; CHECK-LABEL: name: test_sbfx_s64_sii + ; CHECK: [[COPY:%[0-9]+]]:sreg_64(s64) = COPY $sgpr0_sgpr1 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1 + ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 10 + ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63 + ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[C]], [[C2]] + ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16 + ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[C1]], [[C3]](s32) + ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]] + ; CHECK: [[S_BFE_I64_:%[0-9]+]]:sreg_64(s64) = S_BFE_I64 [[COPY]](s64), [[OR]](s32), implicit-def $scc + %0:_(s64) = COPY $sgpr0_sgpr1 + %1:_(s32) = G_CONSTANT i32 1 + %2:_(s32) = G_CONSTANT i32 10 + %3:_(s64) = G_SBFX %0, %1(s32), %2 +... Index: llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-ubfx.mir =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-ubfx.mir @@ -0,0 +1,357 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect -regbankselect-fast -verify-machineinstrs -o - %s | FileCheck %s +# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect -regbankselect-greedy -verify-machineinstrs -o - %s | FileCheck %s + +... + +# Generate the 3 operand vector bitfield extract instructions for 32-bit +# operations only. 
+--- +name: test_ubfx_s32_vvv +legalized: true + +body: | + bb.0.entry: + liveins: $vgpr0, $vgpr1, $vgpr2 + + ; CHECK-LABEL: name: test_ubfx_s32_vvv + ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2 + ; CHECK: [[UBFX:%[0-9]+]]:vgpr(s32) = G_UBFX [[COPY]], [[COPY1]](s32), [[COPY2]] + ; CHECK: $vgpr0 = COPY [[UBFX]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = COPY $vgpr2 + %3:_(s32) = G_UBFX %0, %1(s32), %2 + $vgpr0 = COPY %3(s32) +... + +--- +name: test_ubfx_s32_vii +legalized: true + +body: | + bb.0.entry: + liveins: $vgpr0 + + ; CHECK-LABEL: name: test_ubfx_s32_vii + ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 10 + ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 4 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32) + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32) + ; CHECK: [[UBFX:%[0-9]+]]:vgpr(s32) = G_UBFX [[COPY]], [[COPY1]](s32), [[COPY2]] + ; CHECK: $vgpr0 = COPY [[UBFX]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = G_CONSTANT i32 10 + %2:_(s32) = G_CONSTANT i32 4 + %3:_(s32) = G_UBFX %0, %1(s32), %2 + $vgpr0 = COPY %3(s32) +... + +--- +name: test_ubfx_s32_vss +legalized: true + +body: | + bb.0.entry: + liveins: $vgpr0, $sgpr0, $sgpr1 + + ; CHECK-LABEL: name: test_ubfx_s32_vss + ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) + ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32) + ; CHECK: [[UBFX:%[0-9]+]]:vgpr(s32) = G_UBFX [[COPY]], [[COPY3]](s32), [[COPY4]] + ; CHECK: $vgpr0 = COPY [[UBFX]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $sgpr0 + %2:_(s32) = COPY $sgpr1 + %3:_(s32) = G_UBFX %0, %1(s32), %2 + $vgpr0 = COPY %3(s32) +... 
+ +# Expand to a sequence that implements the 64-bit bitfield extract using +# shifts and masks. +--- +name: test_ubfx_s64_vvv +legalized: true + +body: | + bb.0.entry: + liveins: $vgpr0_vgpr1, $vgpr2, $vgpr3 + + ; CHECK-LABEL: name: test_ubfx_s64_vvv + ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr2 + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr3 + ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s64) = G_LSHR [[COPY]], [[COPY1]](s32) + ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[LSHR]](s64) + ; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 1 + ; CHECK: [[SHL:%[0-9]+]]:vgpr(s64) = G_SHL [[C]], [[COPY2]](s32) + ; CHECK: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; CHECK: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 + ; CHECK: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[UV2]], [[C1]] + ; CHECK: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[UV3]], [[C1]], [[USUBO1]] + ; CHECK: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[USUBO]] + ; CHECK: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[USUBE]] + ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) + ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s32) = COPY $vgpr2 + %2:_(s32) = COPY $vgpr3 + %3:_(s64) = G_UBFX %0, %1(s32), %2 + $vgpr0_vgpr1 = COPY %3(s64) +... 
+ +--- +name: test_ubfx_s64_vss +legalized: true + +body: | + bb.0.entry: + liveins: $vgpr0_vgpr1, $sgpr0, $sgpr1 + + ; CHECK-LABEL: name: test_ubfx_s64_vss + ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s64) = G_LSHR [[COPY]], [[COPY1]](s32) + ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[LSHR]](s64) + ; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 1 + ; CHECK: [[SHL:%[0-9]+]]:vgpr(s64) = G_SHL [[C]], [[COPY2]](s32) + ; CHECK: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; CHECK: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 + ; CHECK: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[UV2]], [[C1]] + ; CHECK: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[UV3]], [[C1]], [[USUBO1]] + ; CHECK: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[USUBO]] + ; CHECK: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[USUBE]] + ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) + ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s32) = COPY $vgpr0 + %2:_(s32) = COPY $vgpr1 + %3:_(s64) = G_UBFX %0, %1(s32), %2 + $vgpr0_vgpr1 = COPY %3(s64) +... + +# If the offset and width are constants, use the 32-bit bitfield extract, +# and merge to create a 64-bit result. 
+--- +name: test_ubfx_s64_vii_small +legalized: true + +body: | + bb.0.entry: + liveins: $vgpr0_vgpr1 + + ; CHECK-LABEL: name: test_ubfx_s64_vii_small + ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 31 + ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 4 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32) + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32) + ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s64) = G_LSHR [[COPY]], [[COPY1]](s32) + ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[LSHR]](s64) + ; CHECK: [[C2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 + ; CHECK: [[UBFX:%[0-9]+]]:vgpr(s32) = G_UBFX [[UV]], [[C2]](s32), [[COPY2]] + ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[UBFX]](s32), [[C2]](s32) + ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s32) = G_CONSTANT i32 31 + %2:_(s32) = G_CONSTANT i32 4 + %3:_(s64) = G_UBFX %0, %1(s32), %2 + $vgpr0_vgpr1 = COPY %3(s64) +... 
+ +--- +name: test_ubfx_s64_vii_big +legalized: true + +body: | + bb.0.entry: + liveins: $vgpr0_vgpr1 + + ; CHECK-LABEL: name: test_ubfx_s64_vii_big + ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 8 + ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 40 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32) + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32) + ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s64) = G_LSHR [[COPY]], [[COPY1]](s32) + ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[LSHR]](s64) + ; CHECK: [[C2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0 + ; CHECK: [[C3:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 8 + ; CHECK: [[UBFX:%[0-9]+]]:vgpr(s32) = G_UBFX [[UV1]], [[C2]](s32), [[C3]] + ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[UV]](s32), [[UBFX]](s32) + ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s32) = G_CONSTANT i32 8 + %2:_(s32) = G_CONSTANT i32 40 + %3:_(s64) = G_UBFX %0, %1(s32), %2 + $vgpr0_vgpr1 = COPY %3(s64) +... 
+ +--- +name: test_ubfx_s64_svv +legalized: true + +body: | + bb.0.entry: + liveins: $sgpr0_sgpr1, $vgpr0, $vgpr1 + + ; CHECK-LABEL: name: test_ubfx_s64_svv + ; CHECK: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s64) = COPY [[COPY]](s64) + ; CHECK: [[LSHR:%[0-9]+]]:vgpr(s64) = G_LSHR [[COPY3]], [[COPY1]](s32) + ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[LSHR]](s64) + ; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 1 + ; CHECK: [[SHL:%[0-9]+]]:vgpr(s64) = G_SHL [[C]], [[COPY2]](s32) + ; CHECK: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[SHL]](s64) + ; CHECK: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 1 + ; CHECK: [[USUBO:%[0-9]+]]:vgpr(s32), [[USUBO1:%[0-9]+]]:vcc(s1) = G_USUBO [[UV2]], [[C1]] + ; CHECK: [[USUBE:%[0-9]+]]:vgpr(s32), [[USUBE1:%[0-9]+]]:vcc(s1) = G_USUBE [[UV3]], [[C1]], [[USUBO1]] + ; CHECK: [[AND:%[0-9]+]]:vgpr(s32) = G_AND [[UV]], [[USUBO]] + ; CHECK: [[AND1:%[0-9]+]]:vgpr(s32) = G_AND [[UV1]], [[USUBE]] + ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[AND]](s32), [[AND1]](s32) + ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64) + %0:_(s64) = COPY $sgpr0_sgpr1 + %1:_(s32) = COPY $vgpr0 + %2:_(s32) = COPY $vgpr1 + %3:_(s64) = G_UBFX %0, %1(s32), %2 + $vgpr0_vgpr1 = COPY %3(s64) +... + +# Expand to a sequence that combines the offset and width for the two operand +# version of the 32-bit instruction. 
+--- +name: test_ubfx_s32_svv +legalized: true + +body: | + bb.0.entry: + liveins: $sgpr0, $vgpr0, $vgpr1 + + ; CHECK-LABEL: name: test_ubfx_s32_svv + ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 + ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 + ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32) + ; CHECK: [[UBFX:%[0-9]+]]:vgpr(s32) = G_UBFX [[COPY3]], [[COPY1]](s32), [[COPY2]] + ; CHECK: $vgpr0 = COPY [[UBFX]](s32) + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $vgpr0 + %2:_(s32) = COPY $vgpr1 + %3:_(s32) = G_UBFX %0, %1(s32), %2 + $vgpr0 = COPY %3(s32) +... + +--- +name: test_ubfx_s32_sss +legalized: true + +body: | + bb.0.entry: + liveins: $sgpr0, $sgpr1, $sgpr2 + + ; CHECK-LABEL: name: test_ubfx_s32_sss + ; CHECK: [[COPY:%[0-9]+]]:sreg_32(s32) = COPY $sgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 + ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63 + ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY1]], [[C]] + ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16 + ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY2]], [[C1]](s32) + ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]] + ; CHECK: [[S_BFE_U32_:%[0-9]+]]:sreg_32(s32) = S_BFE_U32 [[COPY]](s32), [[OR]](s32), implicit-def $scc + ; CHECK: $sgpr0 = COPY [[S_BFE_U32_]](s32) + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = COPY $sgpr1 + %2:_(s32) = COPY $sgpr2 + %3:_(s32) = G_UBFX %0, %1(s32), %2 + $sgpr0 = COPY %3(s32) +... 
+ +--- +name: test_ubfx_s32_sii +legalized: true + +body: | + bb.0.entry: + liveins: $sgpr0, $sgpr1, $sgpr2 + + ; CHECK-LABEL: name: test_ubfx_s32_sii + ; CHECK: [[COPY:%[0-9]+]]:sreg_32(s32) = COPY $sgpr0 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1 + ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 10 + ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63 + ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[C]], [[C2]] + ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16 + ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[C1]], [[C3]](s32) + ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]] + ; CHECK: [[S_BFE_U32_:%[0-9]+]]:sreg_32(s32) = S_BFE_U32 [[COPY]](s32), [[OR]](s32), implicit-def $scc + ; CHECK: $sgpr0 = COPY [[S_BFE_U32_]](s32) + %0:_(s32) = COPY $sgpr0 + %1:_(s32) = G_CONSTANT i32 1 + %2:_(s32) = G_CONSTANT i32 10 + %3:_(s32) = G_UBFX %0, %1(s32), %2 + $sgpr0 = COPY %3(s32) +... + +# Expand to a sequence that combines the offset and width for the two operand +# version of the 64-bit scalar instruction. +--- +name: test_ubfx_s64_sss +legalized: true + +body: | + bb.0.entry: + liveins: $sgpr0_sgpr1, $sgpr2, $sgpr3 + + ; CHECK-LABEL: name: test_ubfx_s64_sss + ; CHECK: [[COPY:%[0-9]+]]:sreg_64(s64) = COPY $sgpr0_sgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 + ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr3 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63 + ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY1]], [[C]] + ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16 + ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY2]], [[C1]](s32) + ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]] + ; CHECK: [[S_BFE_U64_:%[0-9]+]]:sreg_64(s64) = S_BFE_U64 [[COPY]](s64), [[OR]](s32), implicit-def $scc + ; CHECK: $sgpr0_sgpr1 = COPY [[S_BFE_U64_]](s64) + %0:_(s64) = COPY $sgpr0_sgpr1 + %1:_(s32) = COPY $sgpr2 + %2:_(s32) = COPY $sgpr3 + %3:_(s64) = G_UBFX %0, %1(s32), %2 + $sgpr0_sgpr1 = COPY %3(s64) +... 
+ +--- +name: test_ubfx_s64_sii +legalized: true + +body: | + bb.0.entry: + liveins: $sgpr0_sgpr1 + + ; CHECK-LABEL: name: test_ubfx_s64_sii + ; CHECK: [[COPY:%[0-9]+]]:sreg_64(s64) = COPY $sgpr0_sgpr1 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1 + ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 10 + ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63 + ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[C]], [[C2]] + ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16 + ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[C1]], [[C3]](s32) + ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]] + ; CHECK: [[S_BFE_U64_:%[0-9]+]]:sreg_64(s64) = S_BFE_U64 [[COPY]](s64), [[OR]](s32), implicit-def $scc + ; CHECK: $sgpr0_sgpr1 = COPY [[S_BFE_U64_]](s64) + %0:_(s64) = COPY $sgpr0_sgpr1 + %1:_(s32) = G_CONSTANT i32 1 + %2:_(s32) = G_CONSTANT i32 10 + %3:_(s64) = G_UBFX %0, %1(s32), %2 + $sgpr0_sgpr1 = COPY %3(s64) +... Index: llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll +++ llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll @@ -604,33 +604,34 @@ ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: s_mov_b32 s4, 8 ; GFX9-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_lshrrev_b32_sdwa v5, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 ; GFX9-NEXT: v_mov_b32_e32 v8, 0xffff ; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0 ; GFX9-NEXT: v_and_or_b32 v0, v0, v8, v2 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_sdwa v5, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX9-NEXT: v_and_or_b32 v1, v1, v8, v5 ; GFX9-NEXT: v_and_or_b32 v2, v3, v8, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 
v6, 16, v1 +; GFX9-NEXT: v_and_or_b32 v1, v1, v8, v5 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v7 +; GFX9-NEXT: v_and_or_b32 v3, v6, v8, v3 ; GFX9-NEXT: v_pk_lshlrev_b16 v0, 8, v0 op_sel_hi:[0,1] ; GFX9-NEXT: v_pk_lshlrev_b16 v1, 8, v1 op_sel_hi:[0,1] -; GFX9-NEXT: v_and_or_b32 v3, v6, v8, v3 ; GFX9-NEXT: v_pk_add_i16 v0, v0, v1 clamp ; GFX9-NEXT: v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1] ; GFX9-NEXT: v_pk_lshlrev_b16 v3, 8, v3 op_sel_hi:[0,1] -; GFX9-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1] -; GFX9-NEXT: s_movk_i32 s4, 0xff ; GFX9-NEXT: v_pk_add_i16 v1, v2, v3 clamp -; GFX9-NEXT: v_and_b32_sdwa v2, v0, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX9-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1] +; GFX9-NEXT: v_mov_b32_e32 v2, 8 ; GFX9-NEXT: v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1] +; GFX9-NEXT: s_movk_i32 s4, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v2 ; GFX9-NEXT: v_and_b32_e32 v2, s4, v1 +; GFX9-NEXT: v_mov_b32_e32 v3, 24 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -638,33 +639,34 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX10-NEXT: s_mov_b32 s4, 8 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 16, v0 ; GFX10-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX10-NEXT: v_lshrrev_b32_sdwa v6, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD 
src0_sel:DWORD src1_sel:DWORD -; GFX10-NEXT: v_mov_b32_e32 v5, 0xffff -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX10-NEXT: v_mov_b32_e32 v7, 0xffff ; GFX10-NEXT: v_lshrrev_b32_e32 v8, 16, v1 ; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX10-NEXT: v_and_or_b32 v0, v0, v5, v2 -; GFX10-NEXT: v_and_or_b32 v1, v1, v5, v6 -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_or_b32 v3, v3, v5, v4 +; GFX10-NEXT: v_and_or_b32 v0, v0, v7, v2 +; GFX10-NEXT: v_and_or_b32 v1, v1, v7, v6 +; GFX10-NEXT: v_and_or_b32 v2, v3, v7, v4 +; GFX10-NEXT: v_and_or_b32 v3, v8, v7, v5 +; GFX10-NEXT: v_mov_b32_e32 v4, 24 ; GFX10-NEXT: v_pk_lshlrev_b16 v0, 8, v0 op_sel_hi:[0,1] ; GFX10-NEXT: v_pk_lshlrev_b16 v1, 8, v1 op_sel_hi:[0,1] -; GFX10-NEXT: v_and_or_b32 v2, v8, v5, v2 +; GFX10-NEXT: v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1] ; GFX10-NEXT: v_pk_lshlrev_b16 v3, 8, v3 op_sel_hi:[0,1] ; GFX10-NEXT: v_pk_add_i16 v0, v0, v1 clamp -; GFX10-NEXT: v_pk_lshlrev_b16 v1, 8, v2 op_sel_hi:[0,1] +; GFX10-NEXT: v_pk_add_i16 v1, v2, v3 clamp +; GFX10-NEXT: v_mov_b32_e32 v2, 8 ; GFX10-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1] -; GFX10-NEXT: v_pk_add_i16 v1, v3, v1 clamp -; GFX10-NEXT: v_and_b32_sdwa v2, v0, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; GFX10-NEXT: v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1] -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_and_b32_e32 v3, s4, v1 -; GFX10-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v2 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v3 ; GFX10-NEXT: v_or3_b32 v0, v0, v2, 
v1 @@ -831,46 +833,47 @@ ; ; GFX9-LABEL: s_saddsat_v4i8: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_lshr_b32 s2, s0, 8 -; GFX9-NEXT: s_lshr_b32 s3, s0, 16 -; GFX9-NEXT: s_lshr_b32 s4, s0, 24 -; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s2 -; GFX9-NEXT: s_pack_ll_b32_b16 s2, s3, s4 +; GFX9-NEXT: s_lshr_b32 s3, s0, 8 ; GFX9-NEXT: s_lshr_b32 s4, s0, 16 -; GFX9-NEXT: s_mov_b32 s3, 0x80008 -; GFX9-NEXT: s_lshr_b32 s5, s1, 8 -; GFX9-NEXT: s_lshl_b32 s0, s0, s3 -; GFX9-NEXT: s_lshl_b32 s4, s4, 8 -; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s4 -; GFX9-NEXT: s_lshr_b32 s4, s2, 16 -; GFX9-NEXT: s_lshr_b32 s6, s1, 16 -; GFX9-NEXT: s_lshr_b32 s7, s1, 24 -; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX9-NEXT: s_lshr_b32 s5, s1, 16 -; GFX9-NEXT: s_lshl_b32 s2, s2, s3 -; GFX9-NEXT: s_lshl_b32 s4, s4, 8 -; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s4 -; GFX9-NEXT: s_pack_ll_b32_b16 s4, s6, s7 -; GFX9-NEXT: s_lshl_b32 s1, s1, s3 -; GFX9-NEXT: s_lshl_b32 s5, s5, 8 -; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX9-NEXT: s_lshr_b32 s5, s4, 16 +; GFX9-NEXT: s_lshr_b32 s6, s0, 24 +; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s3 +; GFX9-NEXT: s_pack_ll_b32_b16 s3, s4, s6 +; GFX9-NEXT: s_lshr_b32 s6, s0, 16 +; GFX9-NEXT: s_mov_b32 s4, 0x80008 +; GFX9-NEXT: s_lshr_b32 s7, s1, 8 +; GFX9-NEXT: s_lshl_b32 s0, s0, s4 +; GFX9-NEXT: s_lshl_b32 s6, s6, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s6 +; GFX9-NEXT: s_lshr_b32 s6, s3, 16 +; GFX9-NEXT: s_lshr_b32 s8, s1, 16 +; GFX9-NEXT: s_lshr_b32 s9, s1, 24 +; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s7 +; GFX9-NEXT: s_lshr_b32 s7, s1, 16 +; GFX9-NEXT: s_lshl_b32 s3, s3, s4 +; GFX9-NEXT: s_lshl_b32 s6, s6, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s6 +; GFX9-NEXT: s_pack_ll_b32_b16 s6, s8, s9 +; GFX9-NEXT: s_lshl_b32 s1, s1, s4 +; GFX9-NEXT: s_lshl_b32 s7, s7, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s7 +; GFX9-NEXT: s_lshr_b32 s7, s6, 16 +; GFX9-NEXT: s_lshl_b32 s4, s6, s4 +; GFX9-NEXT: s_lshl_b32 s6, s7, 8 ; GFX9-NEXT: v_mov_b32_e32 v0, s1 -; GFX9-NEXT: s_lshl_b32 s3, s4, s3 -; 
GFX9-NEXT: s_lshl_b32 s4, s5, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s6 ; GFX9-NEXT: v_pk_add_i16 v0, s0, v0 clamp -; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4 -; GFX9-NEXT: v_mov_b32_e32 v1, s3 +; GFX9-NEXT: v_mov_b32_e32 v1, s4 +; GFX9-NEXT: v_pk_add_i16 v1, s3, v1 clamp +; GFX9-NEXT: s_mov_b32 s2, 8 ; GFX9-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1] -; GFX9-NEXT: s_movk_i32 s0, 0xff -; GFX9-NEXT: v_pk_add_i16 v1, s2, v1 clamp -; GFX9-NEXT: v_and_b32_sdwa v2, v0, s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; GFX9-NEXT: v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1] +; GFX9-NEXT: s_movk_i32 s0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_and_or_b32 v0, v0, s0, v2 ; GFX9-NEXT: v_and_b32_e32 v2, s0, v1 +; GFX9-NEXT: s_mov_b32 s5, 24 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX9-NEXT: v_readfirstlane_b32 s0, v0 ; GFX9-NEXT: ; return to shader part epilog @@ -878,42 +881,43 @@ ; GFX10-LABEL: s_saddsat_v4i8: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s5, s1, 8 ; GFX10-NEXT: s_lshr_b32 s3, s0, 16 ; GFX10-NEXT: s_lshr_b32 s4, s0, 24 ; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s2 +; GFX10-NEXT: s_pack_ll_b32_b16 s2, s3, s4 +; GFX10-NEXT: s_lshr_b32 s4, s0, 16 +; GFX10-NEXT: s_mov_b32 s3, 0x80008 +; GFX10-NEXT: s_lshr_b32 s5, s1, 8 ; GFX10-NEXT: s_lshr_b32 s6, s1, 16 ; GFX10-NEXT: s_lshr_b32 s7, s1, 24 +; GFX10-NEXT: s_lshl_b32 s0, s0, s3 +; GFX10-NEXT: s_lshl_b32 s4, s4, 8 ; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX10-NEXT: s_lshr_b32 s8, s0, 16 +; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s4 +; GFX10-NEXT: s_pack_ll_b32_b16 s4, 
s6, s7 +; GFX10-NEXT: s_lshr_b32 s8, s2, 16 ; GFX10-NEXT: s_lshr_b32 s5, s1, 16 -; GFX10-NEXT: s_mov_b32 s2, 0x80008 +; GFX10-NEXT: s_lshr_b32 s6, s4, 16 +; GFX10-NEXT: s_lshl_b32 s2, s2, s3 ; GFX10-NEXT: s_lshl_b32 s8, s8, 8 -; GFX10-NEXT: s_lshl_b32 s0, s0, s2 -; GFX10-NEXT: s_lshl_b32 s1, s1, s2 +; GFX10-NEXT: s_lshl_b32 s1, s1, s3 ; GFX10-NEXT: s_lshl_b32 s5, s5, 8 -; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s4 -; GFX10-NEXT: s_pack_ll_b32_b16 s6, s6, s7 +; GFX10-NEXT: s_lshl_b32 s3, s4, s3 +; GFX10-NEXT: s_lshl_b32 s4, s6, 8 ; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s8 -; GFX10-NEXT: s_lshr_b32 s4, s3, 16 -; GFX10-NEXT: s_lshr_b32 s5, s6, 16 +; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s8 +; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s4 ; GFX10-NEXT: v_pk_add_i16 v0, s0, s1 clamp -; GFX10-NEXT: s_lshl_b32 s3, s3, s2 -; GFX10-NEXT: s_lshl_b32 s4, s4, 8 -; GFX10-NEXT: s_lshl_b32 s0, s6, s2 -; GFX10-NEXT: s_lshl_b32 s1, s5, 8 -; GFX10-NEXT: s_pack_ll_b32_b16 s2, s3, s4 -; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s1 +; GFX10-NEXT: v_pk_add_i16 v1, s2, s3 clamp +; GFX10-NEXT: s_mov_b32 s0, 8 +; GFX10-NEXT: s_movk_i32 s1, 0xff ; GFX10-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1] -; GFX10-NEXT: v_pk_add_i16 v1, s2, s0 clamp -; GFX10-NEXT: s_movk_i32 s0, 0xff -; GFX10-NEXT: v_and_b32_sdwa v2, v0, s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; GFX10-NEXT: v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1] -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX10-NEXT: v_and_b32_e32 v3, s0, v1 -; GFX10-NEXT: v_and_b32_sdwa v1, v1, s0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s0, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_b32_e32 v3, s1, v1 +; GFX10-NEXT: s_mov_b32 s0, 24 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD 
src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v2 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v3 ; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 Index: llvm/test/CodeGen/AMDGPU/GlobalISel/sbfx.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/sbfx.ll @@ -0,0 +1,202 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=fiji -o - < %s | FileCheck --check-prefix=GCN %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -o - < %s | FileCheck --check-prefix=GCN %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -o - < %s | FileCheck --check-prefix=GFX10 %s + +; Test vector signed bitfield extract. +define signext i8 @v_ashr_i8_i32(i32 %value) { +; GCN-LABEL: v_ashr_i8_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_bfe_i32 v0, v0, 4, 8 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_ashr_i8_i32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_bfe_i32 v0, v0, 4, 8 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %1 = ashr i32 %value, 4 + %2 = trunc i32 %1 to i8 + ret i8 %2 +} + +define signext i16 @v_ashr_i16_i32(i32 %value) { +; GCN-LABEL: v_ashr_i16_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_bfe_i32 v0, v0, 9, 16 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_ashr_i16_i32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_bfe_i32 v0, v0, 9, 16 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %1 = ashr i32 %value, 9 + %2 = trunc i32 %1 to i16 + ret i16 %2 +} + +define signext i8 @v_lshr_i8_i32(i32 %value) { +; GCN-LABEL: v_lshr_i8_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) 
lgkmcnt(0) +; GCN-NEXT: v_bfe_i32 v0, v0, 4, 8 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_lshr_i8_i32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_bfe_i32 v0, v0, 4, 8 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %1 = lshr i32 %value, 4 + %2 = trunc i32 %1 to i8 + ret i8 %2 +} + +define signext i16 @v_lshr_i16_i32(i32 %value) { +; GCN-LABEL: v_lshr_i16_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_bfe_i32 v0, v0, 9, 16 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_lshr_i16_i32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_bfe_i32 v0, v0, 9, 16 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %1 = lshr i32 %value, 9 + %2 = trunc i32 %1 to i16 + ret i16 %2 +} + +; Test vector bitfield extract for 64-bits. +define i64 @v_ashr_i64(i64 %value) { +; GCN-LABEL: v_ashr_i64: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_ashrrev_i64 v[0:1], 10, v[0:1] +; GCN-NEXT: v_bfe_i32 v0, v0, 0, 4 +; GCN-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_ashr_i64: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_ashrrev_i64 v[0:1], 10, v[0:1] +; GFX10-NEXT: v_bfe_i32 v0, v0, 0, 4 +; GFX10-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %1 = ashr i64 %value, 10 + %2 = shl i64 %1, 60 + %3 = ashr i64 %2, 60 + ret i64 %3 +} + +define i64 @v_lshr_i64(i64 %value) { +; GCN-LABEL: v_lshr_i64: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_ashrrev_i64 v[0:1], 10, v[0:1] +; GCN-NEXT: v_bfe_i32 v0, v0, 0, 4 +; GCN-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_lshr_i64: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt 
vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_ashrrev_i64 v[0:1], 10, v[0:1] +; GFX10-NEXT: v_bfe_i32 v0, v0, 0, 4 +; GFX10-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %1 = lshr i64 %value, 10 + %2 = shl i64 %1, 60 + %3 = ashr i64 %2, 60 + ret i64 %3 +} + +; Test scalar signed bitfield extract. +define amdgpu_ps signext i8 @s_ashr_i8_i32(i32 inreg %value) { +; GCN-LABEL: s_ashr_i8_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_bfe_i32 s0, s0, 0x80004 +; GCN-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: s_ashr_i8_i32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_bfe_i32 s0, s0, 0x80004 +; GFX10-NEXT: ; return to shader part epilog + %1 = ashr i32 %value, 4 + %2 = trunc i32 %1 to i8 + ret i8 %2 +} + +define amdgpu_ps signext i16 @s_ashr_i16_i32(i32 inreg %value) { +; GCN-LABEL: s_ashr_i16_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_bfe_i32 s0, s0, 0x100009 +; GCN-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: s_ashr_i16_i32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_bfe_i32 s0, s0, 0x100009 +; GFX10-NEXT: ; return to shader part epilog + %1 = ashr i32 %value, 9 + %2 = trunc i32 %1 to i16 + ret i16 %2 +} + +define amdgpu_ps signext i8 @s_lshr_i8_i32(i32 inreg %value) { +; GCN-LABEL: s_lshr_i8_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_bfe_i32 s0, s0, 0x80004 +; GCN-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: s_lshr_i8_i32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_bfe_i32 s0, s0, 0x80004 +; GFX10-NEXT: ; return to shader part epilog + %1 = lshr i32 %value, 4 + %2 = trunc i32 %1 to i8 + ret i8 %2 +} + +define amdgpu_ps signext i16 @s_lshr_i16_i32(i32 inreg %value) { +; GCN-LABEL: s_lshr_i16_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_bfe_i32 s0, s0, 0x100009 +; GCN-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: s_lshr_i16_i32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_bfe_i32 s0, s0, 0x100009 +; GFX10-NEXT: ; return to shader part epilog + %1 = lshr i32 %value, 9 + %2 = trunc i32 %1 to i16 + ret i16 %2 +} + +; Test 
scalar bitfield extract for 64-bits. +define amdgpu_ps i64 @s_ashr_i64(i64 inreg %value) { +; GCN-LABEL: s_ashr_i64: +; GCN: ; %bb.0: +; GCN-NEXT: s_bfe_i64 s[0:1], s[0:1], 0x40001 +; GCN-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: s_ashr_i64: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_bfe_i64 s[0:1], s[0:1], 0x40001 +; GFX10-NEXT: ; return to shader part epilog + %1 = ashr i64 %value, 1 + %2 = shl i64 %1, 60 + %3 = ashr i64 %2, 60 + ret i64 %3 +} Index: llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll +++ llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll @@ -604,33 +604,34 @@ ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: s_mov_b32 s4, 8 ; GFX9-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_lshrrev_b32_sdwa v5, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 ; GFX9-NEXT: v_mov_b32_e32 v8, 0xffff ; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0 ; GFX9-NEXT: v_and_or_b32 v0, v0, v8, v2 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_sdwa v5, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX9-NEXT: v_and_or_b32 v1, v1, v8, v5 ; GFX9-NEXT: v_and_or_b32 v2, v3, v8, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v1 +; GFX9-NEXT: v_and_or_b32 v1, v1, v8, v5 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v7 +; GFX9-NEXT: v_and_or_b32 v3, v6, v8, v3 ; GFX9-NEXT: v_pk_lshlrev_b16 v0, 8, v0 op_sel_hi:[0,1] ; GFX9-NEXT: v_pk_lshlrev_b16 v1, 8, v1 op_sel_hi:[0,1] -; GFX9-NEXT: v_and_or_b32 v3, v6, v8, v3 ; GFX9-NEXT: v_pk_sub_i16 v0, v0, v1 clamp ; GFX9-NEXT: v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1] ; GFX9-NEXT: v_pk_lshlrev_b16 v3, 8, v3 op_sel_hi:[0,1] -; GFX9-NEXT: v_pk_ashrrev_i16 
v0, 8, v0 op_sel_hi:[0,1] -; GFX9-NEXT: s_movk_i32 s4, 0xff ; GFX9-NEXT: v_pk_sub_i16 v1, v2, v3 clamp -; GFX9-NEXT: v_and_b32_sdwa v2, v0, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX9-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1] +; GFX9-NEXT: v_mov_b32_e32 v2, 8 ; GFX9-NEXT: v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1] +; GFX9-NEXT: s_movk_i32 s4, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v2 ; GFX9-NEXT: v_and_b32_e32 v2, s4, v1 +; GFX9-NEXT: v_mov_b32_e32 v3, 24 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -638,33 +639,34 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX10-NEXT: s_mov_b32 s4, 8 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 16, v0 ; GFX10-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX10-NEXT: v_lshrrev_b32_sdwa v6, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX10-NEXT: v_mov_b32_e32 v5, 0xffff -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX10-NEXT: v_mov_b32_e32 v7, 0xffff ; GFX10-NEXT: v_lshrrev_b32_e32 v8, 16, v1 ; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX10-NEXT: v_and_or_b32 v0, v0, v5, v2 -; GFX10-NEXT: v_and_or_b32 v1, v1, v5, v6 -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX10-NEXT: s_movk_i32 s4, 
0xff -; GFX10-NEXT: v_and_or_b32 v3, v3, v5, v4 +; GFX10-NEXT: v_and_or_b32 v0, v0, v7, v2 +; GFX10-NEXT: v_and_or_b32 v1, v1, v7, v6 +; GFX10-NEXT: v_and_or_b32 v2, v3, v7, v4 +; GFX10-NEXT: v_and_or_b32 v3, v8, v7, v5 +; GFX10-NEXT: v_mov_b32_e32 v4, 24 ; GFX10-NEXT: v_pk_lshlrev_b16 v0, 8, v0 op_sel_hi:[0,1] ; GFX10-NEXT: v_pk_lshlrev_b16 v1, 8, v1 op_sel_hi:[0,1] -; GFX10-NEXT: v_and_or_b32 v2, v8, v5, v2 +; GFX10-NEXT: v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1] ; GFX10-NEXT: v_pk_lshlrev_b16 v3, 8, v3 op_sel_hi:[0,1] ; GFX10-NEXT: v_pk_sub_i16 v0, v0, v1 clamp -; GFX10-NEXT: v_pk_lshlrev_b16 v1, 8, v2 op_sel_hi:[0,1] +; GFX10-NEXT: v_pk_sub_i16 v1, v2, v3 clamp +; GFX10-NEXT: v_mov_b32_e32 v2, 8 ; GFX10-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1] -; GFX10-NEXT: v_pk_sub_i16 v1, v3, v1 clamp -; GFX10-NEXT: v_and_b32_sdwa v2, v0, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; GFX10-NEXT: v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1] -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_and_b32_e32 v3, s4, v1 -; GFX10-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v2 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v3 ; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 @@ -831,46 +833,47 @@ ; ; GFX9-LABEL: s_ssubsat_v4i8: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_lshr_b32 s2, s0, 8 -; GFX9-NEXT: s_lshr_b32 s3, s0, 16 -; GFX9-NEXT: s_lshr_b32 s4, s0, 24 -; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s2 -; GFX9-NEXT: s_pack_ll_b32_b16 s2, s3, s4 +; GFX9-NEXT: s_lshr_b32 s3, s0, 8 ; GFX9-NEXT: s_lshr_b32 s4, s0, 16 -; GFX9-NEXT: s_mov_b32 s3, 0x80008 -; GFX9-NEXT: s_lshr_b32 s5, s1, 8 -; GFX9-NEXT: s_lshl_b32 s0, s0, s3 -; GFX9-NEXT: s_lshl_b32 s4, s4, 8 -; 
GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s4 -; GFX9-NEXT: s_lshr_b32 s4, s2, 16 -; GFX9-NEXT: s_lshr_b32 s6, s1, 16 -; GFX9-NEXT: s_lshr_b32 s7, s1, 24 -; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX9-NEXT: s_lshr_b32 s5, s1, 16 -; GFX9-NEXT: s_lshl_b32 s2, s2, s3 -; GFX9-NEXT: s_lshl_b32 s4, s4, 8 -; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s4 -; GFX9-NEXT: s_pack_ll_b32_b16 s4, s6, s7 -; GFX9-NEXT: s_lshl_b32 s1, s1, s3 -; GFX9-NEXT: s_lshl_b32 s5, s5, 8 -; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX9-NEXT: s_lshr_b32 s5, s4, 16 +; GFX9-NEXT: s_lshr_b32 s6, s0, 24 +; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s3 +; GFX9-NEXT: s_pack_ll_b32_b16 s3, s4, s6 +; GFX9-NEXT: s_lshr_b32 s6, s0, 16 +; GFX9-NEXT: s_mov_b32 s4, 0x80008 +; GFX9-NEXT: s_lshr_b32 s7, s1, 8 +; GFX9-NEXT: s_lshl_b32 s0, s0, s4 +; GFX9-NEXT: s_lshl_b32 s6, s6, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s6 +; GFX9-NEXT: s_lshr_b32 s6, s3, 16 +; GFX9-NEXT: s_lshr_b32 s8, s1, 16 +; GFX9-NEXT: s_lshr_b32 s9, s1, 24 +; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s7 +; GFX9-NEXT: s_lshr_b32 s7, s1, 16 +; GFX9-NEXT: s_lshl_b32 s3, s3, s4 +; GFX9-NEXT: s_lshl_b32 s6, s6, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s6 +; GFX9-NEXT: s_pack_ll_b32_b16 s6, s8, s9 +; GFX9-NEXT: s_lshl_b32 s1, s1, s4 +; GFX9-NEXT: s_lshl_b32 s7, s7, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s7 +; GFX9-NEXT: s_lshr_b32 s7, s6, 16 +; GFX9-NEXT: s_lshl_b32 s4, s6, s4 +; GFX9-NEXT: s_lshl_b32 s6, s7, 8 ; GFX9-NEXT: v_mov_b32_e32 v0, s1 -; GFX9-NEXT: s_lshl_b32 s3, s4, s3 -; GFX9-NEXT: s_lshl_b32 s4, s5, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s6 ; GFX9-NEXT: v_pk_sub_i16 v0, s0, v0 clamp -; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4 -; GFX9-NEXT: v_mov_b32_e32 v1, s3 +; GFX9-NEXT: v_mov_b32_e32 v1, s4 +; GFX9-NEXT: v_pk_sub_i16 v1, s3, v1 clamp +; GFX9-NEXT: s_mov_b32 s2, 8 ; GFX9-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1] -; GFX9-NEXT: s_movk_i32 s0, 0xff -; GFX9-NEXT: v_pk_sub_i16 v1, s2, v1 clamp -; GFX9-NEXT: v_and_b32_sdwa v2, v0, s0 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; GFX9-NEXT: v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1] +; GFX9-NEXT: s_movk_i32 s0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_and_or_b32 v0, v0, s0, v2 ; GFX9-NEXT: v_and_b32_e32 v2, s0, v1 +; GFX9-NEXT: s_mov_b32 s5, 24 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX9-NEXT: v_readfirstlane_b32 s0, v0 ; GFX9-NEXT: ; return to shader part epilog @@ -878,42 +881,43 @@ ; GFX10-LABEL: s_ssubsat_v4i8: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s5, s1, 8 ; GFX10-NEXT: s_lshr_b32 s3, s0, 16 ; GFX10-NEXT: s_lshr_b32 s4, s0, 24 ; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s2 +; GFX10-NEXT: s_pack_ll_b32_b16 s2, s3, s4 +; GFX10-NEXT: s_lshr_b32 s4, s0, 16 +; GFX10-NEXT: s_mov_b32 s3, 0x80008 +; GFX10-NEXT: s_lshr_b32 s5, s1, 8 ; GFX10-NEXT: s_lshr_b32 s6, s1, 16 ; GFX10-NEXT: s_lshr_b32 s7, s1, 24 +; GFX10-NEXT: s_lshl_b32 s0, s0, s3 +; GFX10-NEXT: s_lshl_b32 s4, s4, 8 ; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX10-NEXT: s_lshr_b32 s8, s0, 16 +; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s4 +; GFX10-NEXT: s_pack_ll_b32_b16 s4, s6, s7 +; GFX10-NEXT: s_lshr_b32 s8, s2, 16 ; GFX10-NEXT: s_lshr_b32 s5, s1, 16 -; GFX10-NEXT: s_mov_b32 s2, 0x80008 +; GFX10-NEXT: s_lshr_b32 s6, s4, 16 +; GFX10-NEXT: s_lshl_b32 s2, s2, s3 ; GFX10-NEXT: s_lshl_b32 s8, s8, 8 -; GFX10-NEXT: s_lshl_b32 s0, s0, s2 -; GFX10-NEXT: s_lshl_b32 s1, s1, s2 +; GFX10-NEXT: s_lshl_b32 s1, s1, s3 ; GFX10-NEXT: s_lshl_b32 s5, s5, 8 -; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s4 -; GFX10-NEXT: s_pack_ll_b32_b16 s6, s6, s7 +; GFX10-NEXT: 
s_lshl_b32 s3, s4, s3 +; GFX10-NEXT: s_lshl_b32 s4, s6, 8 ; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s8 -; GFX10-NEXT: s_lshr_b32 s4, s3, 16 -; GFX10-NEXT: s_lshr_b32 s5, s6, 16 +; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s8 +; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s4 ; GFX10-NEXT: v_pk_sub_i16 v0, s0, s1 clamp -; GFX10-NEXT: s_lshl_b32 s3, s3, s2 -; GFX10-NEXT: s_lshl_b32 s4, s4, 8 -; GFX10-NEXT: s_lshl_b32 s0, s6, s2 -; GFX10-NEXT: s_lshl_b32 s1, s5, 8 -; GFX10-NEXT: s_pack_ll_b32_b16 s2, s3, s4 -; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s1 +; GFX10-NEXT: v_pk_sub_i16 v1, s2, s3 clamp +; GFX10-NEXT: s_mov_b32 s0, 8 +; GFX10-NEXT: s_movk_i32 s1, 0xff ; GFX10-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1] -; GFX10-NEXT: v_pk_sub_i16 v1, s2, s0 clamp -; GFX10-NEXT: s_movk_i32 s0, 0xff -; GFX10-NEXT: v_and_b32_sdwa v2, v0, s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; GFX10-NEXT: v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1] -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX10-NEXT: v_and_b32_e32 v3, s0, v1 -; GFX10-NEXT: v_and_b32_sdwa v1, v1, s0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s0, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_b32_e32 v3, s1, v1 +; GFX10-NEXT: s_mov_b32 s0, 24 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v2 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v3 ; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 Index: llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll +++ llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll @@ -433,33 +433,34 @@ ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; 
GFX9-NEXT: s_mov_b32 s4, 8 ; GFX9-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_lshrrev_b32_sdwa v5, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 ; GFX9-NEXT: v_mov_b32_e32 v8, 0xffff ; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0 ; GFX9-NEXT: v_and_or_b32 v0, v0, v8, v2 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_sdwa v5, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX9-NEXT: v_and_or_b32 v1, v1, v8, v5 ; GFX9-NEXT: v_and_or_b32 v2, v3, v8, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v1 +; GFX9-NEXT: v_and_or_b32 v1, v1, v8, v5 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v7 +; GFX9-NEXT: v_and_or_b32 v3, v6, v8, v3 ; GFX9-NEXT: v_pk_lshlrev_b16 v0, 8, v0 op_sel_hi:[0,1] ; GFX9-NEXT: v_pk_lshlrev_b16 v1, 8, v1 op_sel_hi:[0,1] -; GFX9-NEXT: v_and_or_b32 v3, v6, v8, v3 ; GFX9-NEXT: v_pk_add_u16 v0, v0, v1 clamp ; GFX9-NEXT: v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1] ; GFX9-NEXT: v_pk_lshlrev_b16 v3, 8, v3 op_sel_hi:[0,1] -; GFX9-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1] -; GFX9-NEXT: s_movk_i32 s4, 0xff ; GFX9-NEXT: v_pk_add_u16 v1, v2, v3 clamp -; GFX9-NEXT: v_and_b32_sdwa v2, v0, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX9-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1] +; GFX9-NEXT: v_mov_b32_e32 v2, 8 ; GFX9-NEXT: v_pk_lshrrev_b16 v1, 8, v1 op_sel_hi:[0,1] +; GFX9-NEXT: s_movk_i32 s4, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v2 ; GFX9-NEXT: v_and_b32_e32 v2, s4, v1 +; GFX9-NEXT: v_mov_b32_e32 v3, 24 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:BYTE_3 
dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -467,33 +468,34 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX10-NEXT: s_mov_b32 s4, 8 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 16, v0 ; GFX10-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX10-NEXT: v_lshrrev_b32_sdwa v6, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX10-NEXT: v_mov_b32_e32 v5, 0xffff -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX10-NEXT: v_mov_b32_e32 v7, 0xffff ; GFX10-NEXT: v_lshrrev_b32_e32 v8, 16, v1 ; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX10-NEXT: v_and_or_b32 v0, v0, v5, v2 -; GFX10-NEXT: v_and_or_b32 v1, v1, v5, v6 -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX10-NEXT: s_movk_i32 s4, 0xff -; GFX10-NEXT: v_and_or_b32 v3, v3, v5, v4 +; GFX10-NEXT: v_and_or_b32 v0, v0, v7, v2 +; GFX10-NEXT: v_and_or_b32 v1, v1, v7, v6 +; GFX10-NEXT: v_and_or_b32 v2, v3, v7, v4 +; GFX10-NEXT: v_and_or_b32 v3, v8, v7, v5 +; GFX10-NEXT: v_mov_b32_e32 v4, 24 ; GFX10-NEXT: v_pk_lshlrev_b16 v0, 8, v0 op_sel_hi:[0,1] ; GFX10-NEXT: v_pk_lshlrev_b16 v1, 8, v1 op_sel_hi:[0,1] -; GFX10-NEXT: v_and_or_b32 v2, v8, v5, v2 +; GFX10-NEXT: v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1] ; GFX10-NEXT: v_pk_lshlrev_b16 v3, 8, v3 op_sel_hi:[0,1] ; GFX10-NEXT: v_pk_add_u16 v0, v0, v1 clamp -; GFX10-NEXT: v_pk_lshlrev_b16 v1, 8, v2 op_sel_hi:[0,1] +; GFX10-NEXT: v_pk_add_u16 v1, v2, v3 clamp +; GFX10-NEXT: v_mov_b32_e32 v2, 8 ; GFX10-NEXT: v_pk_lshrrev_b16 v0, 8, v0 
op_sel_hi:[0,1] -; GFX10-NEXT: v_pk_add_u16 v1, v3, v1 clamp -; GFX10-NEXT: v_and_b32_sdwa v2, v0, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; GFX10-NEXT: v_pk_lshrrev_b16 v1, 8, v1 op_sel_hi:[0,1] -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_and_b32_e32 v3, s4, v1 -; GFX10-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v2 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v3 ; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 @@ -585,46 +587,47 @@ ; ; GFX9-LABEL: s_uaddsat_v4i8: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_lshr_b32 s2, s0, 8 -; GFX9-NEXT: s_lshr_b32 s3, s0, 16 -; GFX9-NEXT: s_lshr_b32 s4, s0, 24 -; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s2 -; GFX9-NEXT: s_pack_ll_b32_b16 s2, s3, s4 +; GFX9-NEXT: s_lshr_b32 s3, s0, 8 ; GFX9-NEXT: s_lshr_b32 s4, s0, 16 -; GFX9-NEXT: s_mov_b32 s3, 0x80008 -; GFX9-NEXT: s_lshr_b32 s5, s1, 8 -; GFX9-NEXT: s_lshl_b32 s0, s0, s3 -; GFX9-NEXT: s_lshl_b32 s4, s4, 8 -; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s4 -; GFX9-NEXT: s_lshr_b32 s4, s2, 16 -; GFX9-NEXT: s_lshr_b32 s6, s1, 16 -; GFX9-NEXT: s_lshr_b32 s7, s1, 24 -; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX9-NEXT: s_lshr_b32 s5, s1, 16 -; GFX9-NEXT: s_lshl_b32 s2, s2, s3 -; GFX9-NEXT: s_lshl_b32 s4, s4, 8 -; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s4 -; GFX9-NEXT: s_pack_ll_b32_b16 s4, s6, s7 -; GFX9-NEXT: s_lshl_b32 s1, s1, s3 -; GFX9-NEXT: s_lshl_b32 s5, s5, 8 -; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX9-NEXT: s_lshr_b32 s5, s4, 16 +; GFX9-NEXT: s_lshr_b32 s6, s0, 24 +; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s3 +; GFX9-NEXT: s_pack_ll_b32_b16 s3, s4, s6 +; GFX9-NEXT: s_lshr_b32 s6, s0, 16 +; GFX9-NEXT: s_mov_b32 s4, 0x80008 +; GFX9-NEXT: s_lshr_b32 
s7, s1, 8 +; GFX9-NEXT: s_lshl_b32 s0, s0, s4 +; GFX9-NEXT: s_lshl_b32 s6, s6, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s6 +; GFX9-NEXT: s_lshr_b32 s6, s3, 16 +; GFX9-NEXT: s_lshr_b32 s8, s1, 16 +; GFX9-NEXT: s_lshr_b32 s9, s1, 24 +; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s7 +; GFX9-NEXT: s_lshr_b32 s7, s1, 16 +; GFX9-NEXT: s_lshl_b32 s3, s3, s4 +; GFX9-NEXT: s_lshl_b32 s6, s6, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s6 +; GFX9-NEXT: s_pack_ll_b32_b16 s6, s8, s9 +; GFX9-NEXT: s_lshl_b32 s1, s1, s4 +; GFX9-NEXT: s_lshl_b32 s7, s7, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s7 +; GFX9-NEXT: s_lshr_b32 s7, s6, 16 +; GFX9-NEXT: s_lshl_b32 s4, s6, s4 +; GFX9-NEXT: s_lshl_b32 s6, s7, 8 ; GFX9-NEXT: v_mov_b32_e32 v0, s1 -; GFX9-NEXT: s_lshl_b32 s3, s4, s3 -; GFX9-NEXT: s_lshl_b32 s4, s5, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s6 ; GFX9-NEXT: v_pk_add_u16 v0, s0, v0 clamp -; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4 -; GFX9-NEXT: v_mov_b32_e32 v1, s3 +; GFX9-NEXT: v_mov_b32_e32 v1, s4 +; GFX9-NEXT: v_pk_add_u16 v1, s3, v1 clamp +; GFX9-NEXT: s_mov_b32 s2, 8 ; GFX9-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1] -; GFX9-NEXT: s_movk_i32 s0, 0xff -; GFX9-NEXT: v_pk_add_u16 v1, s2, v1 clamp -; GFX9-NEXT: v_and_b32_sdwa v2, v0, s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; GFX9-NEXT: v_pk_lshrrev_b16 v1, 8, v1 op_sel_hi:[0,1] +; GFX9-NEXT: s_movk_i32 s0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_and_or_b32 v0, v0, s0, v2 ; GFX9-NEXT: v_and_b32_e32 v2, s0, v1 +; GFX9-NEXT: s_mov_b32 s5, 24 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX9-NEXT: v_readfirstlane_b32 s0, v0 
; GFX9-NEXT: ; return to shader part epilog @@ -632,42 +635,43 @@ ; GFX10-LABEL: s_uaddsat_v4i8: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s5, s1, 8 ; GFX10-NEXT: s_lshr_b32 s3, s0, 16 ; GFX10-NEXT: s_lshr_b32 s4, s0, 24 ; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s2 +; GFX10-NEXT: s_pack_ll_b32_b16 s2, s3, s4 +; GFX10-NEXT: s_lshr_b32 s4, s0, 16 +; GFX10-NEXT: s_mov_b32 s3, 0x80008 +; GFX10-NEXT: s_lshr_b32 s5, s1, 8 ; GFX10-NEXT: s_lshr_b32 s6, s1, 16 ; GFX10-NEXT: s_lshr_b32 s7, s1, 24 +; GFX10-NEXT: s_lshl_b32 s0, s0, s3 +; GFX10-NEXT: s_lshl_b32 s4, s4, 8 ; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX10-NEXT: s_lshr_b32 s8, s0, 16 +; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s4 +; GFX10-NEXT: s_pack_ll_b32_b16 s4, s6, s7 +; GFX10-NEXT: s_lshr_b32 s8, s2, 16 ; GFX10-NEXT: s_lshr_b32 s5, s1, 16 -; GFX10-NEXT: s_mov_b32 s2, 0x80008 +; GFX10-NEXT: s_lshr_b32 s6, s4, 16 +; GFX10-NEXT: s_lshl_b32 s2, s2, s3 ; GFX10-NEXT: s_lshl_b32 s8, s8, 8 -; GFX10-NEXT: s_lshl_b32 s0, s0, s2 -; GFX10-NEXT: s_lshl_b32 s1, s1, s2 +; GFX10-NEXT: s_lshl_b32 s1, s1, s3 ; GFX10-NEXT: s_lshl_b32 s5, s5, 8 -; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s4 -; GFX10-NEXT: s_pack_ll_b32_b16 s6, s6, s7 +; GFX10-NEXT: s_lshl_b32 s3, s4, s3 +; GFX10-NEXT: s_lshl_b32 s4, s6, 8 ; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s8 -; GFX10-NEXT: s_lshr_b32 s4, s3, 16 -; GFX10-NEXT: s_lshr_b32 s5, s6, 16 +; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s8 +; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s4 ; GFX10-NEXT: v_pk_add_u16 v0, s0, s1 clamp -; GFX10-NEXT: s_lshl_b32 s3, s3, s2 -; GFX10-NEXT: s_lshl_b32 s4, s4, 8 -; GFX10-NEXT: s_lshl_b32 s0, s6, s2 -; GFX10-NEXT: s_lshl_b32 s1, s5, 8 -; GFX10-NEXT: s_pack_ll_b32_b16 s2, s3, s4 -; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s1 +; GFX10-NEXT: v_pk_add_u16 v1, s2, s3 clamp +; GFX10-NEXT: s_mov_b32 s0, 8 +; GFX10-NEXT: s_movk_i32 s1, 0xff ; GFX10-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1] -; 
GFX10-NEXT: v_pk_add_u16 v1, s2, s0 clamp -; GFX10-NEXT: s_movk_i32 s0, 0xff -; GFX10-NEXT: v_and_b32_sdwa v2, v0, s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; GFX10-NEXT: v_pk_lshrrev_b16 v1, 8, v1 op_sel_hi:[0,1] -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX10-NEXT: v_and_b32_e32 v3, s0, v1 -; GFX10-NEXT: v_and_b32_sdwa v1, v1, s0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s0, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_b32_e32 v3, s1, v1 +; GFX10-NEXT: s_mov_b32 s0, 24 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v2 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v3 ; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 Index: llvm/test/CodeGen/AMDGPU/GlobalISel/ubfx.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/ubfx.ll @@ -0,0 +1,112 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=fiji -o - < %s | FileCheck --check-prefix=GCN %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -o - < %s | FileCheck --check-prefix=GCN %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -o - < %s | FileCheck --check-prefix=GFX10 %s + +; Test vector bitfield extract. 
+define i32 @v_srl_mask_i32(i32 %value) { +; GCN-LABEL: v_srl_mask_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_bfe_u32 v0, v0, 8, 5 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_srl_mask_i32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_bfe_u32 v0, v0, 8, 5 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %1 = lshr i32 %value, 8 + %2 = and i32 %1, 31 + ret i32 %2 +} + +; Test scalar bitfield extract. +define amdgpu_ps i32 @s_srl_mask_i32(i32 inreg %value) { +; GCN-LABEL: s_srl_mask_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_bfe_u32 s0, s0, 0x50008 +; GCN-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: s_srl_mask_i32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_bfe_u32 s0, s0, 0x50008 +; GFX10-NEXT: ; return to shader part epilog + %1 = lshr i32 %value, 8 + %2 = and i32 %1, 31 + ret i32 %2 +} + +; Don't generate G_UBFX if the offset + width is too big. +define amdgpu_ps i32 @s_srl_big_mask_i32(i32 inreg %value) { +; GCN-LABEL: s_srl_big_mask_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_lshr_b32 s0, s0, 30 +; GCN-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: s_srl_big_mask_i32: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_lshr_b32 s0, s0, 30 +; GFX10-NEXT: ; return to shader part epilog + %1 = lshr i32 %value, 30 + %2 = and i32 %1, 31 + ret i32 %2 +} + +; Test vector bitfield extract for 64-bits. 
+define i64 @v_srl_mask_i64(i64 %value) { +; GCN-LABEL: v_srl_mask_i64: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_lshrrev_b64 v[0:1], 25, v[0:1] +; GCN-NEXT: v_mov_b32_e32 v1, 0 +; GCN-NEXT: v_bfe_u32 v0, v0, 0, 10 +; GCN-NEXT: s_setpc_b64 s[30:31] +; +; GFX10-LABEL: v_srl_mask_i64: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: v_lshrrev_b64 v[0:1], 25, v[0:1] +; GFX10-NEXT: v_mov_b32_e32 v1, 0 +; GFX10-NEXT: v_bfe_u32 v0, v0, 0, 10 +; GFX10-NEXT: s_setpc_b64 s[30:31] + %1 = lshr i64 %value, 25 + %2 = and i64 %1, 1023 + ret i64 %2 +} + +; Test scalar bitfield extract for 64-bits. +define amdgpu_ps i64 @s_srl_mask_i64(i64 inreg %value) { +; GCN-LABEL: s_srl_mask_i64: +; GCN: ; %bb.0: +; GCN-NEXT: s_bfe_u64 s[0:1], s[0:1], 0xa0019 +; GCN-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: s_srl_mask_i64: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_bfe_u64 s[0:1], s[0:1], 0xa0019 +; GFX10-NEXT: ; return to shader part epilog + %1 = lshr i64 %value, 25 + %2 = and i64 %1, 1023 + ret i64 %2 +} + +; Don't generate G_UBFX if the offset + width is too big. 
+define amdgpu_ps i64 @s_srl_big_mask_i64(i64 inreg %value) { +; GCN-LABEL: s_srl_big_mask_i64: +; GCN: ; %bb.0: +; GCN-NEXT: s_lshr_b32 s0, s1, 28 +; GCN-NEXT: s_mov_b32 s1, 0 +; GCN-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: s_srl_big_mask_i64: +; GFX10: ; %bb.0: +; GFX10-NEXT: s_lshr_b32 s0, s1, 28 +; GFX10-NEXT: s_mov_b32 s1, 0 +; GFX10-NEXT: ; return to shader part epilog + %1 = lshr i64 %value, 60 + %2 = and i64 %1, 63 + ret i64 %2 +} Index: llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll +++ llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll @@ -421,33 +421,34 @@ ; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX9-NEXT: s_mov_b32 s4, 8 ; GFX9-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX9-NEXT: v_lshrrev_b32_sdwa v5, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0 ; GFX9-NEXT: v_mov_b32_e32 v8, 0xffff ; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0 ; GFX9-NEXT: v_and_or_b32 v0, v0, v8, v2 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v4 -; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v1 +; GFX9-NEXT: v_lshrrev_b32_sdwa v5, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX9-NEXT: v_and_or_b32 v1, v1, v8, v5 ; GFX9-NEXT: v_and_or_b32 v2, v3, v8, v2 +; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v1 +; GFX9-NEXT: v_and_or_b32 v1, v1, v8, v5 ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v7 +; GFX9-NEXT: v_and_or_b32 v3, v6, v8, v3 ; GFX9-NEXT: v_pk_lshlrev_b16 v0, 8, v0 op_sel_hi:[0,1] ; GFX9-NEXT: v_pk_lshlrev_b16 v1, 8, v1 op_sel_hi:[0,1] -; GFX9-NEXT: v_and_or_b32 v3, v6, v8, v3 ; GFX9-NEXT: v_pk_sub_u16 v0, v0, v1 clamp ; GFX9-NEXT: v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1] ; GFX9-NEXT: v_pk_lshlrev_b16 v3, 8, v3 op_sel_hi:[0,1] -; GFX9-NEXT: v_pk_lshrrev_b16 
v0, 8, v0 op_sel_hi:[0,1] -; GFX9-NEXT: s_movk_i32 s4, 0xff ; GFX9-NEXT: v_pk_sub_u16 v1, v2, v3 clamp -; GFX9-NEXT: v_and_b32_sdwa v2, v0, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX9-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1] +; GFX9-NEXT: v_mov_b32_e32 v2, 8 ; GFX9-NEXT: v_pk_lshrrev_b16 v1, 8, v1 op_sel_hi:[0,1] +; GFX9-NEXT: s_movk_i32 s4, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v2 ; GFX9-NEXT: v_and_b32_e32 v2, s4, v1 +; GFX9-NEXT: v_mov_b32_e32 v3, 24 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX9-NEXT: s_setpc_b64 s[30:31] ; @@ -455,33 +456,34 @@ ; GFX10: ; %bb.0: ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 -; GFX10-NEXT: s_mov_b32 s4, 8 ; GFX10-NEXT: v_lshrrev_b32_e32 v4, 24, v0 +; GFX10-NEXT: v_lshrrev_b32_e32 v5, 24, v1 +; GFX10-NEXT: s_mov_b32 s4, 8 +; GFX10-NEXT: v_lshrrev_b32_e32 v3, 16, v0 ; GFX10-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD ; GFX10-NEXT: v_lshrrev_b32_sdwa v6, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD -; GFX10-NEXT: v_mov_b32_e32 v5, 0xffff -; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v1 -; GFX10-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; GFX10-NEXT: v_mov_b32_e32 v7, 0xffff ; GFX10-NEXT: v_lshrrev_b32_e32 v8, 16, v1 ; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v4 -; GFX10-NEXT: v_and_or_b32 v0, v0, v5, v2 -; GFX10-NEXT: v_and_or_b32 v1, v1, v5, v6 -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v7 +; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5 ; GFX10-NEXT: s_movk_i32 s4, 
0xff -; GFX10-NEXT: v_and_or_b32 v3, v3, v5, v4 +; GFX10-NEXT: v_and_or_b32 v0, v0, v7, v2 +; GFX10-NEXT: v_and_or_b32 v1, v1, v7, v6 +; GFX10-NEXT: v_and_or_b32 v2, v3, v7, v4 +; GFX10-NEXT: v_and_or_b32 v3, v8, v7, v5 +; GFX10-NEXT: v_mov_b32_e32 v4, 24 ; GFX10-NEXT: v_pk_lshlrev_b16 v0, 8, v0 op_sel_hi:[0,1] ; GFX10-NEXT: v_pk_lshlrev_b16 v1, 8, v1 op_sel_hi:[0,1] -; GFX10-NEXT: v_and_or_b32 v2, v8, v5, v2 +; GFX10-NEXT: v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1] ; GFX10-NEXT: v_pk_lshlrev_b16 v3, 8, v3 op_sel_hi:[0,1] ; GFX10-NEXT: v_pk_sub_u16 v0, v0, v1 clamp -; GFX10-NEXT: v_pk_lshlrev_b16 v1, 8, v2 op_sel_hi:[0,1] +; GFX10-NEXT: v_pk_sub_u16 v1, v2, v3 clamp +; GFX10-NEXT: v_mov_b32_e32 v2, 8 ; GFX10-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1] -; GFX10-NEXT: v_pk_sub_u16 v1, v3, v1 clamp -; GFX10-NEXT: v_and_b32_sdwa v2, v0, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; GFX10-NEXT: v_pk_lshrrev_b16 v1, 8, v1 op_sel_hi:[0,1] -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 8, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_and_b32_e32 v3, s4, v1 -; GFX10-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v2 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v3 ; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 @@ -569,46 +571,47 @@ ; ; GFX9-LABEL: s_usubsat_v4i8: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_lshr_b32 s2, s0, 8 -; GFX9-NEXT: s_lshr_b32 s3, s0, 16 -; GFX9-NEXT: s_lshr_b32 s4, s0, 24 -; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s2 -; GFX9-NEXT: s_pack_ll_b32_b16 s2, s3, s4 +; GFX9-NEXT: s_lshr_b32 s3, s0, 8 ; GFX9-NEXT: s_lshr_b32 s4, s0, 16 -; GFX9-NEXT: s_mov_b32 s3, 0x80008 -; GFX9-NEXT: s_lshr_b32 s5, s1, 8 -; GFX9-NEXT: s_lshl_b32 s0, s0, s3 -; GFX9-NEXT: s_lshl_b32 s4, s4, 8 -; 
GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s4 -; GFX9-NEXT: s_lshr_b32 s4, s2, 16 -; GFX9-NEXT: s_lshr_b32 s6, s1, 16 -; GFX9-NEXT: s_lshr_b32 s7, s1, 24 -; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX9-NEXT: s_lshr_b32 s5, s1, 16 -; GFX9-NEXT: s_lshl_b32 s2, s2, s3 -; GFX9-NEXT: s_lshl_b32 s4, s4, 8 -; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s4 -; GFX9-NEXT: s_pack_ll_b32_b16 s4, s6, s7 -; GFX9-NEXT: s_lshl_b32 s1, s1, s3 -; GFX9-NEXT: s_lshl_b32 s5, s5, 8 -; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX9-NEXT: s_lshr_b32 s5, s4, 16 +; GFX9-NEXT: s_lshr_b32 s6, s0, 24 +; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s3 +; GFX9-NEXT: s_pack_ll_b32_b16 s3, s4, s6 +; GFX9-NEXT: s_lshr_b32 s6, s0, 16 +; GFX9-NEXT: s_mov_b32 s4, 0x80008 +; GFX9-NEXT: s_lshr_b32 s7, s1, 8 +; GFX9-NEXT: s_lshl_b32 s0, s0, s4 +; GFX9-NEXT: s_lshl_b32 s6, s6, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s6 +; GFX9-NEXT: s_lshr_b32 s6, s3, 16 +; GFX9-NEXT: s_lshr_b32 s8, s1, 16 +; GFX9-NEXT: s_lshr_b32 s9, s1, 24 +; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s7 +; GFX9-NEXT: s_lshr_b32 s7, s1, 16 +; GFX9-NEXT: s_lshl_b32 s3, s3, s4 +; GFX9-NEXT: s_lshl_b32 s6, s6, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s6 +; GFX9-NEXT: s_pack_ll_b32_b16 s6, s8, s9 +; GFX9-NEXT: s_lshl_b32 s1, s1, s4 +; GFX9-NEXT: s_lshl_b32 s7, s7, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s7 +; GFX9-NEXT: s_lshr_b32 s7, s6, 16 +; GFX9-NEXT: s_lshl_b32 s4, s6, s4 +; GFX9-NEXT: s_lshl_b32 s6, s7, 8 ; GFX9-NEXT: v_mov_b32_e32 v0, s1 -; GFX9-NEXT: s_lshl_b32 s3, s4, s3 -; GFX9-NEXT: s_lshl_b32 s4, s5, 8 +; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s6 ; GFX9-NEXT: v_pk_sub_u16 v0, s0, v0 clamp -; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4 -; GFX9-NEXT: v_mov_b32_e32 v1, s3 +; GFX9-NEXT: v_mov_b32_e32 v1, s4 +; GFX9-NEXT: v_pk_sub_u16 v1, s3, v1 clamp +; GFX9-NEXT: s_mov_b32 s2, 8 ; GFX9-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1] -; GFX9-NEXT: s_movk_i32 s0, 0xff -; GFX9-NEXT: v_pk_sub_u16 v1, s2, v1 clamp -; GFX9-NEXT: v_and_b32_sdwa v2, v0, s0 
dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX9-NEXT: v_lshlrev_b32_e32 v2, 8, v2 ; GFX9-NEXT: v_pk_lshrrev_b16 v1, 8, v1 op_sel_hi:[0,1] +; GFX9-NEXT: s_movk_i32 s0, 0xff +; GFX9-NEXT: v_lshlrev_b32_sdwa v2, s2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_and_or_b32 v0, v0, s0, v2 ; GFX9-NEXT: v_and_b32_e32 v2, s0, v1 +; GFX9-NEXT: s_mov_b32 s5, 24 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX9-NEXT: v_and_b32_sdwa v1, v1, s0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; GFX9-NEXT: v_lshlrev_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 ; GFX9-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX9-NEXT: v_readfirstlane_b32 s0, v0 ; GFX9-NEXT: ; return to shader part epilog @@ -616,42 +619,43 @@ ; GFX10-LABEL: s_usubsat_v4i8: ; GFX10: ; %bb.0: ; GFX10-NEXT: s_lshr_b32 s2, s0, 8 -; GFX10-NEXT: s_lshr_b32 s5, s1, 8 ; GFX10-NEXT: s_lshr_b32 s3, s0, 16 ; GFX10-NEXT: s_lshr_b32 s4, s0, 24 ; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s2 +; GFX10-NEXT: s_pack_ll_b32_b16 s2, s3, s4 +; GFX10-NEXT: s_lshr_b32 s4, s0, 16 +; GFX10-NEXT: s_mov_b32 s3, 0x80008 +; GFX10-NEXT: s_lshr_b32 s5, s1, 8 ; GFX10-NEXT: s_lshr_b32 s6, s1, 16 ; GFX10-NEXT: s_lshr_b32 s7, s1, 24 +; GFX10-NEXT: s_lshl_b32 s0, s0, s3 +; GFX10-NEXT: s_lshl_b32 s4, s4, 8 ; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX10-NEXT: s_lshr_b32 s8, s0, 16 +; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s4 +; GFX10-NEXT: s_pack_ll_b32_b16 s4, s6, s7 +; GFX10-NEXT: s_lshr_b32 s8, s2, 16 ; GFX10-NEXT: s_lshr_b32 s5, s1, 16 -; GFX10-NEXT: s_mov_b32 s2, 0x80008 +; GFX10-NEXT: s_lshr_b32 s6, s4, 16 +; GFX10-NEXT: s_lshl_b32 s2, s2, s3 ; GFX10-NEXT: s_lshl_b32 s8, s8, 8 -; GFX10-NEXT: s_lshl_b32 s0, s0, s2 -; GFX10-NEXT: s_lshl_b32 s1, s1, s2 +; GFX10-NEXT: s_lshl_b32 s1, s1, s3 ; GFX10-NEXT: s_lshl_b32 s5, s5, 8 -; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s4 -; GFX10-NEXT: s_pack_ll_b32_b16 s6, s6, s7 +; GFX10-NEXT: 
s_lshl_b32 s3, s4, s3 +; GFX10-NEXT: s_lshl_b32 s4, s6, 8 ; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s5 -; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s8 -; GFX10-NEXT: s_lshr_b32 s4, s3, 16 -; GFX10-NEXT: s_lshr_b32 s5, s6, 16 +; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s8 +; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s4 ; GFX10-NEXT: v_pk_sub_u16 v0, s0, s1 clamp -; GFX10-NEXT: s_lshl_b32 s3, s3, s2 -; GFX10-NEXT: s_lshl_b32 s4, s4, 8 -; GFX10-NEXT: s_lshl_b32 s0, s6, s2 -; GFX10-NEXT: s_lshl_b32 s1, s5, 8 -; GFX10-NEXT: s_pack_ll_b32_b16 s2, s3, s4 -; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s1 +; GFX10-NEXT: v_pk_sub_u16 v1, s2, s3 clamp +; GFX10-NEXT: s_mov_b32 s0, 8 +; GFX10-NEXT: s_movk_i32 s1, 0xff ; GFX10-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1] -; GFX10-NEXT: v_pk_sub_u16 v1, s2, s0 clamp -; GFX10-NEXT: s_movk_i32 s0, 0xff -; GFX10-NEXT: v_and_b32_sdwa v2, v0, s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD ; GFX10-NEXT: v_pk_lshrrev_b16 v1, 8, v1 op_sel_hi:[0,1] -; GFX10-NEXT: v_lshlrev_b32_e32 v2, 8, v2 -; GFX10-NEXT: v_and_b32_e32 v3, s0, v1 -; GFX10-NEXT: v_and_b32_sdwa v1, v1, s0 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD -; GFX10-NEXT: v_and_or_b32 v0, v0, s0, v2 +; GFX10-NEXT: v_lshlrev_b32_sdwa v2, s0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_b32_e32 v3, s1, v1 +; GFX10-NEXT: s_mov_b32 s0, 24 +; GFX10-NEXT: v_lshlrev_b32_sdwa v1, s0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 +; GFX10-NEXT: v_and_or_b32 v0, v0, s1, v2 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v3 ; GFX10-NEXT: v_or3_b32 v0, v0, v2, v1 ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 Index: llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp =================================================================== --- llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp +++ llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp @@ -1670,3 +1670,125 @@ EXPECT_EQ(0u, Res.One.getZExtValue()); 
EXPECT_EQ(0xFFFFFFFFFFFFFFF8u, Res.Zero.getZExtValue()); } + +TEST_F(AMDGPUGISelMITest, TestKnownBitsUBFX) { + StringRef MIRString = + " %3:_(s32) = G_IMPLICIT_DEF\n" + " %4:_(s32) = G_CONSTANT i32 12\n" + " %5:_(s32) = G_CONSTANT i32 8\n" + " %6:_(s32) = G_UBFX %3, %4(s32), %5\n" + " %ubfx_copy:_(s32) = COPY %6\n" + " %7:_(s32) = G_CONSTANT i32 28672\n" + " %8:_(s32) = G_UBFX %7, %4(s32), %5\n" + " %ubfx_copy_val:_(s32) = COPY %8\n" + " %9:_(s32) = G_IMPLICIT_DEF\n" + " %10:_(s32) = G_IMPLICIT_DEF\n" + " %11:_(s32) = G_UBFX %3, %9(s32), %10\n" + " %ubfx_copy_unk:_(s32) = COPY %11\n" + " %12:_(s32) = G_UBFX %3, %9(s32), %5\n" + " %ubfx_copy_unk_off:_(s32) = COPY %12\n" + " %13:_(s32) = G_UBFX %3, %4(s32), %10\n" + " %ubfx_copy_unk_width:_(s32) = COPY %13\n"; + setUp(MIRString); + if (!TM) + return; + Register CopyBfxReg = Copies[Copies.size() - 5]; + Register CopyValBfxReg = Copies[Copies.size() - 4]; + Register CopyUnkBfxReg = Copies[Copies.size() - 3]; + Register CopyUnkOffBfxReg = Copies[Copies.size() - 2]; + Register CopyUnkWidthBfxReg = Copies[Copies.size() - 1]; + + MachineInstr *CopyBfx = MRI->getVRegDef(CopyBfxReg); + unsigned SrcReg = CopyBfx->getOperand(1).getReg(); + MachineInstr *CopyValBfx = MRI->getVRegDef(CopyValBfxReg); + unsigned ValSrcReg = CopyValBfx->getOperand(1).getReg(); + MachineInstr *CopyUnkBfx = MRI->getVRegDef(CopyUnkBfxReg); + unsigned UnkSrcReg = CopyUnkBfx->getOperand(1).getReg(); + MachineInstr *CopyUnkOffBfx = MRI->getVRegDef(CopyUnkOffBfxReg); + unsigned UnkOffSrcReg = CopyUnkOffBfx->getOperand(1).getReg(); + MachineInstr *CopyUnkWidthBfx = MRI->getVRegDef(CopyUnkWidthBfxReg); + unsigned UnkWidthSrcReg = CopyUnkWidthBfx->getOperand(1).getReg(); + + GISelKnownBits Info(*MF); + + KnownBits Res1 = Info.getKnownBits(SrcReg); + EXPECT_EQ(0u, Res1.One.getZExtValue()); + EXPECT_EQ(0xffffff00u, Res1.Zero.getZExtValue()); + + KnownBits Res2 = Info.getKnownBits(ValSrcReg); + EXPECT_EQ(7u, Res2.One.getZExtValue()); + EXPECT_EQ(0xfffffff8u, 
Res2.Zero.getZExtValue()); + + KnownBits Res3 = Info.getKnownBits(UnkSrcReg); + EXPECT_EQ(0u, Res3.One.getZExtValue()); + EXPECT_EQ(0u, Res3.Zero.getZExtValue()); + + KnownBits Res4 = Info.getKnownBits(UnkOffSrcReg); + EXPECT_EQ(0u, Res4.One.getZExtValue()); + EXPECT_EQ(0xffffff00u, Res4.Zero.getZExtValue()); + + KnownBits Res5 = Info.getKnownBits(UnkWidthSrcReg); + EXPECT_EQ(0u, Res5.One.getZExtValue()); + EXPECT_EQ(0xfff00000u, Res5.Zero.getZExtValue()); +} + +TEST_F(AMDGPUGISelMITest, TestKnownBitsSBFX) { + StringRef MIRString = + " %3:_(s32) = G_IMPLICIT_DEF\n" + " %4:_(s32) = G_CONSTANT i32 8\n" + " %5:_(s32) = G_CONSTANT i32 4\n" + " %6:_(s32) = G_SBFX %3, %4(s32), %5\n" + " %sbfx_copy:_(s32) = COPY %6\n" + " %7:_(s32) = G_CONSTANT i32 2047\n" + " %8:_(s32) = G_SBFX %7, %4(s32), %5\n" + " %sbfx_copy_val:_(s32) = COPY %8\n" + " %9:_(s32) = G_CONSTANT i32 2048\n" + " %10:_(s32) = G_SBFX %9, %4(s32), %5\n" + " %sbfx_copy_neg_val:_(s32) = COPY %10\n" + " %11:_(s32) = G_IMPLICIT_DEF\n" + " %12:_(s32) = G_SBFX %7, %11(s32), %5\n" + " %sbfx_copy_unk_off:_(s32) = COPY %12\n" + " %13:_(s32) = G_SBFX %9, %4(s32), %11\n" + " %sbfx_copy_unk_width:_(s32) = COPY %13\n"; + setUp(MIRString); + if (!TM) + return; + Register CopyBfxReg = Copies[Copies.size() - 5]; + Register CopyValBfxReg = Copies[Copies.size() - 4]; + Register CopyNegValBfxReg = Copies[Copies.size() - 3]; + Register CopyUnkOffBfxReg = Copies[Copies.size() - 2]; + Register CopyUnkWidthBfxReg = Copies[Copies.size() - 1]; + + MachineInstr *CopyBfx = MRI->getVRegDef(CopyBfxReg); + unsigned SrcReg = CopyBfx->getOperand(1).getReg(); + MachineInstr *CopyValBfx = MRI->getVRegDef(CopyValBfxReg); + unsigned ValSrcReg = CopyValBfx->getOperand(1).getReg(); + MachineInstr *CopyNegValBfx = MRI->getVRegDef(CopyNegValBfxReg); + unsigned NegValSrcReg = CopyNegValBfx->getOperand(1).getReg(); + MachineInstr *CopyUnkOffBfx = MRI->getVRegDef(CopyUnkOffBfxReg); + unsigned UnkOffSrcReg = CopyUnkOffBfx->getOperand(1).getReg(); + 
MachineInstr *CopyUnkWidthBfx = MRI->getVRegDef(CopyUnkWidthBfxReg); + unsigned UnkWidthSrcReg = CopyUnkWidthBfx->getOperand(1).getReg(); + + GISelKnownBits Info(*MF); + + KnownBits Res1 = Info.getKnownBits(SrcReg); + EXPECT_EQ(0u, Res1.One.getZExtValue()); + EXPECT_EQ(0u, Res1.Zero.getZExtValue()); + + KnownBits Res2 = Info.getKnownBits(ValSrcReg); + EXPECT_EQ(7u, Res2.One.getZExtValue()); + EXPECT_EQ(0xfffffff8u, Res2.Zero.getZExtValue()); + + KnownBits Res3 = Info.getKnownBits(NegValSrcReg); + EXPECT_EQ(0xfffffff8u, Res3.One.getZExtValue()); + EXPECT_EQ(7u, Res3.Zero.getZExtValue()); + + KnownBits Res4 = Info.getKnownBits(UnkOffSrcReg); + EXPECT_EQ(0u, Res4.One.getZExtValue()); + EXPECT_EQ(0u, Res4.Zero.getZExtValue()); + + KnownBits Res5 = Info.getKnownBits(UnkWidthSrcReg); + EXPECT_EQ(0u, Res5.One.getZExtValue()); + EXPECT_EQ(0u, Res5.Zero.getZExtValue()); +}