Index: llvm/lib/Target/AMDGPU/AMDGPUCombine.td =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUCombine.td +++ llvm/lib/Target/AMDGPU/AMDGPUCombine.td @@ -26,6 +26,16 @@ [{ return matchUCharToFloat(*${itofp}, MRI, *MF, Helper); }]), (apply [{ applyUCharToFloat(*${itofp}); }])>; +def cvt_f32_ubyteN_matchdata : GIDefMatchData<"CvtF32UByteMatchInfo">; + +def cvt_f32_ubyteN : GICombineRule< + (defs root:$cvt_f32_ubyteN, cvt_f32_ubyteN_matchdata:$matchinfo), + (match (wip_match_opcode G_AMDGPU_CVT_F32_UBYTE0, + G_AMDGPU_CVT_F32_UBYTE1, + G_AMDGPU_CVT_F32_UBYTE2, + G_AMDGPU_CVT_F32_UBYTE3):$cvt_f32_ubyteN, + [{ return matchCvtF32UByteN(*${cvt_f32_ubyteN}, MRI, *MF, ${matchinfo}); }]), + (apply [{ applyCvtF32UByteN(*${cvt_f32_ubyteN}, ${matchinfo}); }])>; // Combines which should only apply on SI/VI def gfx6gfx7_combines : GICombineGroup<[fcmp_select_to_fmin_fmax_legacy]>; @@ -38,7 +48,8 @@ } def AMDGPUPostLegalizerCombinerHelper: GICombinerHelper< - "AMDGPUGenPostLegalizerCombinerHelper", [all_combines, - gfx6gfx7_combines, uchar_to_float]> { + "AMDGPUGenPostLegalizerCombinerHelper", + [all_combines, gfx6gfx7_combines, + uchar_to_float, cvt_f32_ubyteN]> { let DisableRuleOption = "amdgpupostlegalizercombiner-disable-rule"; } Index: llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp +++ llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp @@ -165,6 +165,60 @@ MI.eraseFromParent(); } +// FIXME: Should be able to have 2 separate matchdatas rather than custom struct +// boilerplate. +struct CvtF32UByteMatchInfo { + Register CvtVal; + unsigned ShiftOffset; +}; + +static bool matchCvtF32UByteN(MachineInstr &MI, MachineRegisterInfo &MRI, + MachineFunction &MF, + CvtF32UByteMatchInfo &MatchInfo) { + Register SrcReg = MI.getOperand(1).getReg(); + + // Look through G_ZEXT. + mi_match(SrcReg, MRI, m_GZExt(m_Reg(SrcReg))); + + Register Src0; + int64_t ShiftAmt; + bool IsShr = mi_match(SrcReg, MRI, m_GLShr(m_Reg(Src0), m_ICst(ShiftAmt))); + if (IsShr || mi_match(SrcReg, MRI, m_GShl(m_Reg(Src0), m_ICst(ShiftAmt)))) { + const unsigned Offset = MI.getOpcode() - AMDGPU::G_AMDGPU_CVT_F32_UBYTE0; + + unsigned ShiftOffset = 8 * Offset; + if (IsShr) + ShiftOffset += ShiftAmt; + else + ShiftOffset -= ShiftAmt; + + MatchInfo.CvtVal = Src0; + MatchInfo.ShiftOffset = ShiftOffset; + return ShiftOffset < 32 && ShiftOffset >= 8 && (ShiftOffset % 8) == 0; + } + + // TODO: Simplify demanded bits. 
+ return false; +} + +static void applyCvtF32UByteN(MachineInstr &MI, + const CvtF32UByteMatchInfo &MatchInfo) { + MachineIRBuilder B(MI); + unsigned NewOpc = AMDGPU::G_AMDGPU_CVT_F32_UBYTE0 + MatchInfo.ShiftOffset / 8; + + const LLT S32 = LLT::scalar(32); + Register CvtSrc = MatchInfo.CvtVal; + LLT SrcTy = B.getMRI()->getType(MatchInfo.CvtVal); + if (SrcTy != S32) { + assert(SrcTy.isScalar() && SrcTy.getSizeInBits() >= 8); + CvtSrc = B.buildAnyExt(S32, CvtSrc).getReg(0); + } + + assert(MI.getOpcode() != NewOpc); + B.buildInstr(NewOpc, {MI.getOperand(0)}, {CvtSrc}, MI.getFlags()); + MI.eraseFromParent(); +} + #define AMDGPUPOSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS #include "AMDGPUGenPostLegalizeGICombiner.inc" #undef AMDGPUPOSTLEGALIZERCOMBINERHELPER_GENCOMBINERHELPER_DEPS Index: llvm/lib/Target/AMDGPU/SIISelLowering.cpp =================================================================== --- llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -9824,6 +9824,8 @@ SDValue Src = N->getOperand(0); SDValue Shift = N->getOperand(0); + + // TODO: Extend type shouldn't matter (assuming legal types). if (Shift.getOpcode() == ISD::ZERO_EXTEND) Shift = Shift.getOperand(0); Index: llvm/test/CodeGen/AMDGPU/GlobalISel/combine-amdgpu-cvt-f32-ubyte.mir =================================================================== --- /dev/null +++ llvm/test/CodeGen/AMDGPU/GlobalISel/combine-amdgpu-cvt-f32-ubyte.mir @@ -0,0 +1,600 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -run-pass=amdgpu-postlegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s + +--- +name: cvt_f32_ubyte0_lshr_0 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte0_lshr_0 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 0 + ; CHECK: %shift:_(s32) = G_LSHR %arg, %shiftamt(s32) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE0 %shift + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 0 + %shift:_(s32) = G_LSHR %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE0 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte0_lshr_8 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte0_lshr_8 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %arg + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 8 + %shift:_(s32) = G_LSHR %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE0 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte0_lshr_16 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte0_lshr_16 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE2 %arg + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 16 + %shift:_(s32) = G_LSHR %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE0 %shift + $vgpr0 = COPY %result +... 
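The MIR cases above and below were generated against the matcher added in AMDGPUPostLegalizerCombiner.cpp. As a standalone sketch of the byte-offset arithmetic that matcher performs (a hypothetical combinedByte helper for illustration only, not part of the patch):

#include <cstdint>
#include <cstdio>

// Returns the byte selected by the rewritten G_AMDGPU_CVT_F32_UBYTE<n>, or
// -1 if the combine must not fire; mirrors matchCvtF32UByteN's final check.
int combinedByte(unsigned SrcByte, int64_t ShiftAmt, bool IsShr) {
  // A right shift moves higher bytes down into the selected position; a
  // left shift moves lower bytes up, so it subtracts instead.
  int64_t ShiftOffset = 8 * (int64_t)SrcByte + (IsShr ? ShiftAmt : -ShiftAmt);
  if (ShiftOffset < 8 || ShiftOffset >= 32 || (ShiftOffset % 8) != 0)
    return -1;
  return (int)(ShiftOffset / 8);
}

int main() {
  printf("%d\n", combinedByte(0, 8, /*IsShr=*/true));   // 1: ubyte0 (lshr x, 8)  -> ubyte1 x
  printf("%d\n", combinedByte(1, 16, /*IsShr=*/true));  // 3: ubyte1 (lshr x, 16) -> ubyte3 x
  printf("%d\n", combinedByte(3, 16, /*IsShr=*/false)); // 1: ubyte3 (shl x, 16)  -> ubyte1 x
  printf("%d\n", combinedByte(1, 24, /*IsShr=*/true));  // -1: offset 32 is past the dword
  printf("%d\n", combinedByte(1, 7, /*IsShr=*/false));  // -1: shift not byte-aligned
}

Note that the ShiftOffset >= 8 bound also rejects a zero offset, which would only regenerate the original instruction (compare cvt_f32_ubyte0_lshr_0 above, which is left unchanged).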
+ +--- +name: cvt_f32_ubyte0_lshr_24 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte0_lshr_24 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE3 %arg + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 24 + %shift:_(s32) = G_LSHR %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE0 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte1_lshr_8 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte1_lshr_8 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE2 %arg + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 8 + %shift:_(s32) = G_LSHR %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte1_lshr_16 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte1_lshr_16 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE3 %arg + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 16 + %shift:_(s32) = G_LSHR %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte1_lshr_24 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte1_lshr_24 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 24 + ; CHECK: %shift:_(s32) = G_LSHR %arg, %shiftamt(s32) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %shift + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 24 + %shift:_(s32) = G_LSHR %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte2_lshr_8 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte2_lshr_8 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE3 %arg + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 8 + %shift:_(s32) = G_LSHR %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE2 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte2_lshr_16 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte2_lshr_16 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 16 + ; CHECK: %shift:_(s32) = G_LSHR %arg, %shiftamt(s32) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE2 %shift + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 16 + %shift:_(s32) = G_LSHR %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE2 %shift + $vgpr0 = COPY %result +... 
+ +--- +name: cvt_f32_ubyte2_lshr_24 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte2_lshr_24 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 24 + ; CHECK: %shift:_(s32) = G_LSHR %arg, %shiftamt(s32) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE2 %shift + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 24 + %shift:_(s32) = G_LSHR %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE2 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte3_lshr_8 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte3_lshr_8 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 8 + ; CHECK: %shift:_(s32) = G_LSHR %arg, %shiftamt(s32) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE3 %shift + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 8 + %shift:_(s32) = G_LSHR %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE3 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte0_zext_lshr_8 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte0_zext_lshr_8 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %trunc:_(s16) = G_TRUNC %arg(s32) + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %trunc(s16) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 [[ANYEXT]] + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %trunc:_(s16) = G_TRUNC %arg + %shiftamt:_(s32) = G_CONSTANT i32 8 + %shift:_(s16) = G_LSHR %trunc, %shiftamt + %zext:_(s32) = G_ZEXT %shift + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE0 %zext + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte0_zext_lshr_16 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte0_zext_lshr_16 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %trunc:_(s16) = G_TRUNC %arg(s32) + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %trunc(s16) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE2 [[ANYEXT]] + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %trunc:_(s16) = G_TRUNC %arg + %shiftamt:_(s32) = G_CONSTANT i32 16 + %shift:_(s16) = G_LSHR %trunc, %shiftamt + %zext:_(s32) = G_ZEXT %shift + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE0 %zext + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte0_zext_lshr_24 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte0_zext_lshr_24 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %trunc:_(s16) = G_TRUNC %arg(s32) + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %trunc(s16) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE3 [[ANYEXT]] + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %trunc:_(s16) = G_TRUNC %arg + %shiftamt:_(s32) = G_CONSTANT i32 24 + %shift:_(s16) = G_LSHR %trunc, %shiftamt + %zext:_(s32) = G_ZEXT %shift + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE0 %zext + $vgpr0 = COPY %result +... 
+ +--- +name: cvt_f32_ubyte1_zext_lshr_8 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte1_zext_lshr_8 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %trunc:_(s16) = G_TRUNC %arg(s32) + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %trunc(s16) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE2 [[ANYEXT]] + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %trunc:_(s16) = G_TRUNC %arg + %shiftamt:_(s32) = G_CONSTANT i32 8 + %shift:_(s16) = G_LSHR %trunc, %shiftamt + %zext:_(s32) = G_ZEXT %shift + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %zext + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte0_shl_8 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte0_shl_8 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 8 + ; CHECK: %shift:_(s32) = G_SHL %arg, %shiftamt(s32) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE0 %shift + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 8 + %shift:_(s32) = G_SHL %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE0 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte1_shl_8 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte1_shl_8 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 8 + ; CHECK: %shift:_(s32) = G_SHL %arg, %shiftamt(s32) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %shift + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 8 + %shift:_(s32) = G_SHL %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte2_shl_8 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte2_shl_8 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %arg + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 8 + %shift:_(s32) = G_SHL %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE2 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte3_shl_8 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte3_shl_8 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE2 %arg + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 8 + %shift:_(s32) = G_SHL %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE3 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte0_shl_16 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte0_shl_16 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 16 + ; CHECK: %shift:_(s32) = G_SHL %arg, %shiftamt(s32) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE0 %shift + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 16 + %shift:_(s32) = G_SHL %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE0 %shift + $vgpr0 = COPY %result +... 
+ +--- +name: cvt_f32_ubyte1_shl_16 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte1_shl_16 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 16 + ; CHECK: %shift:_(s32) = G_SHL %arg, %shiftamt(s32) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %shift + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 16 + %shift:_(s32) = G_SHL %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte2_shl_16 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte2_shl_16 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 16 + ; CHECK: %shift:_(s32) = G_SHL %arg, %shiftamt(s32) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE2 %shift + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 16 + %shift:_(s32) = G_SHL %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE2 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte3_shl_16 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte3_shl_16 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %arg + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 16 + %shift:_(s32) = G_SHL %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE3 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte0_shl_24 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte0_shl_24 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 24 + ; CHECK: %shift:_(s32) = G_SHL %arg, %shiftamt(s32) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE0 %shift + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 24 + %shift:_(s32) = G_SHL %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE0 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte1_shl_24 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte1_shl_24 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 24 + ; CHECK: %shift:_(s32) = G_SHL %arg, %shiftamt(s32) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %shift + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 24 + %shift:_(s32) = G_SHL %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %shift + $vgpr0 = COPY %result +... + +--- +name: cvt_f32_ubyte2_shl_24 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + + ; CHECK-LABEL: name: cvt_f32_ubyte2_shl_24 + ; CHECK: liveins: $vgpr0 + ; CHECK: %arg:_(s32) = COPY $vgpr0 + ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 24 + ; CHECK: %shift:_(s32) = G_SHL %arg, %shiftamt(s32) + ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE2 %shift + ; CHECK: $vgpr0 = COPY %result(s32) + %arg:_(s32) = COPY $vgpr0 + %shiftamt:_(s32) = G_CONSTANT i32 24 + %shift:_(s32) = G_SHL %arg, %shiftamt + %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE2 %shift + $vgpr0 = COPY %result +... 
+
+---
+name: cvt_f32_ubyte3_shl_24
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: cvt_f32_ubyte3_shl_24
+    ; CHECK: liveins: $vgpr0
+    ; CHECK: %arg:_(s32) = COPY $vgpr0
+    ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 24
+    ; CHECK: %shift:_(s32) = G_SHL %arg, %shiftamt(s32)
+    ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE3 %shift
+    ; CHECK: $vgpr0 = COPY %result(s32)
+    %arg:_(s32) = COPY $vgpr0
+    %shiftamt:_(s32) = G_CONSTANT i32 24
+    %shift:_(s32) = G_SHL %arg, %shiftamt
+    %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE3 %shift
+    $vgpr0 = COPY %result
+...
+
+# Shift amount is wrong
+---
+name: cvt_f32_ubyte1_shl_7
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: cvt_f32_ubyte1_shl_7
+    ; CHECK: liveins: $vgpr0
+    ; CHECK: %arg:_(s32) = COPY $vgpr0
+    ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 7
+    ; CHECK: %shift:_(s32) = G_SHL %arg, %shiftamt(s32)
+    ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %shift
+    ; CHECK: $vgpr0 = COPY %result(s32)
+    %arg:_(s32) = COPY $vgpr0
+    %shiftamt:_(s32) = G_CONSTANT i32 7
+    %shift:_(s32) = G_SHL %arg, %shiftamt
+    %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE1 %shift
+    $vgpr0 = COPY %result
+...
+
+---
+name: cvt_f32_ubyte3_shl_17
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: cvt_f32_ubyte3_shl_17
+    ; CHECK: liveins: $vgpr0
+    ; CHECK: %arg:_(s32) = COPY $vgpr0
+    ; CHECK: %shiftamt:_(s32) = G_CONSTANT i32 17
+    ; CHECK: %shift:_(s32) = G_SHL %arg, %shiftamt(s32)
+    ; CHECK: %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE3 %shift
+    ; CHECK: $vgpr0 = COPY %result(s32)
+    %arg:_(s32) = COPY $vgpr0
+    %shiftamt:_(s32) = G_CONSTANT i32 17
+    %shift:_(s32) = G_SHL %arg, %shiftamt
+    %result:_(s32) = G_AMDGPU_CVT_F32_UBYTE3 %shift
+    $vgpr0 = COPY %result
+...
Index: llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/GlobalISel/cvt_f32_ubyte.ll
@@ -0,0 +1,1152 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-- -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
+; RUN: llc -global-isel -mtriple=amdgcn-- -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI %s
+
+declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
+declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone
+
+define float @v_uitofp_i32_to_f32_mask255(i32 %arg0) nounwind {
+; SI-LABEL: v_uitofp_i32_to_f32_mask255:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT:    v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_uitofp_i32_to_f32_mask255:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+; VI-NEXT:    s_setpc_b64 s[30:31]
+  %masked = and i32 %arg0, 255
+  %cvt = uitofp i32 %masked to float
+  ret float %cvt
+}
+
+define float @v_sitofp_i32_to_f32_mask255(i32 %arg0) nounwind {
+; SI-LABEL: v_sitofp_i32_to_f32_mask255:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT:    v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_sitofp_i32_to_f32_mask255:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+; VI-NEXT:    s_setpc_b64 s[30:31]
+  %masked = and i32 %arg0, 255
+  %cvt = sitofp i32 
%masked to float + ret float %cvt +} + +define float @v_uitofp_to_f32_lshr7_mask255(i32 %arg0) nounwind { +; SI-LABEL: v_uitofp_to_f32_lshr7_mask255: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v0, 7, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_uitofp_to_f32_lshr7_mask255: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v0, 7, v0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: s_setpc_b64 s[30:31] + %lshr.7 = lshr i32 %arg0, 7 + %masked = and i32 %lshr.7, 255 + %cvt = uitofp i32 %masked to float + ret float %cvt +} + +define float @v_uitofp_to_f32_lshr8_mask255(i32 %arg0) nounwind { +; SI-LABEL: v_uitofp_to_f32_lshr8_mask255: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v0, 8, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_uitofp_to_f32_lshr8_mask255: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: s_setpc_b64 s[30:31] + %lshr.8 = lshr i32 %arg0, 8 + %masked = and i32 %lshr.8, 255 + %cvt = uitofp i32 %masked to float + ret float %cvt +} + +define float @v_uitofp_to_f32_multi_use_lshr8_mask255(i32 %arg0) nounwind { +; SI-LABEL: v_uitofp_to_f32_multi_use_lshr8_mask255: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v0, 8, v0 +; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_uitofp_to_f32_multi_use_lshr8_mask255: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v0 +; VI-NEXT: flat_store_dword v[0:1], v0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_setpc_b64 s[30:31] + %lshr.8 = lshr i32 %arg0, 8 + store i32 %lshr.8, i32 addrspace(1)* undef + %masked = and i32 %lshr.8, 255 + %cvt = uitofp i32 %masked to float + ret float %cvt +} + +define float @v_uitofp_to_f32_lshr16_mask255(i32 %arg0) nounwind { +; SI-LABEL: v_uitofp_to_f32_lshr16_mask255: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_uitofp_to_f32_lshr16_mask255: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, 0xff +; VI-NEXT: v_and_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %lshr.16 = lshr i32 %arg0, 16 + %masked = and i32 %lshr.16, 255 + %cvt = uitofp i32 %masked to float + ret float %cvt +} + +define float @v_uitofp_to_f32_lshr24_mask255(i32 %arg0) nounwind { +; GCN-LABEL: v_uitofp_to_f32_lshr24_mask255: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; 
GCN-NEXT: v_cvt_f32_ubyte3_e32 v0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %lshr.16 = lshr i32 %arg0, 24 + %masked = and i32 %lshr.16, 255 + %cvt = uitofp i32 %masked to float + ret float %cvt +} + +define float @v_uitofp_i8_to_f32(i8 %arg0) nounwind { +; SI-LABEL: v_uitofp_i8_to_f32: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_uitofp_i8_to_f32: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: s_setpc_b64 s[30:31] + %cvt = uitofp i8 %arg0 to float + ret float %cvt +} + +define <2 x float> @v_uitofp_v2i8_to_v2f32(i16 %arg0) nounwind { +; SI-LABEL: v_uitofp_v2i8_to_v2f32: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v0 +; SI-NEXT: s_movk_i32 s4, 0xff +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v1 +; SI-NEXT: v_and_b32_e32 v0, s4, v0 +; SI-NEXT: v_and_b32_e32 v1, s4, v1 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_uitofp_v2i8_to_v2f32: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: s_movk_i32 s4, 0xff +; VI-NEXT: v_mov_b32_e32 v1, s4 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_and_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD +; VI-NEXT: v_cvt_f32_ubyte0_e32 v1, v0 +; VI-NEXT: v_mov_b32_e32 v0, v2 +; VI-NEXT: s_setpc_b64 s[30:31] + %val = bitcast i16 %arg0 to <2 x i8> + %cvt = uitofp <2 x i8> %val to <2 x float> + ret <2 x float> %cvt +} + +define <3 x float> @v_uitofp_v3i8_to_v3f32(i32 %arg0) nounwind { +; SI-LABEL: v_uitofp_v3i8_to_v3f32: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_and_b32_e32 v1, 0xffff, v0 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v0 +; SI-NEXT: s_movk_i32 s4, 0xff +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v1 +; SI-NEXT: v_and_b32_e32 v0, s4, v0 +; SI-NEXT: v_and_b32_e32 v1, s4, v1 +; SI-NEXT: v_and_b32_e32 v2, s4, v2 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v2, v2 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_uitofp_v3i8_to_v3f32: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: s_movk_i32 s4, 0xff +; VI-NEXT: v_mov_b32_e32 v2, s4 +; VI-NEXT: v_and_b32_sdwa v1, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_and_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_cvt_f32_ubyte0_e32 v2, v0 +; VI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1 +; VI-NEXT: v_mov_b32_e32 v0, v3 +; VI-NEXT: s_setpc_b64 s[30:31] + %trunc = trunc i32 %arg0 to i24 + %val = bitcast i24 %trunc to <3 x i8> + %cvt = uitofp <3 x i8> %val to <3 x float> + ret <3 x float> %cvt +} + +define <4 x float> @v_uitofp_v4i8_to_v4f32(i32 %arg0) nounwind { +; SI-LABEL: v_uitofp_v4i8_to_v4f32: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_mov_b32 s4, 0xffff +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v0 +; SI-NEXT: v_and_b32_e32 v1, s4, v0 +; SI-NEXT: v_and_b32_e32 v3, s4, v2 +; SI-NEXT: s_movk_i32 s4, 0xff +; SI-NEXT: v_lshrrev_b32_e32 v1, 
8, v1 +; SI-NEXT: v_lshrrev_b32_e32 v3, 8, v3 +; SI-NEXT: v_and_b32_e32 v0, s4, v0 +; SI-NEXT: v_and_b32_e32 v1, s4, v1 +; SI-NEXT: v_and_b32_e32 v2, s4, v2 +; SI-NEXT: v_and_b32_e32 v3, s4, v3 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v2, v2 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v3, v3 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_uitofp_v4i8_to_v4f32: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: s_movk_i32 s4, 0xff +; VI-NEXT: v_mov_b32_e32 v5, s4 +; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_and_b32_sdwa v0, v0, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD +; VI-NEXT: v_cvt_f32_ubyte0_e32 v1, v0 +; VI-NEXT: v_and_b32_sdwa v0, v3, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_cvt_f32_ubyte0_e32 v3, v0 +; VI-NEXT: v_mov_b32_e32 v0, v4 +; VI-NEXT: s_setpc_b64 s[30:31] + %val = bitcast i32 %arg0 to <4 x i8> + %cvt = uitofp <4 x i8> %val to <4 x float> + ret <4 x float> %cvt +} + +define <4 x float> @v_uitofp_unpack_i32_to_v4f32(i32 %arg0) nounwind { +; SI-LABEL: v_uitofp_unpack_i32_to_v4f32: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: s_movk_i32 s4, 0xff +; SI-NEXT: v_and_b32_e32 v1, s4, v0 +; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v4, v1 +; SI-NEXT: v_lshrrev_b32_e32 v1, 8, v0 +; SI-NEXT: v_and_b32_e32 v1, s4, v1 +; SI-NEXT: v_and_b32_e32 v2, s4, v2 +; SI-NEXT: v_cvt_f32_ubyte3_e32 v3, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v2, v2 +; SI-NEXT: v_mov_b32_e32 v0, v4 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_uitofp_unpack_i32_to_v4f32: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: s_movk_i32 s4, 0xff +; VI-NEXT: v_mov_b32_e32 v2, s4 +; VI-NEXT: v_lshrrev_b32_e32 v1, 8, v0 +; VI-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_cvt_f32_ubyte3_e32 v3, v0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_cvt_f32_ubyte0_e32 v2, v2 +; VI-NEXT: v_mov_b32_e32 v0, v4 +; VI-NEXT: s_setpc_b64 s[30:31] + %mask.arg0 = and i32 %arg0, 255 + %cvt0 = uitofp i32 %mask.arg0 to float + + %lshr.8 = lshr i32 %arg0, 8 + %mask.lshr.8 = and i32 %lshr.8, 255 + %cvt1 = uitofp i32 %mask.lshr.8 to float + + %lshr.16 = lshr i32 %arg0, 16 + %mask.lshr.16 = and i32 %lshr.16, 255 + %cvt2 = uitofp i32 %mask.lshr.16 to float + + %lshr.24 = lshr i32 %arg0, 24 + %mask.lshr.24 = and i32 %lshr.24, 255 + %cvt3 = uitofp i32 %mask.lshr.24 to float + + %ins.0 = insertelement <4 x float> undef, float %cvt0, i32 0 + %ins.1 = insertelement <4 x float> %ins.0, float %cvt1, i32 1 + %ins.2 = insertelement <4 x float> %ins.1, float %cvt2, i32 2 + %ins.3 = insertelement <4 x float> %ins.2, float %cvt3, i32 3 + ret <4 x float> %ins.3 +} + +define half @v_uitofp_i32_to_f16_mask255(i32 %arg0) nounwind { +; SI-LABEL: v_uitofp_i32_to_f16_mask255: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; 
SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_uitofp_i32_to_f16_mask255: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %masked = and i32 %arg0, 255 + %cvt = uitofp i32 %masked to half + ret half %cvt +} + +define half @v_sitofp_i32_to_f16_mask255(i32 %arg0) nounwind { +; SI-LABEL: v_sitofp_i32_to_f16_mask255: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_sitofp_i32_to_f16_mask255: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %masked = and i32 %arg0, 255 + %cvt = sitofp i32 %masked to half + ret half %cvt +} + +define half @v_uitofp_to_f16_lshr8_mask255(i32 %arg0) nounwind { +; SI-LABEL: v_uitofp_to_f16_lshr8_mask255: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v0, 8, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_uitofp_to_f16_lshr8_mask255: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v0 +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %lshr.8 = lshr i32 %arg0, 8 + %masked = and i32 %lshr.8, 255 + %cvt = uitofp i32 %masked to half + ret half %cvt +} + +define half @v_uitofp_to_f16_lshr16_mask255(i32 %arg0) nounwind { +; SI-LABEL: v_uitofp_to_f16_lshr16_mask255: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_uitofp_to_f16_lshr16_mask255: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, 0xff +; VI-NEXT: v_and_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; VI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %lshr.16 = lshr i32 %arg0, 16 + %masked = and i32 %lshr.16, 255 + %cvt = uitofp i32 %masked to half + ret half %cvt +} + +define half @v_uitofp_to_f16_lshr24_mask255(i32 %arg0) nounwind { +; GCN-LABEL: v_uitofp_to_f16_lshr24_mask255: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_cvt_f32_ubyte3_e32 v0, v0 +; GCN-NEXT: v_cvt_f16_f32_e32 v0, v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %lshr.16 = lshr i32 %arg0, 24 + %masked = and i32 %lshr.16, 255 + %cvt = uitofp i32 %masked to half + ret half %cvt +} + +define half @v_uitofp_i8_to_f16(i8 %arg0) nounwind { +; SI-LABEL: v_uitofp_i8_to_f16: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_uitofp_i8_to_f16: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) 
expcnt(0) lgkmcnt(0) +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_cvt_f16_f32_e32 v0, v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %cvt = uitofp i8 %arg0 to half + ret half %cvt +} + +define double @v_uitofp_i32_to_f64_mask255(i32 %arg0) nounwind { +; GCN-LABEL: v_uitofp_i32_to_f64_mask255: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_and_b32_e32 v0, 0xff, v0 +; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %masked = and i32 %arg0, 255 + %cvt = uitofp i32 %masked to double + ret double %cvt +} + +define double @v_uitofp_to_f64_lshr8_mask255(i32 %arg0) nounwind { +; GCN-LABEL: v_uitofp_to_f64_lshr8_mask255: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_lshrrev_b32_e32 v0, 8, v0 +; GCN-NEXT: v_and_b32_e32 v0, 0xff, v0 +; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %lshr.8 = lshr i32 %arg0, 8 + %masked = and i32 %lshr.8, 255 + %cvt = uitofp i32 %masked to double + ret double %cvt +} + +define double @v_uitofp_to_f64_lshr16_mask255(i32 %arg0) nounwind { +; SI-LABEL: v_uitofp_to_f64_lshr16_mask255: +; SI: ; %bb.0: +; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_cvt_f64_u32_e32 v[0:1], v0 +; SI-NEXT: s_setpc_b64 s[30:31] +; +; VI-LABEL: v_uitofp_to_f64_lshr16_mask255: +; VI: ; %bb.0: +; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, 0xff +; VI-NEXT: v_and_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD +; VI-NEXT: v_cvt_f64_u32_e32 v[0:1], v0 +; VI-NEXT: s_setpc_b64 s[30:31] + %lshr.16 = lshr i32 %arg0, 16 + %masked = and i32 %lshr.16, 255 + %cvt = uitofp i32 %masked to double + ret double %cvt +} + +define double @v_uitofp_to_f64_lshr24_mask255(i32 %arg0) nounwind { +; GCN-LABEL: v_uitofp_to_f64_lshr24_mask255: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_lshrrev_b32_e32 v0, 24, v0 +; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %lshr.16 = lshr i32 %arg0, 24 + %masked = and i32 %lshr.16, 255 + %cvt = uitofp i32 %masked to double + ret double %cvt +} + +define double @v_uitofp_i8_to_f64(i8 %arg0) nounwind { +; GCN-LABEL: v_uitofp_i8_to_f64: +; GCN: ; %bb.0: +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GCN-NEXT: v_and_b32_e32 v0, 0xff, v0 +; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], v0 +; GCN-NEXT: s_setpc_b64 s[30:31] + %cvt = uitofp i8 %arg0 to double + ret double %cvt +} + +define amdgpu_kernel void @load_i8_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind { +; SI-LABEL: load_i8_to_f32: +; SI: ; %bb.0: +; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xa +; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xc +; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; SI-NEXT: s_mov_b32 s2, 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: buffer_load_ubyte v0, v[0:1], s[0:3], 0 addr64 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_mov_b64 s[6:7], s[2:3] +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v0, 0xff, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0 +; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: load_i8_to_f32: +; VI: ; %bb.0: +; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x28 +; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x30 +; VI-NEXT: v_ashrrev_i32_e32 v3, 31, v0 +; VI-NEXT: s_waitcnt 
lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s1 +; VI-NEXT: v_mov_b32_e32 v1, s0 +; VI-NEXT: v_add_u32_e32 v0, vcc, v1, v0 +; VI-NEXT: v_addc_u32_e32 v1, vcc, v2, v3, vcc +; VI-NEXT: flat_load_ubyte v0, v[0:1] +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: v_mov_b32_e32 v0, s2 +; VI-NEXT: v_mov_b32_e32 v1, s3 +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr i8, i8 addrspace(1)* %in, i32 %tid + %load = load i8, i8 addrspace(1)* %gep, align 1 + %cvt = uitofp i8 %load to float + store float %cvt, float addrspace(1)* %out, align 4 + ret void +} + +; FIXME: +; define amdgpu_kernel void @load_v2i8_to_v2f32(<2 x float> addrspace(1)* noalias %out, <2 x i8> addrspace(1)* noalias %in) nounwind { +; %tid = call i32 @llvm.amdgcn.workitem.id.x() +; %gep = getelementptr <2 x i8>, <2 x i8> addrspace(1)* %in, i32 %tid +; %load = load <2 x i8>, <2 x i8> addrspace(1)* %gep, align 2 +; %cvt = uitofp <2 x i8> %load to <2 x float> +; store <2 x float> %cvt, <2 x float> addrspace(1)* %out, align 16 +; ret void +; } + +; FIXME: +; define amdgpu_kernel void @load_v3i8_to_v3f32(<3 x float> addrspace(1)* noalias %out, <3 x i8> addrspace(1)* noalias %in) nounwind { +; %tid = call i32 @llvm.amdgcn.workitem.id.x() +; %gep = getelementptr <3 x i8>, <3 x i8> addrspace(1)* %in, i32 %tid +; %load = load <3 x i8>, <3 x i8> addrspace(1)* %gep, align 4 +; %cvt = uitofp <3 x i8> %load to <3 x float> +; store <3 x float> %cvt, <3 x float> addrspace(1)* %out, align 16 +; ret void +; } + +; define amdgpu_kernel void @load_v4i8_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind { +; %tid = call i32 @llvm.amdgcn.workitem.id.x() +; %gep = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid +; %load = load <4 x i8>, <4 x i8> addrspace(1)* %gep, align 4 +; %cvt = uitofp <4 x i8> %load to <4 x float> +; store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16 +; ret void +; } + +; This should not be adding instructions to shift into the correct +; position in the word for the component. 
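That expectation rests on the byte-select semantics of the conversion instructions themselves. A minimal per-lane reference model of what v_cvt_f32_ubyteN computes (hypothetical C++, illustration only, not part of the patch):

#include <cstdint>
#include <cstdio>

// Convert byte N of a 32-bit source to float. This is why
// uitofp (and (lshr x, 8*N), 255) can select to a single instruction
// once the bytes sit in one dword, with no shifts required.
float cvt_f32_ubyte(uint32_t Src, unsigned N) {
  return (float)((Src >> (8 * N)) & 0xffu);
}

int main() {
  uint32_t X = 0xdeadbeefu;
  for (unsigned N = 0; N != 4; ++N)
    printf("byte %u -> %.1f\n", N, cvt_f32_ubyte(X, N)); // 239, 190, 173, 222
}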
+ +; FIXME: Packing bytes +define amdgpu_kernel void @load_v4i8_to_v4f32_unaligned(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind { +; SI-LABEL: load_v4i8_to_v4f32_unaligned: +; SI: ; %bb.0: +; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xa +; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xc +; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2 +; SI-NEXT: s_mov_b32 s2, 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: buffer_load_ubyte v2, v[0:1], s[0:3], 0 addr64 +; SI-NEXT: buffer_load_ubyte v3, v[0:1], s[0:3], 0 addr64 offset:1 +; SI-NEXT: buffer_load_ubyte v4, v[0:1], s[0:3], 0 addr64 offset:2 +; SI-NEXT: buffer_load_ubyte v0, v[0:1], s[0:3], 0 addr64 offset:3 +; SI-NEXT: s_movk_i32 s6, 0xff +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_waitcnt vmcnt(3) +; SI-NEXT: v_and_b32_e32 v1, s6, v2 +; SI-NEXT: s_waitcnt vmcnt(2) +; SI-NEXT: v_and_b32_e32 v2, s6, v3 +; SI-NEXT: s_waitcnt vmcnt(1) +; SI-NEXT: v_and_b32_e32 v3, s6, v4 +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v4, s6, v0 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v1 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v2 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v2, v3 +; SI-NEXT: v_cvt_f32_ubyte0_e32 v3, v4 +; SI-NEXT: s_mov_b64 s[6:7], s[2:3] +; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: load_v4i8_to_v4f32_unaligned: +; VI: ; %bb.0: +; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x28 +; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x30 +; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; VI-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1] +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v3, s1 +; VI-NEXT: v_mov_b32_e32 v2, s0 +; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0 +; VI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc +; VI-NEXT: v_add_u32_e32 v2, vcc, 1, v0 +; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc +; VI-NEXT: v_add_u32_e32 v4, vcc, 2, v0 +; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc +; VI-NEXT: v_add_u32_e32 v6, vcc, 3, v0 +; VI-NEXT: v_addc_u32_e32 v7, vcc, 0, v1, vcc +; VI-NEXT: flat_load_ubyte v0, v[0:1] +; VI-NEXT: flat_load_ubyte v1, v[2:3] +; VI-NEXT: flat_load_ubyte v2, v[4:5] +; VI-NEXT: flat_load_ubyte v3, v[6:7] +; VI-NEXT: v_mov_b32_e32 v5, s3 +; VI-NEXT: v_mov_b32_e32 v4, s2 +; VI-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3) +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2) +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1) +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: v_cvt_f32_ubyte0_sdwa v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 +; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3] +; VI-NEXT: s_endpgm + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid + %load = load <4 x i8>, <4 x i8> addrspace(1)* %gep, align 1 + %cvt = uitofp <4 x i8> %load to <4 x float> + store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16 + ret void +} + +; FIXME: Need to handle non-uniform case for function below (load without gep). +; Instructions still emitted to repack bytes for add use. 
+; define amdgpu_kernel void @load_v4i8_to_v4f32_2_uses(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %out2, <4 x i8> addrspace(1)* noalias %in) nounwind {
+;   %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
+;   %in.ptr = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid.x
+;   %load = load <4 x i8>, <4 x i8> addrspace(1)* %in.ptr, align 4
+;   %cvt = uitofp <4 x i8> %load to <4 x float>
+;   store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
+;   %add = add <4 x i8> %load, <i8 9, i8 9, i8 9, i8 9> ; Second use of %load
+;   store <4 x i8> %add, <4 x i8> addrspace(1)* %out2, align 4
+;   ret void
+; }
+
+; Make sure this doesn't crash.
+; FIXME:
+; define amdgpu_kernel void @load_v7i8_to_v7f32(<7 x float> addrspace(1)* noalias %out, <7 x i8> addrspace(1)* noalias %in) nounwind {
+;   %tid = call i32 @llvm.amdgcn.workitem.id.x()
+;   %gep = getelementptr <7 x i8>, <7 x i8> addrspace(1)* %in, i32 %tid
+;   %load = load <7 x i8>, <7 x i8> addrspace(1)* %gep, align 1
+;   %cvt = uitofp <7 x i8> %load to <7 x float>
+;   store <7 x float> %cvt, <7 x float> addrspace(1)* %out, align 16
+;   ret void
+; }
+
+; FIXME
+; define amdgpu_kernel void @load_v8i8_to_v8f32(<8 x float> addrspace(1)* noalias %out, <8 x i8> addrspace(1)* noalias %in) nounwind {
+;   %tid = call i32 @llvm.amdgcn.workitem.id.x()
+;   %gep = getelementptr <8 x i8>, <8 x i8> addrspace(1)* %in, i32 %tid
+;   %load = load <8 x i8>, <8 x i8> addrspace(1)* %gep, align 8
+;   %cvt = uitofp <8 x i8> %load to <8 x float>
+;   store <8 x float> %cvt, <8 x float> addrspace(1)* %out, align 16
+;   ret void
+; }
+
+define amdgpu_kernel void @i8_zext_inreg_i32_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+; SI-LABEL: i8_zext_inreg_i32_to_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xa
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xc
+; SI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; SI-NEXT:    v_lshl_b64 v[0:1], v[0:1], 2
+; SI-NEXT:    s_mov_b32 s2, 0
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    buffer_load_dword v0, v[0:1], s[0:3], 0 addr64
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, 2, v0
+; SI-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT:    v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: i8_zext_inreg_i32_to_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x28
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x30
+; VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; VI-NEXT:    v_lshlrev_b64 v[0:1], 2, v[0:1]
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_add_u32_e32 v0, vcc, v2, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, v3, v1, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, 2, v0
+; VI-NEXT:    v_cvt_f32_ubyte0_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+  %load = load i32, i32 addrspace(1)* %gep, align 4
+  %add = add i32 %load, 2
+  %inreg = and i32 %add, 255
+  %cvt = uitofp i32 %inreg to float
+  store float %cvt, float addrspace(1)* %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @i8_zext_inreg_hi1_to_f32(float addrspace(1)* noalias %out, 
i32 addrspace(1)* noalias %in) nounwind { +; SI-LABEL: i8_zext_inreg_hi1_to_f32: +; SI: ; %bb.0: +; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xa +; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xc +; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2 +; SI-NEXT: s_mov_b32 s2, 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: buffer_load_dword v0, v[0:1], s[0:3], 0 addr64 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_mov_b64 s[6:7], s[2:3] +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_and_b32_e32 v0, 0xff00, v0 +; SI-NEXT: v_cvt_f32_ubyte1_e32 v0, v0 +; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: i8_zext_inreg_hi1_to_f32: +; VI: ; %bb.0: +; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x28 +; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x30 +; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0 +; VI-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1] +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v3, s1 +; VI-NEXT: v_mov_b32_e32 v2, s0 +; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0 +; VI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc +; VI-NEXT: flat_load_dword v0, v[0:1] +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: v_and_b32_e32 v0, 0xff00, v0 +; VI-NEXT: v_cvt_f32_ubyte1_e32 v2, v0 +; VI-NEXT: v_mov_b32_e32 v0, s2 +; VI-NEXT: v_mov_b32_e32 v1, s3 +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid + %load = load i32, i32 addrspace(1)* %gep, align 4 + %inreg = and i32 %load, 65280 + %shr = lshr i32 %inreg, 8 + %cvt = uitofp i32 %shr to float + store float %cvt, float addrspace(1)* %out, align 4 + ret void +} + +; We don't get these ones because of the zext, but instcombine removes +; them so it shouldn't really matter. 
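Zero-extending the byte before the conversion cannot change the converted value, which is the equivalence instcombine uses to drop the zext in the functions that follow. A quick sanity check of that claim (illustration only, not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  // uitofp i8 %b and uitofp (zext i8 %b to i32) agree for every byte value.
  for (unsigned V = 0; V != 256; ++V)
    assert((float)(uint8_t)V == (float)(uint32_t)(uint8_t)V);
}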
+define amdgpu_kernel void @i8_zext_i32_to_f32(float addrspace(1)* noalias %out, i8 addrspace(1)* noalias %in) nounwind {
+; SI-LABEL: i8_zext_i32_to_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xa
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xc
+; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: buffer_load_ubyte v0, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b64 s[6:7], s[2:3]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: i8_zext_i32_to_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x28
+; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x30
+; VI-NEXT: v_ashrrev_i32_e32 v3, 31, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s1
+; VI-NEXT: v_mov_b32_e32 v1, s0
+; VI-NEXT: v_add_u32_e32 v0, vcc, v1, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v2, v3, vcc
+; VI-NEXT: flat_load_ubyte v0, v[0:1]
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v2, v0
+; VI-NEXT: v_mov_b32_e32 v0, s2
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr i8, i8 addrspace(1)* %in, i32 %tid
+  %load = load i8, i8 addrspace(1)* %gep, align 1
+  %ext = zext i8 %load to i32
+  %cvt = uitofp i32 %ext to float
+  store float %cvt, float addrspace(1)* %out, align 4
+  ret void
+}
+
+define amdgpu_kernel void @v4i8_zext_v4i32_to_v4f32(<4 x float> addrspace(1)* noalias %out, <4 x i8> addrspace(1)* noalias %in) nounwind {
+; SI-LABEL: v4i8_zext_v4i32_to_v4f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xa
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xc
+; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: buffer_load_ubyte v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: buffer_load_ubyte v3, v[0:1], s[0:3], 0 addr64 offset:1
+; SI-NEXT: buffer_load_ubyte v4, v[0:1], s[0:3], 0 addr64 offset:2
+; SI-NEXT: buffer_load_ubyte v0, v[0:1], s[0:3], 0 addr64 offset:3
+; SI-NEXT: s_movk_i32 s6, 0xff
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_and_b32_e32 v1, s6, v2
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_and_b32_e32 v2, s6, v3
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v3, s6, v4
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v4, s6, v0
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v1
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v2
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v2, v3
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v3, v4
+; SI-NEXT: s_mov_b64 s[6:7], s[2:3]
+; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v4i8_zext_v4i32_to_v4f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x28
+; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x30
+; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; VI-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_mov_b32_e32 v2, s0
+; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
+; VI-NEXT: v_add_u32_e32 v2, vcc, 1, v0
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
+; VI-NEXT: v_add_u32_e32 v4, vcc, 2, v0
+; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
+; VI-NEXT: v_add_u32_e32 v6, vcc, 3, v0
+; VI-NEXT: v_addc_u32_e32 v7, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_ubyte v0, v[0:1]
+; VI-NEXT: flat_load_ubyte v1, v[2:3]
+; VI-NEXT: flat_load_ubyte v2, v[4:5]
+; VI-NEXT: flat_load_ubyte v3, v[6:7]
+; VI-NEXT: v_mov_b32_e32 v5, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: s_waitcnt vmcnt(3) lgkmcnt(3)
+; VI-NEXT: v_cvt_f32_ubyte0_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(2) lgkmcnt(2)
+; VI-NEXT: v_cvt_f32_ubyte0_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(1) lgkmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_sdwa v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_sdwa v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
+; VI-NEXT: s_endpgm
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr <4 x i8>, <4 x i8> addrspace(1)* %in, i32 %tid
+  %load = load <4 x i8>, <4 x i8> addrspace(1)* %gep, align 1
+  %ext = zext <4 x i8> %load to <4 x i32>
+  %cvt = uitofp <4 x i32> %ext to <4 x float>
+  store <4 x float> %cvt, <4 x float> addrspace(1)* %out, align 16
+  ret void
+}
+
+define amdgpu_kernel void @extract_byte0_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+; SI-LABEL: extract_byte0_to_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xa
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xc
+; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v0, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b64 s[6:7], s[2:3]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: extract_byte0_to_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x28
+; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x30
+; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; VI-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_mov_b32_e32 v2, s0
+; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1]
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+; VI-NEXT: v_mov_b32_e32 v0, s2
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+  %val = load i32, i32 addrspace(1)* %gep
+  %and = and i32 %val, 255
+  %cvt = uitofp i32 %and to float
+  store float %cvt, float addrspace(1)* %out
+  ret void
+}
+
+define amdgpu_kernel void @extract_byte1_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+; SI-LABEL: extract_byte1_to_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xa
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xc
+; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v0, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b64 s[6:7], s[2:3]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v0, 8, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: extract_byte1_to_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x28
+; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x30
+; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; VI-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_mov_b32_e32 v2, s0
+; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1]
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: v_lshrrev_b32_e32 v0, 8, v0
+; VI-NEXT: v_cvt_f32_ubyte0_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+; VI-NEXT: v_mov_b32_e32 v0, s2
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+  %val = load i32, i32 addrspace(1)* %gep
+  %srl = lshr i32 %val, 8
+  %and = and i32 %srl, 255
+  %cvt = uitofp i32 %and to float
+  store float %cvt, float addrspace(1)* %out
+  ret void
+}
+
+define amdgpu_kernel void @extract_byte2_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+; SI-LABEL: extract_byte2_to_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xa
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xc
+; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v0, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b64 s[6:7], s[2:3]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: extract_byte2_to_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x28
+; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x30
+; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; VI-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_mov_b32_e32 v2, s0
+; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1]
+; VI-NEXT: v_mov_b32_e32 v1, 0xff
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: v_and_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v2, v0
+; VI-NEXT: v_mov_b32_e32 v0, s2
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+  %val = load i32, i32 addrspace(1)* %gep
+  %srl = lshr i32 %val, 16
+  %and = and i32 %srl, 255
+  %cvt = uitofp i32 %and to float
+  store float %cvt, float addrspace(1)* %out
+  ret void
+}
+
+define amdgpu_kernel void @extract_byte3_to_f32(float addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) nounwind {
+; SI-LABEL: extract_byte3_to_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xa
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xc
+; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v0, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b64 s[6:7], s[2:3]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte3_e32 v0, v0
+; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: extract_byte3_to_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x28
+; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x30
+; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; VI-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_mov_b32_e32 v2, s0
+; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1]
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte3_e32 v2, v0
+; VI-NEXT: v_mov_b32_e32 v0, s2
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+  %val = load i32, i32 addrspace(1)* %gep
+  %srl = lshr i32 %val, 24
+  %and = and i32 %srl, 255
+  %cvt = uitofp i32 %and to float
+  store float %cvt, float addrspace(1)* %out
+  ret void
+}
+
+define amdgpu_kernel void @cvt_ubyte0_or_multiuse(i32 addrspace(1)* %in, float addrspace(1)* %out) {
+; SI-LABEL: cvt_ubyte0_or_multiuse:
+; SI: ; %bb.0: ; %bb
+; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2
+; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[0:1]
+; SI-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, 0x80000001, v0
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v0
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; SI-NEXT: v_add_f32_e32 v0, v0, v1
+; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: cvt_ubyte0_or_multiuse:
+; VI: ; %bb.0: ; %bb
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; VI-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_mov_b32_e32 v2, s0
+; VI-NEXT: v_add_u32_e32 v0, vcc, v2, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v3, v1, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1]
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: v_or_b32_e32 v0, 0x80000001, v0
+; VI-NEXT: v_cvt_f32_ubyte0_sdwa v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0
+; VI-NEXT: v_add_f32_e32 v2, v0, v1
+; VI-NEXT: v_mov_b32_e32 v0, s2
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+bb:
+  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 %lid
+  %load = load i32, i32 addrspace(1)* %gep
+  %or = or i32 %load, -2147483647
+  %and = and i32 %or, 255
+  %uitofp = uitofp i32 %and to float
+  %cast = bitcast i32 %or to float
+  %add = fadd float %cast, %uitofp
+  store float %add, float addrspace(1)* %out
+  ret void
+}