diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -648,6 +648,9 @@
   /// (fma fneg(x), fneg(y), z) -> (fma x, y, z)
   bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo);
 
+  /// Combine (build_vector_trunc x, undef) -> (bitcast x)
+  bool matchBuildVectorTruncUndef(MachineInstr &MI, BuildFnTy &MatchInfo);
+
 private:
   /// Given a non-indexed load or store instruction \p MI, find an offset that
   /// can be usefully and legally folded into it as a post-indexing operation.
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -645,6 +645,14 @@
                                        extract_vec_elt_build_vec,
                                        extract_all_elts_from_build_vector]>;
 
+// Combine (build_vector_trunc x, undef) -> (bitcast x)
+def build_vector_trunc_undef : GICombineRule<
+  (defs root:$root, build_fn_matchinfo:$info),
+  (match (wip_match_opcode G_BUILD_VECTOR_TRUNC):$root,
+         [{ return Helper.matchBuildVectorTruncUndef(*${root}, ${info}); }]),
+  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${info}); }])
+>;
+
 def funnel_shift_to_rotate : GICombineRule<
   (defs root:$root),
   (match (wip_match_opcode G_FSHL, G_FSHR):$root,
@@ -758,7 +766,8 @@
                                      propagate_undef_any_op,
                                      propagate_undef_all_ops,
                                      propagate_undef_shuffle_mask,
-                                     erase_undef_store]>;
+                                     erase_undef_store,
+                                     build_vector_trunc_undef]>;
 
 def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
                                         binop_same_val, binop_left_to_zero,
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -4760,6 +4760,40 @@
   return true;
 }
 
+// Combine (build_vector_trunc x, undef) -> (bitcast x)
+bool CombinerHelper::matchBuildVectorTruncUndef(MachineInstr &MI,
+                                                BuildFnTy &MatchInfo) {
+  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR_TRUNC);
+
+  // Only combine if the sizes match, so we can use just a bitcast with no
+  // anyext or trunc.
+  Register Dst = MI.getOperand(0).getReg();
+  Register Src = MI.getOperand(1).getReg();
+  LLT DstTy = MRI.getType(Dst);
+  LLT SrcTy = MRI.getType(Src);
+  if (DstTy.getSizeInBits() != SrcTy.getSizeInBits())
+    return false;
+
+  // Allow any number of sources as long as all but the first are undef.
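+  // E.g. %v:_(<4 x s16>) = G_BUILD_VECTOR_TRUNC %x:_(s64), %u, %u, %u (where
+  // %u is G_IMPLICIT_DEF) writes nothing but undef into lanes 1-3, so it is
+  // equivalent to %v:_(<4 x s16>) = G_BITCAST %x:_(s64).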
+ for (unsigned I = 2, E = MI.getNumOperands(); I < E; ++I) { + if (!getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(I).getReg(), + MRI)) + return false; + } + + MatchInfo = [&](MachineIRBuilder &B) { + Observer.changingInstr(MI); + MI.setDesc(B.getTII().get(TargetOpcode::G_BITCAST)); + for (unsigned I = MI.getNumOperands(); --I >= 2;) + MI.RemoveOperand(I); + Observer.changedInstr(MI); + }; + return true; +} + bool CombinerHelper::tryCombine(MachineInstr &MI) { if (tryCombineCopy(MI)) return true; diff --git a/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll b/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll --- a/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll +++ b/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll @@ -86,11 +86,12 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ptrue p0.b ; CHECK-NEXT: movprfx z2, z0 -; CHECK-NEXT: smulh z2.b, p0/m, z2.b, z1.b -; CHECK-NEXT: mul z0.b, p0/m, z0.b, z1.b -; CHECK-NEXT: asr z1.b, z0.b, #7 -; CHECK-NEXT: cmpne p0.b, p0/z, z2.b, z1.b -; CHECK-NEXT: mov z0.b, p0/m, #0 // =0x0 +; CHECK-NEXT: mul z2.b, p0/m, z2.b, z1.b +; CHECK-NEXT: smulh z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: asr z1.b, z2.b, #7 +; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: mov z2.b, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z2.d ; CHECK-NEXT: ret %a = call { , } @llvm.smul.with.overflow.nxv16i8( %x, %y) %b = extractvalue { , } %a, 0 @@ -109,14 +110,15 @@ ; CHECK-NEXT: smulh z4.b, p0/m, z4.b, z3.b ; CHECK-NEXT: mul z1.b, p0/m, z1.b, z3.b ; CHECK-NEXT: movprfx z3, z0 -; CHECK-NEXT: smulh z3.b, p0/m, z3.b, z2.b -; CHECK-NEXT: mul z0.b, p0/m, z0.b, z2.b -; CHECK-NEXT: asr z2.b, z1.b, #7 -; CHECK-NEXT: cmpne p1.b, p0/z, z4.b, z2.b -; CHECK-NEXT: asr z2.b, z0.b, #7 -; CHECK-NEXT: cmpne p0.b, p0/z, z3.b, z2.b -; CHECK-NEXT: mov z0.b, p0/m, #0 // =0x0 +; CHECK-NEXT: mul z3.b, p0/m, z3.b, z2.b +; CHECK-NEXT: asr z5.b, z1.b, #7 +; CHECK-NEXT: smulh z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: asr z2.b, z3.b, #7 +; CHECK-NEXT: cmpne p1.b, p0/z, z4.b, z5.b +; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, z2.b ; CHECK-NEXT: mov z1.b, p1/m, #0 // =0x0 +; CHECK-NEXT: mov z3.b, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z3.d ; CHECK-NEXT: ret %a = call { , } @llvm.smul.with.overflow.nxv32i8( %x, %y) %b = extractvalue { , } %a, 0 @@ -135,26 +137,28 @@ ; CHECK-NEXT: smulh z24.b, p0/m, z24.b, z7.b ; CHECK-NEXT: mul z3.b, p0/m, z3.b, z7.b ; CHECK-NEXT: movprfx z7, z2 -; CHECK-NEXT: smulh z7.b, p0/m, z7.b, z6.b -; CHECK-NEXT: mul z2.b, p0/m, z2.b, z6.b +; CHECK-NEXT: mul z7.b, p0/m, z7.b, z6.b +; CHECK-NEXT: smulh z2.b, p0/m, z2.b, z6.b +; CHECK-NEXT: asr z6.b, z7.b, #7 +; CHECK-NEXT: cmpne p2.b, p0/z, z2.b, z6.b ; CHECK-NEXT: movprfx z6, z1 ; CHECK-NEXT: smulh z6.b, p0/m, z6.b, z5.b ; CHECK-NEXT: mul z1.b, p0/m, z1.b, z5.b -; CHECK-NEXT: movprfx z5, z0 -; CHECK-NEXT: smulh z5.b, p0/m, z5.b, z4.b -; CHECK-NEXT: mul z0.b, p0/m, z0.b, z4.b -; CHECK-NEXT: asr z4.b, z3.b, #7 -; CHECK-NEXT: cmpne p1.b, p0/z, z24.b, z4.b +; CHECK-NEXT: asr z25.b, z3.b, #7 +; CHECK-NEXT: asr z5.b, z1.b, #7 +; CHECK-NEXT: movprfx z2, z0 +; CHECK-NEXT: mul z2.b, p0/m, z2.b, z4.b +; CHECK-NEXT: smulh z0.b, p0/m, z0.b, z4.b ; CHECK-NEXT: asr z4.b, z2.b, #7 -; CHECK-NEXT: asr z24.b, z1.b, #7 -; CHECK-NEXT: cmpne p2.b, p0/z, z7.b, z4.b -; CHECK-NEXT: asr z4.b, z0.b, #7 -; CHECK-NEXT: cmpne p3.b, p0/z, z6.b, z24.b -; CHECK-NEXT: cmpne p0.b, p0/z, z5.b, z4.b -; CHECK-NEXT: mov z0.b, p0/m, #0 // =0x0 +; CHECK-NEXT: cmpne p1.b, p0/z, z24.b, z25.b +; CHECK-NEXT: cmpne p3.b, p0/z, z6.b, z5.b +; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, z4.b +; CHECK-NEXT: mov 
z7.b, p2/m, #0 // =0x0 +; CHECK-NEXT: mov z2.b, p0/m, #0 // =0x0 ; CHECK-NEXT: mov z1.b, p3/m, #0 // =0x0 -; CHECK-NEXT: mov z2.b, p2/m, #0 // =0x0 ; CHECK-NEXT: mov z3.b, p1/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z2.d +; CHECK-NEXT: mov z2.d, z7.d ; CHECK-NEXT: ret %a = call { , } @llvm.smul.with.overflow.nxv64i8( %x, %y) %b = extractvalue { , } %a, 0 @@ -222,11 +226,12 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: movprfx z2, z0 -; CHECK-NEXT: smulh z2.h, p0/m, z2.h, z1.h -; CHECK-NEXT: mul z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: asr z1.h, z0.h, #15 -; CHECK-NEXT: cmpne p0.h, p0/z, z2.h, z1.h -; CHECK-NEXT: mov z0.h, p0/m, #0 // =0x0 +; CHECK-NEXT: mul z2.h, p0/m, z2.h, z1.h +; CHECK-NEXT: smulh z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: asr z1.h, z2.h, #15 +; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: mov z2.h, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z2.d ; CHECK-NEXT: ret %a = call { , } @llvm.smul.with.overflow.nxv8i16( %x, %y) %b = extractvalue { , } %a, 0 @@ -245,14 +250,15 @@ ; CHECK-NEXT: smulh z4.h, p0/m, z4.h, z3.h ; CHECK-NEXT: mul z1.h, p0/m, z1.h, z3.h ; CHECK-NEXT: movprfx z3, z0 -; CHECK-NEXT: smulh z3.h, p0/m, z3.h, z2.h -; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h -; CHECK-NEXT: asr z2.h, z1.h, #15 -; CHECK-NEXT: cmpne p1.h, p0/z, z4.h, z2.h -; CHECK-NEXT: asr z2.h, z0.h, #15 -; CHECK-NEXT: cmpne p0.h, p0/z, z3.h, z2.h -; CHECK-NEXT: mov z0.h, p0/m, #0 // =0x0 +; CHECK-NEXT: mul z3.h, p0/m, z3.h, z2.h +; CHECK-NEXT: asr z5.h, z1.h, #15 +; CHECK-NEXT: smulh z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: asr z2.h, z3.h, #15 +; CHECK-NEXT: cmpne p1.h, p0/z, z4.h, z5.h +; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, z2.h ; CHECK-NEXT: mov z1.h, p1/m, #0 // =0x0 +; CHECK-NEXT: mov z3.h, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z3.d ; CHECK-NEXT: ret %a = call { , } @llvm.smul.with.overflow.nxv16i16( %x, %y) %b = extractvalue { , } %a, 0 @@ -271,26 +277,28 @@ ; CHECK-NEXT: smulh z24.h, p0/m, z24.h, z7.h ; CHECK-NEXT: mul z3.h, p0/m, z3.h, z7.h ; CHECK-NEXT: movprfx z7, z2 -; CHECK-NEXT: smulh z7.h, p0/m, z7.h, z6.h -; CHECK-NEXT: mul z2.h, p0/m, z2.h, z6.h +; CHECK-NEXT: mul z7.h, p0/m, z7.h, z6.h +; CHECK-NEXT: smulh z2.h, p0/m, z2.h, z6.h +; CHECK-NEXT: asr z6.h, z7.h, #15 +; CHECK-NEXT: cmpne p2.h, p0/z, z2.h, z6.h ; CHECK-NEXT: movprfx z6, z1 ; CHECK-NEXT: smulh z6.h, p0/m, z6.h, z5.h ; CHECK-NEXT: mul z1.h, p0/m, z1.h, z5.h -; CHECK-NEXT: movprfx z5, z0 -; CHECK-NEXT: smulh z5.h, p0/m, z5.h, z4.h -; CHECK-NEXT: mul z0.h, p0/m, z0.h, z4.h -; CHECK-NEXT: asr z4.h, z3.h, #15 -; CHECK-NEXT: cmpne p1.h, p0/z, z24.h, z4.h +; CHECK-NEXT: asr z25.h, z3.h, #15 +; CHECK-NEXT: asr z5.h, z1.h, #15 +; CHECK-NEXT: movprfx z2, z0 +; CHECK-NEXT: mul z2.h, p0/m, z2.h, z4.h +; CHECK-NEXT: smulh z0.h, p0/m, z0.h, z4.h ; CHECK-NEXT: asr z4.h, z2.h, #15 -; CHECK-NEXT: asr z24.h, z1.h, #15 -; CHECK-NEXT: cmpne p2.h, p0/z, z7.h, z4.h -; CHECK-NEXT: asr z4.h, z0.h, #15 -; CHECK-NEXT: cmpne p3.h, p0/z, z6.h, z24.h -; CHECK-NEXT: cmpne p0.h, p0/z, z5.h, z4.h -; CHECK-NEXT: mov z0.h, p0/m, #0 // =0x0 +; CHECK-NEXT: cmpne p1.h, p0/z, z24.h, z25.h +; CHECK-NEXT: cmpne p3.h, p0/z, z6.h, z5.h +; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, z4.h +; CHECK-NEXT: mov z7.h, p2/m, #0 // =0x0 +; CHECK-NEXT: mov z2.h, p0/m, #0 // =0x0 ; CHECK-NEXT: mov z1.h, p3/m, #0 // =0x0 -; CHECK-NEXT: mov z2.h, p2/m, #0 // =0x0 ; CHECK-NEXT: mov z3.h, p1/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z2.d +; CHECK-NEXT: mov z2.d, z7.d ; CHECK-NEXT: ret %a = call { , } @llvm.smul.with.overflow.nxv32i16( %x, %y) %b = extractvalue { , } 
%a, 0 @@ -332,11 +340,12 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: movprfx z2, z0 -; CHECK-NEXT: smulh z2.s, p0/m, z2.s, z1.s -; CHECK-NEXT: mul z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: asr z1.s, z0.s, #31 -; CHECK-NEXT: cmpne p0.s, p0/z, z2.s, z1.s -; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0 +; CHECK-NEXT: mul z2.s, p0/m, z2.s, z1.s +; CHECK-NEXT: smulh z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: asr z1.s, z2.s, #31 +; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: mov z2.s, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z2.d ; CHECK-NEXT: ret %a = call { , } @llvm.smul.with.overflow.nxv4i32( %x, %y) %b = extractvalue { , } %a, 0 @@ -355,14 +364,15 @@ ; CHECK-NEXT: smulh z4.s, p0/m, z4.s, z3.s ; CHECK-NEXT: mul z1.s, p0/m, z1.s, z3.s ; CHECK-NEXT: movprfx z3, z0 -; CHECK-NEXT: smulh z3.s, p0/m, z3.s, z2.s -; CHECK-NEXT: mul z0.s, p0/m, z0.s, z2.s -; CHECK-NEXT: asr z2.s, z1.s, #31 -; CHECK-NEXT: cmpne p1.s, p0/z, z4.s, z2.s -; CHECK-NEXT: asr z2.s, z0.s, #31 -; CHECK-NEXT: cmpne p0.s, p0/z, z3.s, z2.s -; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0 +; CHECK-NEXT: mul z3.s, p0/m, z3.s, z2.s +; CHECK-NEXT: asr z5.s, z1.s, #31 +; CHECK-NEXT: smulh z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: asr z2.s, z3.s, #31 +; CHECK-NEXT: cmpne p1.s, p0/z, z4.s, z5.s +; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, z2.s ; CHECK-NEXT: mov z1.s, p1/m, #0 // =0x0 +; CHECK-NEXT: mov z3.s, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z3.d ; CHECK-NEXT: ret %a = call { , } @llvm.smul.with.overflow.nxv8i32( %x, %y) %b = extractvalue { , } %a, 0 @@ -381,26 +391,28 @@ ; CHECK-NEXT: smulh z24.s, p0/m, z24.s, z7.s ; CHECK-NEXT: mul z3.s, p0/m, z3.s, z7.s ; CHECK-NEXT: movprfx z7, z2 -; CHECK-NEXT: smulh z7.s, p0/m, z7.s, z6.s -; CHECK-NEXT: mul z2.s, p0/m, z2.s, z6.s +; CHECK-NEXT: mul z7.s, p0/m, z7.s, z6.s +; CHECK-NEXT: smulh z2.s, p0/m, z2.s, z6.s +; CHECK-NEXT: asr z6.s, z7.s, #31 +; CHECK-NEXT: cmpne p2.s, p0/z, z2.s, z6.s ; CHECK-NEXT: movprfx z6, z1 ; CHECK-NEXT: smulh z6.s, p0/m, z6.s, z5.s ; CHECK-NEXT: mul z1.s, p0/m, z1.s, z5.s -; CHECK-NEXT: movprfx z5, z0 -; CHECK-NEXT: smulh z5.s, p0/m, z5.s, z4.s -; CHECK-NEXT: mul z0.s, p0/m, z0.s, z4.s -; CHECK-NEXT: asr z4.s, z3.s, #31 -; CHECK-NEXT: cmpne p1.s, p0/z, z24.s, z4.s +; CHECK-NEXT: asr z25.s, z3.s, #31 +; CHECK-NEXT: asr z5.s, z1.s, #31 +; CHECK-NEXT: movprfx z2, z0 +; CHECK-NEXT: mul z2.s, p0/m, z2.s, z4.s +; CHECK-NEXT: smulh z0.s, p0/m, z0.s, z4.s ; CHECK-NEXT: asr z4.s, z2.s, #31 -; CHECK-NEXT: asr z24.s, z1.s, #31 -; CHECK-NEXT: cmpne p2.s, p0/z, z7.s, z4.s -; CHECK-NEXT: asr z4.s, z0.s, #31 -; CHECK-NEXT: cmpne p3.s, p0/z, z6.s, z24.s -; CHECK-NEXT: cmpne p0.s, p0/z, z5.s, z4.s -; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0 +; CHECK-NEXT: cmpne p1.s, p0/z, z24.s, z25.s +; CHECK-NEXT: cmpne p3.s, p0/z, z6.s, z5.s +; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, z4.s +; CHECK-NEXT: mov z7.s, p2/m, #0 // =0x0 +; CHECK-NEXT: mov z2.s, p0/m, #0 // =0x0 ; CHECK-NEXT: mov z1.s, p3/m, #0 // =0x0 -; CHECK-NEXT: mov z2.s, p2/m, #0 // =0x0 ; CHECK-NEXT: mov z3.s, p1/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z2.d +; CHECK-NEXT: mov z2.d, z7.d ; CHECK-NEXT: ret %a = call { , } @llvm.smul.with.overflow.nxv16i32( %x, %y) %b = extractvalue { , } %a, 0 @@ -416,11 +428,12 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: movprfx z2, z0 -; CHECK-NEXT: smulh z2.d, p0/m, z2.d, z1.d -; CHECK-NEXT: mul z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: asr z1.d, z0.d, #63 -; CHECK-NEXT: cmpne p0.d, p0/z, z2.d, z1.d -; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0 +; CHECK-NEXT: mul z2.d, p0/m, z2.d, z1.d +; 
CHECK-NEXT: smulh z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: asr z1.d, z2.d, #63 +; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: mov z2.d, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z2.d ; CHECK-NEXT: ret %a = call { , } @llvm.smul.with.overflow.nxv2i64( %x, %y) %b = extractvalue { , } %a, 0 @@ -439,14 +452,15 @@ ; CHECK-NEXT: smulh z4.d, p0/m, z4.d, z3.d ; CHECK-NEXT: mul z1.d, p0/m, z1.d, z3.d ; CHECK-NEXT: movprfx z3, z0 -; CHECK-NEXT: smulh z3.d, p0/m, z3.d, z2.d -; CHECK-NEXT: mul z0.d, p0/m, z0.d, z2.d -; CHECK-NEXT: asr z2.d, z1.d, #63 -; CHECK-NEXT: cmpne p1.d, p0/z, z4.d, z2.d -; CHECK-NEXT: asr z2.d, z0.d, #63 -; CHECK-NEXT: cmpne p0.d, p0/z, z3.d, z2.d -; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0 +; CHECK-NEXT: mul z3.d, p0/m, z3.d, z2.d +; CHECK-NEXT: asr z5.d, z1.d, #63 +; CHECK-NEXT: smulh z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: asr z2.d, z3.d, #63 +; CHECK-NEXT: cmpne p1.d, p0/z, z4.d, z5.d +; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, z2.d ; CHECK-NEXT: mov z1.d, p1/m, #0 // =0x0 +; CHECK-NEXT: mov z3.d, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z3.d ; CHECK-NEXT: ret %a = call { , } @llvm.smul.with.overflow.nxv4i64( %x, %y) %b = extractvalue { , } %a, 0 @@ -465,26 +479,28 @@ ; CHECK-NEXT: smulh z24.d, p0/m, z24.d, z7.d ; CHECK-NEXT: mul z3.d, p0/m, z3.d, z7.d ; CHECK-NEXT: movprfx z7, z2 -; CHECK-NEXT: smulh z7.d, p0/m, z7.d, z6.d -; CHECK-NEXT: mul z2.d, p0/m, z2.d, z6.d +; CHECK-NEXT: mul z7.d, p0/m, z7.d, z6.d +; CHECK-NEXT: smulh z2.d, p0/m, z2.d, z6.d +; CHECK-NEXT: asr z6.d, z7.d, #63 +; CHECK-NEXT: cmpne p2.d, p0/z, z2.d, z6.d ; CHECK-NEXT: movprfx z6, z1 ; CHECK-NEXT: smulh z6.d, p0/m, z6.d, z5.d ; CHECK-NEXT: mul z1.d, p0/m, z1.d, z5.d -; CHECK-NEXT: movprfx z5, z0 -; CHECK-NEXT: smulh z5.d, p0/m, z5.d, z4.d -; CHECK-NEXT: mul z0.d, p0/m, z0.d, z4.d -; CHECK-NEXT: asr z4.d, z3.d, #63 -; CHECK-NEXT: cmpne p1.d, p0/z, z24.d, z4.d +; CHECK-NEXT: asr z25.d, z3.d, #63 +; CHECK-NEXT: asr z5.d, z1.d, #63 +; CHECK-NEXT: movprfx z2, z0 +; CHECK-NEXT: mul z2.d, p0/m, z2.d, z4.d +; CHECK-NEXT: smulh z0.d, p0/m, z0.d, z4.d ; CHECK-NEXT: asr z4.d, z2.d, #63 -; CHECK-NEXT: asr z24.d, z1.d, #63 -; CHECK-NEXT: cmpne p2.d, p0/z, z7.d, z4.d -; CHECK-NEXT: asr z4.d, z0.d, #63 -; CHECK-NEXT: cmpne p3.d, p0/z, z6.d, z24.d -; CHECK-NEXT: cmpne p0.d, p0/z, z5.d, z4.d -; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0 +; CHECK-NEXT: cmpne p1.d, p0/z, z24.d, z25.d +; CHECK-NEXT: cmpne p3.d, p0/z, z6.d, z5.d +; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, z4.d +; CHECK-NEXT: mov z7.d, p2/m, #0 // =0x0 +; CHECK-NEXT: mov z2.d, p0/m, #0 // =0x0 ; CHECK-NEXT: mov z1.d, p3/m, #0 // =0x0 -; CHECK-NEXT: mov z2.d, p2/m, #0 // =0x0 ; CHECK-NEXT: mov z3.d, p1/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z2.d +; CHECK-NEXT: mov z2.d, z7.d ; CHECK-NEXT: ret %a = call { , } @llvm.smul.with.overflow.nxv8i64( %x, %y) %b = extractvalue { , } %a, 0 diff --git a/llvm/test/CodeGen/AArch64/sve-umulo-sdnode.ll b/llvm/test/CodeGen/AArch64/sve-umulo-sdnode.ll --- a/llvm/test/CodeGen/AArch64/sve-umulo-sdnode.ll +++ b/llvm/test/CodeGen/AArch64/sve-umulo-sdnode.ll @@ -10,13 +10,14 @@ ; CHECK-NEXT: and z1.d, z1.d, #0xff ; CHECK-NEXT: and z0.d, z0.d, #0xff ; CHECK-NEXT: movprfx z2, z0 -; CHECK-NEXT: umulh z2.d, p0/m, z2.d, z1.d -; CHECK-NEXT: mul z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: lsr z1.d, z0.d, #8 -; CHECK-NEXT: cmpne p1.d, p0/z, z2.d, #0 +; CHECK-NEXT: mul z2.d, p0/m, z2.d, z1.d +; CHECK-NEXT: umulh z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: lsr z1.d, z2.d, #8 +; CHECK-NEXT: cmpne p1.d, p0/z, z0.d, #0 ; CHECK-NEXT: cmpne p2.d, p0/z, z1.d, #0 ; 
CHECK-NEXT: orr p0.b, p0/z, p2.b, p1.b -; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z2.d, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z2.d ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv2i8( %x, %y) %b = extractvalue { , } %a, 0 @@ -34,13 +35,14 @@ ; CHECK-NEXT: and z1.s, z1.s, #0xff ; CHECK-NEXT: and z0.s, z0.s, #0xff ; CHECK-NEXT: movprfx z2, z0 -; CHECK-NEXT: umulh z2.s, p0/m, z2.s, z1.s -; CHECK-NEXT: mul z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: lsr z1.s, z0.s, #8 -; CHECK-NEXT: cmpne p1.s, p0/z, z2.s, #0 +; CHECK-NEXT: mul z2.s, p0/m, z2.s, z1.s +; CHECK-NEXT: umulh z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: lsr z1.s, z2.s, #8 +; CHECK-NEXT: cmpne p1.s, p0/z, z0.s, #0 ; CHECK-NEXT: cmpne p2.s, p0/z, z1.s, #0 ; CHECK-NEXT: orr p0.b, p0/z, p2.b, p1.b -; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z2.s, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z2.d ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv4i8( %x, %y) %b = extractvalue { , } %a, 0 @@ -58,13 +60,14 @@ ; CHECK-NEXT: and z1.h, z1.h, #0xff ; CHECK-NEXT: and z0.h, z0.h, #0xff ; CHECK-NEXT: movprfx z2, z0 -; CHECK-NEXT: umulh z2.h, p0/m, z2.h, z1.h -; CHECK-NEXT: mul z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: lsr z1.h, z0.h, #8 -; CHECK-NEXT: cmpne p1.h, p0/z, z2.h, #0 +; CHECK-NEXT: mul z2.h, p0/m, z2.h, z1.h +; CHECK-NEXT: umulh z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: lsr z1.h, z2.h, #8 +; CHECK-NEXT: cmpne p1.h, p0/z, z0.h, #0 ; CHECK-NEXT: cmpne p2.h, p0/z, z1.h, #0 ; CHECK-NEXT: orr p0.b, p0/z, p2.b, p1.b -; CHECK-NEXT: mov z0.h, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z2.h, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z2.d ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv8i8( %x, %y) %b = extractvalue { , } %a, 0 @@ -80,11 +83,10 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ptrue p0.b ; CHECK-NEXT: movprfx z2, z0 -; CHECK-NEXT: mul z2.b, p0/m, z2.b, z1.b -; CHECK-NEXT: umulh z0.b, p0/m, z0.b, z1.b -; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0 -; CHECK-NEXT: mov z2.b, p0/m, #0 // =0x0 -; CHECK-NEXT: mov z0.d, z2.d +; CHECK-NEXT: umulh z2.b, p0/m, z2.b, z1.b +; CHECK-NEXT: mul z0.b, p0/m, z0.b, z1.b +; CHECK-NEXT: cmpne p0.b, p0/z, z2.b, #0 +; CHECK-NEXT: mov z0.b, p0/m, #0 // =0x0 ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv16i8( %x, %y) %b = extractvalue { , } %a, 0 @@ -101,15 +103,14 @@ ; CHECK-NEXT: ptrue p0.b ; CHECK-NEXT: movprfx z4, z1 ; CHECK-NEXT: mul z4.b, p0/m, z4.b, z3.b -; CHECK-NEXT: umulh z3.b, p0/m, z3.b, z1.b -; CHECK-NEXT: movprfx z1, z0 -; CHECK-NEXT: mul z1.b, p0/m, z1.b, z2.b -; CHECK-NEXT: umulh z0.b, p0/m, z0.b, z2.b -; CHECK-NEXT: cmpne p1.b, p0/z, z3.b, #0 -; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0 -; CHECK-NEXT: mov z1.b, p0/m, #0 // =0x0 +; CHECK-NEXT: umulh z1.b, p0/m, z1.b, z3.b +; CHECK-NEXT: movprfx z3, z0 +; CHECK-NEXT: umulh z3.b, p0/m, z3.b, z2.b +; CHECK-NEXT: cmpne p1.b, p0/z, z1.b, #0 +; CHECK-NEXT: mul z0.b, p0/m, z0.b, z2.b +; CHECK-NEXT: cmpne p0.b, p0/z, z3.b, #0 ; CHECK-NEXT: mov z4.b, p1/m, #0 // =0x0 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: mov z0.b, p0/m, #0 // =0x0 ; CHECK-NEXT: mov z1.d, z4.d ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv32i8( %x, %y) @@ -127,27 +128,25 @@ ; CHECK-NEXT: ptrue p0.b ; CHECK-NEXT: movprfx z24, z3 ; CHECK-NEXT: mul z24.b, p0/m, z24.b, z7.b -; CHECK-NEXT: umulh z7.b, p0/m, z7.b, z3.b +; CHECK-NEXT: umulh z3.b, p0/m, z3.b, z7.b +; CHECK-NEXT: cmpne p1.b, p0/z, z3.b, #0 ; CHECK-NEXT: movprfx z3, z2 -; CHECK-NEXT: mul z3.b, p0/m, z3.b, z6.b -; CHECK-NEXT: umulh z6.b, p0/m, z6.b, z2.b -; 
CHECK-NEXT: movprfx z2, z1 -; CHECK-NEXT: mul z2.b, p0/m, z2.b, z5.b -; CHECK-NEXT: umulh z5.b, p0/m, z5.b, z1.b +; CHECK-NEXT: umulh z3.b, p0/m, z3.b, z6.b +; CHECK-NEXT: cmpne p2.b, p0/z, z3.b, #0 +; CHECK-NEXT: movprfx z3, z1 +; CHECK-NEXT: mul z3.b, p0/m, z3.b, z5.b +; CHECK-NEXT: umulh z1.b, p0/m, z1.b, z5.b +; CHECK-NEXT: mul z2.b, p0/m, z2.b, z6.b +; CHECK-NEXT: cmpne p3.b, p0/z, z1.b, #0 ; CHECK-NEXT: movprfx z1, z0 -; CHECK-NEXT: mul z1.b, p0/m, z1.b, z4.b -; CHECK-NEXT: umulh z0.b, p0/m, z0.b, z4.b -; CHECK-NEXT: cmpne p1.b, p0/z, z7.b, #0 -; CHECK-NEXT: cmpne p2.b, p0/z, z6.b, #0 -; CHECK-NEXT: cmpne p3.b, p0/z, z5.b, #0 -; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0 -; CHECK-NEXT: mov z1.b, p0/m, #0 // =0x0 -; CHECK-NEXT: mov z2.b, p3/m, #0 // =0x0 -; CHECK-NEXT: mov z3.b, p2/m, #0 // =0x0 +; CHECK-NEXT: umulh z1.b, p0/m, z1.b, z4.b +; CHECK-NEXT: mul z0.b, p0/m, z0.b, z4.b +; CHECK-NEXT: cmpne p0.b, p0/z, z1.b, #0 +; CHECK-NEXT: mov z3.b, p3/m, #0 // =0x0 ; CHECK-NEXT: mov z24.b, p1/m, #0 // =0x0 -; CHECK-NEXT: mov z0.d, z1.d -; CHECK-NEXT: mov z1.d, z2.d -; CHECK-NEXT: mov z2.d, z3.d +; CHECK-NEXT: mov z0.b, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z2.b, p2/m, #0 // =0x0 +; CHECK-NEXT: mov z1.d, z3.d ; CHECK-NEXT: mov z3.d, z24.d ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv64i8( %x, %y) @@ -166,13 +165,14 @@ ; CHECK-NEXT: and z1.d, z1.d, #0xffff ; CHECK-NEXT: and z0.d, z0.d, #0xffff ; CHECK-NEXT: movprfx z2, z0 -; CHECK-NEXT: umulh z2.d, p0/m, z2.d, z1.d -; CHECK-NEXT: mul z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: lsr z1.d, z0.d, #16 -; CHECK-NEXT: cmpne p1.d, p0/z, z2.d, #0 +; CHECK-NEXT: mul z2.d, p0/m, z2.d, z1.d +; CHECK-NEXT: umulh z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: lsr z1.d, z2.d, #16 +; CHECK-NEXT: cmpne p1.d, p0/z, z0.d, #0 ; CHECK-NEXT: cmpne p2.d, p0/z, z1.d, #0 ; CHECK-NEXT: orr p0.b, p0/z, p2.b, p1.b -; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z2.d, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z2.d ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv2i16( %x, %y) %b = extractvalue { , } %a, 0 @@ -190,13 +190,14 @@ ; CHECK-NEXT: and z1.s, z1.s, #0xffff ; CHECK-NEXT: and z0.s, z0.s, #0xffff ; CHECK-NEXT: movprfx z2, z0 -; CHECK-NEXT: umulh z2.s, p0/m, z2.s, z1.s -; CHECK-NEXT: mul z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: lsr z1.s, z0.s, #16 -; CHECK-NEXT: cmpne p1.s, p0/z, z2.s, #0 +; CHECK-NEXT: mul z2.s, p0/m, z2.s, z1.s +; CHECK-NEXT: umulh z0.s, p0/m, z0.s, z1.s +; CHECK-NEXT: lsr z1.s, z2.s, #16 +; CHECK-NEXT: cmpne p1.s, p0/z, z0.s, #0 ; CHECK-NEXT: cmpne p2.s, p0/z, z1.s, #0 ; CHECK-NEXT: orr p0.b, p0/z, p2.b, p1.b -; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z2.s, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z2.d ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv4i16( %x, %y) %b = extractvalue { , } %a, 0 @@ -212,11 +213,10 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: movprfx z2, z0 -; CHECK-NEXT: mul z2.h, p0/m, z2.h, z1.h -; CHECK-NEXT: umulh z0.h, p0/m, z0.h, z1.h -; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0 -; CHECK-NEXT: mov z2.h, p0/m, #0 // =0x0 -; CHECK-NEXT: mov z0.d, z2.d +; CHECK-NEXT: umulh z2.h, p0/m, z2.h, z1.h +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z1.h +; CHECK-NEXT: cmpne p0.h, p0/z, z2.h, #0 +; CHECK-NEXT: mov z0.h, p0/m, #0 // =0x0 ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv8i16( %x, %y) %b = extractvalue { , } %a, 0 @@ -233,15 +233,14 @@ ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: movprfx z4, z1 ; CHECK-NEXT: mul z4.h, p0/m, z4.h, z3.h -; CHECK-NEXT: umulh z3.h, 
p0/m, z3.h, z1.h -; CHECK-NEXT: movprfx z1, z0 -; CHECK-NEXT: mul z1.h, p0/m, z1.h, z2.h -; CHECK-NEXT: umulh z0.h, p0/m, z0.h, z2.h -; CHECK-NEXT: cmpne p1.h, p0/z, z3.h, #0 -; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0 -; CHECK-NEXT: mov z1.h, p0/m, #0 // =0x0 +; CHECK-NEXT: umulh z1.h, p0/m, z1.h, z3.h +; CHECK-NEXT: movprfx z3, z0 +; CHECK-NEXT: umulh z3.h, p0/m, z3.h, z2.h +; CHECK-NEXT: cmpne p1.h, p0/z, z1.h, #0 +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h +; CHECK-NEXT: cmpne p0.h, p0/z, z3.h, #0 ; CHECK-NEXT: mov z4.h, p1/m, #0 // =0x0 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: mov z0.h, p0/m, #0 // =0x0 ; CHECK-NEXT: mov z1.d, z4.d ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv16i16( %x, %y) @@ -259,27 +258,25 @@ ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: movprfx z24, z3 ; CHECK-NEXT: mul z24.h, p0/m, z24.h, z7.h -; CHECK-NEXT: umulh z7.h, p0/m, z7.h, z3.h +; CHECK-NEXT: umulh z3.h, p0/m, z3.h, z7.h +; CHECK-NEXT: cmpne p1.h, p0/z, z3.h, #0 ; CHECK-NEXT: movprfx z3, z2 -; CHECK-NEXT: mul z3.h, p0/m, z3.h, z6.h -; CHECK-NEXT: umulh z6.h, p0/m, z6.h, z2.h -; CHECK-NEXT: movprfx z2, z1 -; CHECK-NEXT: mul z2.h, p0/m, z2.h, z5.h -; CHECK-NEXT: umulh z5.h, p0/m, z5.h, z1.h +; CHECK-NEXT: umulh z3.h, p0/m, z3.h, z6.h +; CHECK-NEXT: cmpne p2.h, p0/z, z3.h, #0 +; CHECK-NEXT: movprfx z3, z1 +; CHECK-NEXT: mul z3.h, p0/m, z3.h, z5.h +; CHECK-NEXT: umulh z1.h, p0/m, z1.h, z5.h +; CHECK-NEXT: mul z2.h, p0/m, z2.h, z6.h +; CHECK-NEXT: cmpne p3.h, p0/z, z1.h, #0 ; CHECK-NEXT: movprfx z1, z0 -; CHECK-NEXT: mul z1.h, p0/m, z1.h, z4.h -; CHECK-NEXT: umulh z0.h, p0/m, z0.h, z4.h -; CHECK-NEXT: cmpne p1.h, p0/z, z7.h, #0 -; CHECK-NEXT: cmpne p2.h, p0/z, z6.h, #0 -; CHECK-NEXT: cmpne p3.h, p0/z, z5.h, #0 -; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0 -; CHECK-NEXT: mov z1.h, p0/m, #0 // =0x0 -; CHECK-NEXT: mov z2.h, p3/m, #0 // =0x0 -; CHECK-NEXT: mov z3.h, p2/m, #0 // =0x0 +; CHECK-NEXT: umulh z1.h, p0/m, z1.h, z4.h +; CHECK-NEXT: mul z0.h, p0/m, z0.h, z4.h +; CHECK-NEXT: cmpne p0.h, p0/z, z1.h, #0 +; CHECK-NEXT: mov z3.h, p3/m, #0 // =0x0 ; CHECK-NEXT: mov z24.h, p1/m, #0 // =0x0 -; CHECK-NEXT: mov z0.d, z1.d -; CHECK-NEXT: mov z1.d, z2.d -; CHECK-NEXT: mov z2.d, z3.d +; CHECK-NEXT: mov z0.h, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z2.h, p2/m, #0 // =0x0 +; CHECK-NEXT: mov z1.d, z3.d ; CHECK-NEXT: mov z3.d, z24.d ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv32i16( %x, %y) @@ -298,13 +295,14 @@ ; CHECK-NEXT: and z1.d, z1.d, #0xffffffff ; CHECK-NEXT: and z0.d, z0.d, #0xffffffff ; CHECK-NEXT: movprfx z2, z0 -; CHECK-NEXT: umulh z2.d, p0/m, z2.d, z1.d -; CHECK-NEXT: mul z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: lsr z1.d, z0.d, #32 -; CHECK-NEXT: cmpne p1.d, p0/z, z2.d, #0 +; CHECK-NEXT: mul z2.d, p0/m, z2.d, z1.d +; CHECK-NEXT: umulh z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: lsr z1.d, z2.d, #32 +; CHECK-NEXT: cmpne p1.d, p0/z, z0.d, #0 ; CHECK-NEXT: cmpne p2.d, p0/z, z1.d, #0 ; CHECK-NEXT: orr p0.b, p0/z, p2.b, p1.b -; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z2.d, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z0.d, z2.d ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv2i32( %x, %y) %b = extractvalue { , } %a, 0 @@ -320,11 +318,10 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: movprfx z2, z0 -; CHECK-NEXT: mul z2.s, p0/m, z2.s, z1.s -; CHECK-NEXT: umulh z0.s, p0/m, z0.s, z1.s -; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #0 -; CHECK-NEXT: mov z2.s, p0/m, #0 // =0x0 -; CHECK-NEXT: mov z0.d, z2.d +; CHECK-NEXT: umulh z2.s, p0/m, z2.s, z1.s +; CHECK-NEXT: mul z0.s, p0/m, 
z0.s, z1.s +; CHECK-NEXT: cmpne p0.s, p0/z, z2.s, #0 +; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0 ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv4i32( %x, %y) %b = extractvalue { , } %a, 0 @@ -341,15 +338,14 @@ ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: movprfx z4, z1 ; CHECK-NEXT: mul z4.s, p0/m, z4.s, z3.s -; CHECK-NEXT: umulh z3.s, p0/m, z3.s, z1.s -; CHECK-NEXT: movprfx z1, z0 -; CHECK-NEXT: mul z1.s, p0/m, z1.s, z2.s -; CHECK-NEXT: umulh z0.s, p0/m, z0.s, z2.s -; CHECK-NEXT: cmpne p1.s, p0/z, z3.s, #0 -; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #0 -; CHECK-NEXT: mov z1.s, p0/m, #0 // =0x0 +; CHECK-NEXT: umulh z1.s, p0/m, z1.s, z3.s +; CHECK-NEXT: movprfx z3, z0 +; CHECK-NEXT: umulh z3.s, p0/m, z3.s, z2.s +; CHECK-NEXT: cmpne p1.s, p0/z, z1.s, #0 +; CHECK-NEXT: mul z0.s, p0/m, z0.s, z2.s +; CHECK-NEXT: cmpne p0.s, p0/z, z3.s, #0 ; CHECK-NEXT: mov z4.s, p1/m, #0 // =0x0 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0 ; CHECK-NEXT: mov z1.d, z4.d ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv8i32( %x, %y) @@ -367,27 +363,25 @@ ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: movprfx z24, z3 ; CHECK-NEXT: mul z24.s, p0/m, z24.s, z7.s -; CHECK-NEXT: umulh z7.s, p0/m, z7.s, z3.s +; CHECK-NEXT: umulh z3.s, p0/m, z3.s, z7.s +; CHECK-NEXT: cmpne p1.s, p0/z, z3.s, #0 ; CHECK-NEXT: movprfx z3, z2 -; CHECK-NEXT: mul z3.s, p0/m, z3.s, z6.s -; CHECK-NEXT: umulh z6.s, p0/m, z6.s, z2.s -; CHECK-NEXT: movprfx z2, z1 -; CHECK-NEXT: mul z2.s, p0/m, z2.s, z5.s -; CHECK-NEXT: umulh z5.s, p0/m, z5.s, z1.s +; CHECK-NEXT: umulh z3.s, p0/m, z3.s, z6.s +; CHECK-NEXT: cmpne p2.s, p0/z, z3.s, #0 +; CHECK-NEXT: movprfx z3, z1 +; CHECK-NEXT: mul z3.s, p0/m, z3.s, z5.s +; CHECK-NEXT: umulh z1.s, p0/m, z1.s, z5.s +; CHECK-NEXT: mul z2.s, p0/m, z2.s, z6.s +; CHECK-NEXT: cmpne p3.s, p0/z, z1.s, #0 ; CHECK-NEXT: movprfx z1, z0 -; CHECK-NEXT: mul z1.s, p0/m, z1.s, z4.s -; CHECK-NEXT: umulh z0.s, p0/m, z0.s, z4.s -; CHECK-NEXT: cmpne p1.s, p0/z, z7.s, #0 -; CHECK-NEXT: cmpne p2.s, p0/z, z6.s, #0 -; CHECK-NEXT: cmpne p3.s, p0/z, z5.s, #0 -; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #0 -; CHECK-NEXT: mov z1.s, p0/m, #0 // =0x0 -; CHECK-NEXT: mov z2.s, p3/m, #0 // =0x0 -; CHECK-NEXT: mov z3.s, p2/m, #0 // =0x0 +; CHECK-NEXT: umulh z1.s, p0/m, z1.s, z4.s +; CHECK-NEXT: mul z0.s, p0/m, z0.s, z4.s +; CHECK-NEXT: cmpne p0.s, p0/z, z1.s, #0 +; CHECK-NEXT: mov z3.s, p3/m, #0 // =0x0 ; CHECK-NEXT: mov z24.s, p1/m, #0 // =0x0 -; CHECK-NEXT: mov z0.d, z1.d -; CHECK-NEXT: mov z1.d, z2.d -; CHECK-NEXT: mov z2.d, z3.d +; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z2.s, p2/m, #0 // =0x0 +; CHECK-NEXT: mov z1.d, z3.d ; CHECK-NEXT: mov z3.d, z24.d ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv16i32( %x, %y) @@ -404,11 +398,10 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: movprfx z2, z0 -; CHECK-NEXT: mul z2.d, p0/m, z2.d, z1.d -; CHECK-NEXT: umulh z0.d, p0/m, z0.d, z1.d -; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0 -; CHECK-NEXT: mov z2.d, p0/m, #0 // =0x0 -; CHECK-NEXT: mov z0.d, z2.d +; CHECK-NEXT: umulh z2.d, p0/m, z2.d, z1.d +; CHECK-NEXT: mul z0.d, p0/m, z0.d, z1.d +; CHECK-NEXT: cmpne p0.d, p0/z, z2.d, #0 +; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0 ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv2i64( %x, %y) %b = extractvalue { , } %a, 0 @@ -425,15 +418,14 @@ ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: movprfx z4, z1 ; CHECK-NEXT: mul z4.d, p0/m, z4.d, z3.d -; CHECK-NEXT: umulh z3.d, p0/m, z3.d, z1.d -; CHECK-NEXT: movprfx z1, z0 -; CHECK-NEXT: mul z1.d, 
p0/m, z1.d, z2.d -; CHECK-NEXT: umulh z0.d, p0/m, z0.d, z2.d -; CHECK-NEXT: cmpne p1.d, p0/z, z3.d, #0 -; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0 -; CHECK-NEXT: mov z1.d, p0/m, #0 // =0x0 +; CHECK-NEXT: umulh z1.d, p0/m, z1.d, z3.d +; CHECK-NEXT: movprfx z3, z0 +; CHECK-NEXT: umulh z3.d, p0/m, z3.d, z2.d +; CHECK-NEXT: cmpne p1.d, p0/z, z1.d, #0 +; CHECK-NEXT: mul z0.d, p0/m, z0.d, z2.d +; CHECK-NEXT: cmpne p0.d, p0/z, z3.d, #0 ; CHECK-NEXT: mov z4.d, p1/m, #0 // =0x0 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0 ; CHECK-NEXT: mov z1.d, z4.d ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv4i64( %x, %y) @@ -451,27 +443,25 @@ ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: movprfx z24, z3 ; CHECK-NEXT: mul z24.d, p0/m, z24.d, z7.d -; CHECK-NEXT: umulh z7.d, p0/m, z7.d, z3.d +; CHECK-NEXT: umulh z3.d, p0/m, z3.d, z7.d +; CHECK-NEXT: cmpne p1.d, p0/z, z3.d, #0 ; CHECK-NEXT: movprfx z3, z2 -; CHECK-NEXT: mul z3.d, p0/m, z3.d, z6.d -; CHECK-NEXT: umulh z6.d, p0/m, z6.d, z2.d -; CHECK-NEXT: movprfx z2, z1 -; CHECK-NEXT: mul z2.d, p0/m, z2.d, z5.d -; CHECK-NEXT: umulh z5.d, p0/m, z5.d, z1.d +; CHECK-NEXT: umulh z3.d, p0/m, z3.d, z6.d +; CHECK-NEXT: cmpne p2.d, p0/z, z3.d, #0 +; CHECK-NEXT: movprfx z3, z1 +; CHECK-NEXT: mul z3.d, p0/m, z3.d, z5.d +; CHECK-NEXT: umulh z1.d, p0/m, z1.d, z5.d +; CHECK-NEXT: mul z2.d, p0/m, z2.d, z6.d +; CHECK-NEXT: cmpne p3.d, p0/z, z1.d, #0 ; CHECK-NEXT: movprfx z1, z0 -; CHECK-NEXT: mul z1.d, p0/m, z1.d, z4.d -; CHECK-NEXT: umulh z0.d, p0/m, z0.d, z4.d -; CHECK-NEXT: cmpne p1.d, p0/z, z7.d, #0 -; CHECK-NEXT: cmpne p2.d, p0/z, z6.d, #0 -; CHECK-NEXT: cmpne p3.d, p0/z, z5.d, #0 -; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0 -; CHECK-NEXT: mov z1.d, p0/m, #0 // =0x0 -; CHECK-NEXT: mov z2.d, p3/m, #0 // =0x0 -; CHECK-NEXT: mov z3.d, p2/m, #0 // =0x0 +; CHECK-NEXT: umulh z1.d, p0/m, z1.d, z4.d +; CHECK-NEXT: mul z0.d, p0/m, z0.d, z4.d +; CHECK-NEXT: cmpne p0.d, p0/z, z1.d, #0 +; CHECK-NEXT: mov z3.d, p3/m, #0 // =0x0 ; CHECK-NEXT: mov z24.d, p1/m, #0 // =0x0 -; CHECK-NEXT: mov z0.d, z1.d -; CHECK-NEXT: mov z1.d, z2.d -; CHECK-NEXT: mov z2.d, z3.d +; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0 +; CHECK-NEXT: mov z2.d, p2/m, #0 // =0x0 +; CHECK-NEXT: mov z1.d, z3.d ; CHECK-NEXT: mov z3.d, z24.d ; CHECK-NEXT: ret %a = call { , } @llvm.umul.with.overflow.nxv8i64( %x, %y) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-build-vector-trunc-undef.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-build-vector-trunc-undef.mir new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-build-vector-trunc-undef.mir @@ -0,0 +1,77 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -march=amdgcn -run-pass=amdgpu-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s +# RUN: llc -march=amdgcn -run-pass=amdgpu-postlegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s + +--- +name: test_v2s16_s32 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + ; CHECK-LABEL: name: test_v2s16_s32 + ; CHECK: liveins: $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY]](s32) + ; CHECK-NEXT: $vgpr0 = COPY [[BITCAST]](<2 x s16>) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = G_IMPLICIT_DEF + %2:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC %0, %1 + $vgpr0 = COPY %2 +... 
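+
+# The combine requires the first source to be exactly as wide as the result
+# and every later source to be undef. The cases below check that a defined
+# value after the first slot (swap) or a too-narrow first source (<4 x s16>
+# from s32) blocks the fold, while an s64 first source still becomes a
+# G_BITCAST.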
+ +--- +name: test_v2s16_s32_swap +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + ; CHECK-LABEL: name: test_v2s16_s32_swap + ; CHECK: liveins: $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[DEF]](s32), [[COPY]](s32) + ; CHECK-NEXT: $vgpr0 = COPY [[BUILD_VECTOR_TRUNC]](<2 x s16>) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = G_IMPLICIT_DEF + %2:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC %1, %0 + $vgpr0 = COPY %2 +... + +--- +name: test_v4s16_s32 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0 + ; CHECK-LABEL: name: test_v4s16_s32 + ; CHECK: liveins: $vgpr0 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; CHECK-NEXT: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR_TRUNC]](<4 x s16>) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = G_IMPLICIT_DEF + %2:_(<4 x s16>) = G_BUILD_VECTOR_TRUNC %0, %1, %1, %1 + $vgpr0_vgpr1 = COPY %2 +... + +--- +name: test_v4s16_s64 +tracksRegLiveness: true +body: | + bb.0: + liveins: $vgpr0_vgpr1 + ; CHECK-LABEL: name: test_v4s16_s64 + ; CHECK: liveins: $vgpr0_vgpr1 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 + ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<4 x s16>) = G_BITCAST [[COPY]](s64) + ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[BITCAST]](<4 x s16>) + %0:_(s64) = COPY $vgpr0_vgpr1 + %1:_(s64) = G_IMPLICIT_DEF + %2:_(<4 x s16>) = G_BUILD_VECTOR_TRUNC %0, %1, %1, %1 + $vgpr0_vgpr1 = COPY %2 +... diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.a16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.a16.ll --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.a16.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.atomic.dim.a16.ll @@ -486,39 +486,34 @@ define amdgpu_ps float @atomic_add_i32_3d(<8 x i32> inreg %rsrc, i32 %data, i16 %s, i16 %t, i16 %r) { ; GFX9-LABEL: atomic_add_i32_3d: ; GFX9: ; %bb.0: ; %main_body -; GFX9-NEXT: s_mov_b32 s0, s2 -; GFX9-NEXT: s_mov_b32 s2, s4 -; GFX9-NEXT: s_mov_b32 s4, s6 -; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: v_mov_b32_e32 v4, 0xffff ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX9-NEXT: s_lshl_b32 s8, s0, 16 +; GFX9-NEXT: s_mov_b32 s0, s2 ; GFX9-NEXT: s_mov_b32 s1, s3 +; GFX9-NEXT: s_mov_b32 s2, s4 ; GFX9-NEXT: s_mov_b32 s3, s5 +; GFX9-NEXT: s_mov_b32 s4, s6 ; GFX9-NEXT: s_mov_b32 s5, s7 +; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: s_mov_b32 s7, s9 -; GFX9-NEXT: v_and_or_b32 v1, v1, v4, v2 -; GFX9-NEXT: v_and_or_b32 v2, v3, v4, s8 -; GFX9-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 unorm glc a16 +; GFX9-NEXT: v_and_or_b32 v2, v1, v4, v2 +; GFX9-NEXT: image_atomic_add v0, v[2:3], s[0:7] dmask:0x1 unorm glc a16 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: atomic_add_i32_3d: ; GFX10: ; %bb.0: ; %main_body -; GFX10-NEXT: v_mov_b32_e32 v4, 0xffff ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX10-NEXT: s_mov_b32 s0, s2 -; GFX10-NEXT: s_mov_b32 s2, s4 -; GFX10-NEXT: s_mov_b32 s4, s6 -; GFX10-NEXT: s_mov_b32 s6, s8 -; GFX10-NEXT: s_lshl_b32 s8, s0, 16 -; GFX10-NEXT: v_and_or_b32 v1, v1, v4, v2 -; GFX10-NEXT: v_and_or_b32 v2, v3, v4, s8 ; GFX10-NEXT: s_mov_b32 s1, s3 +; GFX10-NEXT: 
s_mov_b32 s2, s4 ; GFX10-NEXT: s_mov_b32 s3, s5 +; GFX10-NEXT: v_and_or_b32 v2, 0xffff, v1, v2 +; GFX10-NEXT: s_mov_b32 s4, s6 ; GFX10-NEXT: s_mov_b32 s5, s7 +; GFX10-NEXT: s_mov_b32 s6, s8 ; GFX10-NEXT: s_mov_b32 s7, s9 -; GFX10-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_3D unorm glc a16 +; GFX10-NEXT: image_atomic_add v0, v[2:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_3D unorm glc a16 ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: ; return to shader part epilog main_body: @@ -530,39 +525,34 @@ define amdgpu_ps float @atomic_add_i32_cube(<8 x i32> inreg %rsrc, i32 %data, i16 %s, i16 %t, i16 %face) { ; GFX9-LABEL: atomic_add_i32_cube: ; GFX9: ; %bb.0: ; %main_body -; GFX9-NEXT: s_mov_b32 s0, s2 -; GFX9-NEXT: s_mov_b32 s2, s4 -; GFX9-NEXT: s_mov_b32 s4, s6 -; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: v_mov_b32_e32 v4, 0xffff ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX9-NEXT: s_lshl_b32 s8, s0, 16 +; GFX9-NEXT: s_mov_b32 s0, s2 ; GFX9-NEXT: s_mov_b32 s1, s3 +; GFX9-NEXT: s_mov_b32 s2, s4 ; GFX9-NEXT: s_mov_b32 s3, s5 +; GFX9-NEXT: s_mov_b32 s4, s6 ; GFX9-NEXT: s_mov_b32 s5, s7 +; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: s_mov_b32 s7, s9 -; GFX9-NEXT: v_and_or_b32 v1, v1, v4, v2 -; GFX9-NEXT: v_and_or_b32 v2, v3, v4, s8 -; GFX9-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 unorm glc a16 da +; GFX9-NEXT: v_and_or_b32 v2, v1, v4, v2 +; GFX9-NEXT: image_atomic_add v0, v[2:3], s[0:7] dmask:0x1 unorm glc a16 da ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: atomic_add_i32_cube: ; GFX10: ; %bb.0: ; %main_body -; GFX10-NEXT: v_mov_b32_e32 v4, 0xffff ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX10-NEXT: s_mov_b32 s0, s2 -; GFX10-NEXT: s_mov_b32 s2, s4 -; GFX10-NEXT: s_mov_b32 s4, s6 -; GFX10-NEXT: s_mov_b32 s6, s8 -; GFX10-NEXT: s_lshl_b32 s8, s0, 16 -; GFX10-NEXT: v_and_or_b32 v1, v1, v4, v2 -; GFX10-NEXT: v_and_or_b32 v2, v3, v4, s8 ; GFX10-NEXT: s_mov_b32 s1, s3 +; GFX10-NEXT: s_mov_b32 s2, s4 ; GFX10-NEXT: s_mov_b32 s3, s5 +; GFX10-NEXT: v_and_or_b32 v2, 0xffff, v1, v2 +; GFX10-NEXT: s_mov_b32 s4, s6 ; GFX10-NEXT: s_mov_b32 s5, s7 +; GFX10-NEXT: s_mov_b32 s6, s8 ; GFX10-NEXT: s_mov_b32 s7, s9 -; GFX10-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_CUBE unorm glc a16 +; GFX10-NEXT: image_atomic_add v0, v[2:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_CUBE unorm glc a16 ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: ; return to shader part epilog main_body: @@ -613,39 +603,34 @@ define amdgpu_ps float @atomic_add_i32_2darray(<8 x i32> inreg %rsrc, i32 %data, i16 %s, i16 %t, i16 %slice) { ; GFX9-LABEL: atomic_add_i32_2darray: ; GFX9: ; %bb.0: ; %main_body -; GFX9-NEXT: s_mov_b32 s0, s2 -; GFX9-NEXT: s_mov_b32 s2, s4 -; GFX9-NEXT: s_mov_b32 s4, s6 -; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: v_mov_b32_e32 v4, 0xffff ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX9-NEXT: s_lshl_b32 s8, s0, 16 +; GFX9-NEXT: s_mov_b32 s0, s2 ; GFX9-NEXT: s_mov_b32 s1, s3 +; GFX9-NEXT: s_mov_b32 s2, s4 ; GFX9-NEXT: s_mov_b32 s3, s5 +; GFX9-NEXT: s_mov_b32 s4, s6 ; GFX9-NEXT: s_mov_b32 s5, s7 +; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: s_mov_b32 s7, s9 -; GFX9-NEXT: v_and_or_b32 v1, v1, v4, v2 -; GFX9-NEXT: v_and_or_b32 v2, v3, v4, s8 -; GFX9-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 unorm glc a16 da +; GFX9-NEXT: v_and_or_b32 v2, v1, v4, v2 +; GFX9-NEXT: image_atomic_add v0, v[2:3], s[0:7] dmask:0x1 unorm glc a16 da ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: 
atomic_add_i32_2darray: ; GFX10: ; %bb.0: ; %main_body -; GFX10-NEXT: v_mov_b32_e32 v4, 0xffff ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX10-NEXT: s_mov_b32 s0, s2 -; GFX10-NEXT: s_mov_b32 s2, s4 -; GFX10-NEXT: s_mov_b32 s4, s6 -; GFX10-NEXT: s_mov_b32 s6, s8 -; GFX10-NEXT: s_lshl_b32 s8, s0, 16 -; GFX10-NEXT: v_and_or_b32 v1, v1, v4, v2 -; GFX10-NEXT: v_and_or_b32 v2, v3, v4, s8 ; GFX10-NEXT: s_mov_b32 s1, s3 +; GFX10-NEXT: s_mov_b32 s2, s4 ; GFX10-NEXT: s_mov_b32 s3, s5 +; GFX10-NEXT: v_and_or_b32 v2, 0xffff, v1, v2 +; GFX10-NEXT: s_mov_b32 s4, s6 ; GFX10-NEXT: s_mov_b32 s5, s7 +; GFX10-NEXT: s_mov_b32 s6, s8 ; GFX10-NEXT: s_mov_b32 s7, s9 -; GFX10-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY unorm glc a16 +; GFX10-NEXT: image_atomic_add v0, v[2:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY unorm glc a16 ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: ; return to shader part epilog main_body: @@ -657,39 +642,34 @@ define amdgpu_ps float @atomic_add_i32_2dmsaa(<8 x i32> inreg %rsrc, i32 %data, i16 %s, i16 %t, i16 %fragid) { ; GFX9-LABEL: atomic_add_i32_2dmsaa: ; GFX9: ; %bb.0: ; %main_body -; GFX9-NEXT: s_mov_b32 s0, s2 -; GFX9-NEXT: s_mov_b32 s2, s4 -; GFX9-NEXT: s_mov_b32 s4, s6 -; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: v_mov_b32_e32 v4, 0xffff ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2 -; GFX9-NEXT: s_lshl_b32 s8, s0, 16 +; GFX9-NEXT: s_mov_b32 s0, s2 ; GFX9-NEXT: s_mov_b32 s1, s3 +; GFX9-NEXT: s_mov_b32 s2, s4 ; GFX9-NEXT: s_mov_b32 s3, s5 +; GFX9-NEXT: s_mov_b32 s4, s6 ; GFX9-NEXT: s_mov_b32 s5, s7 +; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: s_mov_b32 s7, s9 -; GFX9-NEXT: v_and_or_b32 v1, v1, v4, v2 -; GFX9-NEXT: v_and_or_b32 v2, v3, v4, s8 -; GFX9-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 unorm glc a16 +; GFX9-NEXT: v_and_or_b32 v2, v1, v4, v2 +; GFX9-NEXT: image_atomic_add v0, v[2:3], s[0:7] dmask:0x1 unorm glc a16 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: atomic_add_i32_2dmsaa: ; GFX10: ; %bb.0: ; %main_body -; GFX10-NEXT: v_mov_b32_e32 v4, 0xffff ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2 ; GFX10-NEXT: s_mov_b32 s0, s2 -; GFX10-NEXT: s_mov_b32 s2, s4 -; GFX10-NEXT: s_mov_b32 s4, s6 -; GFX10-NEXT: s_mov_b32 s6, s8 -; GFX10-NEXT: s_lshl_b32 s8, s0, 16 -; GFX10-NEXT: v_and_or_b32 v1, v1, v4, v2 -; GFX10-NEXT: v_and_or_b32 v2, v3, v4, s8 ; GFX10-NEXT: s_mov_b32 s1, s3 +; GFX10-NEXT: s_mov_b32 s2, s4 ; GFX10-NEXT: s_mov_b32 s3, s5 +; GFX10-NEXT: v_and_or_b32 v2, 0xffff, v1, v2 +; GFX10-NEXT: s_mov_b32 s4, s6 ; GFX10-NEXT: s_mov_b32 s5, s7 +; GFX10-NEXT: s_mov_b32 s6, s8 ; GFX10-NEXT: s_mov_b32 s7, s9 -; GFX10-NEXT: image_atomic_add v0, v[1:2], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm glc a16 +; GFX10-NEXT: image_atomic_add v0, v[2:3], s[0:7] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA unorm glc a16 ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: ; return to shader part epilog main_body: @@ -1260,39 +1240,34 @@ define amdgpu_ps <2 x float> @atomic_add_i64_3d(<8 x i32> inreg %rsrc, i64 %data, i16 %s, i16 %t, i16 %r) { ; GFX9-LABEL: atomic_add_i64_3d: ; GFX9: ; %bb.0: ; %main_body -; GFX9-NEXT: s_mov_b32 s0, s2 -; GFX9-NEXT: s_mov_b32 s2, s4 -; GFX9-NEXT: s_mov_b32 s4, s6 -; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: v_mov_b32_e32 v5, 0xffff ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; GFX9-NEXT: s_lshl_b32 s8, s0, 16 +; GFX9-NEXT: s_mov_b32 s0, s2 ; GFX9-NEXT: s_mov_b32 s1, s3 +; GFX9-NEXT: s_mov_b32 s2, s4 ; GFX9-NEXT: s_mov_b32 s3, s5 +; GFX9-NEXT: s_mov_b32 s4, s6 ; GFX9-NEXT: s_mov_b32 s5, s7 
+; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: s_mov_b32 s7, s9 -; GFX9-NEXT: v_and_or_b32 v2, v2, v5, v3 -; GFX9-NEXT: v_and_or_b32 v3, v4, v5, s8 -; GFX9-NEXT: image_atomic_add v[0:1], v[2:3], s[0:7] dmask:0x3 unorm glc a16 +; GFX9-NEXT: v_and_or_b32 v3, v2, v5, v3 +; GFX9-NEXT: image_atomic_add v[0:1], v[3:4], s[0:7] dmask:0x3 unorm glc a16 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: atomic_add_i64_3d: ; GFX10: ; %bb.0: ; %main_body -; GFX10-NEXT: v_mov_b32_e32 v5, 0xffff ; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX10-NEXT: s_mov_b32 s0, s2 -; GFX10-NEXT: s_mov_b32 s2, s4 -; GFX10-NEXT: s_mov_b32 s4, s6 -; GFX10-NEXT: s_mov_b32 s6, s8 -; GFX10-NEXT: s_lshl_b32 s8, s0, 16 -; GFX10-NEXT: v_and_or_b32 v2, v2, v5, v3 -; GFX10-NEXT: v_and_or_b32 v3, v4, v5, s8 ; GFX10-NEXT: s_mov_b32 s1, s3 +; GFX10-NEXT: s_mov_b32 s2, s4 ; GFX10-NEXT: s_mov_b32 s3, s5 +; GFX10-NEXT: v_and_or_b32 v3, 0xffff, v2, v3 +; GFX10-NEXT: s_mov_b32 s4, s6 ; GFX10-NEXT: s_mov_b32 s5, s7 +; GFX10-NEXT: s_mov_b32 s6, s8 ; GFX10-NEXT: s_mov_b32 s7, s9 -; GFX10-NEXT: image_atomic_add v[0:1], v[2:3], s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_3D unorm glc a16 +; GFX10-NEXT: image_atomic_add v[0:1], v[3:4], s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_3D unorm glc a16 ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: ; return to shader part epilog main_body: @@ -1304,39 +1279,34 @@ define amdgpu_ps <2 x float> @atomic_add_i64_cube(<8 x i32> inreg %rsrc, i64 %data, i16 %s, i16 %t, i16 %face) { ; GFX9-LABEL: atomic_add_i64_cube: ; GFX9: ; %bb.0: ; %main_body -; GFX9-NEXT: s_mov_b32 s0, s2 -; GFX9-NEXT: s_mov_b32 s2, s4 -; GFX9-NEXT: s_mov_b32 s4, s6 -; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: v_mov_b32_e32 v5, 0xffff ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; GFX9-NEXT: s_lshl_b32 s8, s0, 16 +; GFX9-NEXT: s_mov_b32 s0, s2 ; GFX9-NEXT: s_mov_b32 s1, s3 +; GFX9-NEXT: s_mov_b32 s2, s4 ; GFX9-NEXT: s_mov_b32 s3, s5 +; GFX9-NEXT: s_mov_b32 s4, s6 ; GFX9-NEXT: s_mov_b32 s5, s7 +; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: s_mov_b32 s7, s9 -; GFX9-NEXT: v_and_or_b32 v2, v2, v5, v3 -; GFX9-NEXT: v_and_or_b32 v3, v4, v5, s8 -; GFX9-NEXT: image_atomic_add v[0:1], v[2:3], s[0:7] dmask:0x3 unorm glc a16 da +; GFX9-NEXT: v_and_or_b32 v3, v2, v5, v3 +; GFX9-NEXT: image_atomic_add v[0:1], v[3:4], s[0:7] dmask:0x3 unorm glc a16 da ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: atomic_add_i64_cube: ; GFX10: ; %bb.0: ; %main_body -; GFX10-NEXT: v_mov_b32_e32 v5, 0xffff ; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX10-NEXT: s_mov_b32 s0, s2 -; GFX10-NEXT: s_mov_b32 s2, s4 -; GFX10-NEXT: s_mov_b32 s4, s6 -; GFX10-NEXT: s_mov_b32 s6, s8 -; GFX10-NEXT: s_lshl_b32 s8, s0, 16 -; GFX10-NEXT: v_and_or_b32 v2, v2, v5, v3 -; GFX10-NEXT: v_and_or_b32 v3, v4, v5, s8 ; GFX10-NEXT: s_mov_b32 s1, s3 +; GFX10-NEXT: s_mov_b32 s2, s4 ; GFX10-NEXT: s_mov_b32 s3, s5 +; GFX10-NEXT: v_and_or_b32 v3, 0xffff, v2, v3 +; GFX10-NEXT: s_mov_b32 s4, s6 ; GFX10-NEXT: s_mov_b32 s5, s7 +; GFX10-NEXT: s_mov_b32 s6, s8 ; GFX10-NEXT: s_mov_b32 s7, s9 -; GFX10-NEXT: image_atomic_add v[0:1], v[2:3], s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_CUBE unorm glc a16 +; GFX10-NEXT: image_atomic_add v[0:1], v[3:4], s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_CUBE unorm glc a16 ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: ; return to shader part epilog main_body: @@ -1387,39 +1357,34 @@ define amdgpu_ps <2 x float> @atomic_add_i64_2darray(<8 x i32> inreg %rsrc, i64 %data, i16 %s, i16 %t, i16 %slice) { ; GFX9-LABEL: 
atomic_add_i64_2darray: ; GFX9: ; %bb.0: ; %main_body -; GFX9-NEXT: s_mov_b32 s0, s2 -; GFX9-NEXT: s_mov_b32 s2, s4 -; GFX9-NEXT: s_mov_b32 s4, s6 -; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: v_mov_b32_e32 v5, 0xffff ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; GFX9-NEXT: s_lshl_b32 s8, s0, 16 +; GFX9-NEXT: s_mov_b32 s0, s2 ; GFX9-NEXT: s_mov_b32 s1, s3 +; GFX9-NEXT: s_mov_b32 s2, s4 ; GFX9-NEXT: s_mov_b32 s3, s5 +; GFX9-NEXT: s_mov_b32 s4, s6 ; GFX9-NEXT: s_mov_b32 s5, s7 +; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: s_mov_b32 s7, s9 -; GFX9-NEXT: v_and_or_b32 v2, v2, v5, v3 -; GFX9-NEXT: v_and_or_b32 v3, v4, v5, s8 -; GFX9-NEXT: image_atomic_add v[0:1], v[2:3], s[0:7] dmask:0x3 unorm glc a16 da +; GFX9-NEXT: v_and_or_b32 v3, v2, v5, v3 +; GFX9-NEXT: image_atomic_add v[0:1], v[3:4], s[0:7] dmask:0x3 unorm glc a16 da ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: atomic_add_i64_2darray: ; GFX10: ; %bb.0: ; %main_body -; GFX10-NEXT: v_mov_b32_e32 v5, 0xffff ; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX10-NEXT: s_mov_b32 s0, s2 -; GFX10-NEXT: s_mov_b32 s2, s4 -; GFX10-NEXT: s_mov_b32 s4, s6 -; GFX10-NEXT: s_mov_b32 s6, s8 -; GFX10-NEXT: s_lshl_b32 s8, s0, 16 -; GFX10-NEXT: v_and_or_b32 v2, v2, v5, v3 -; GFX10-NEXT: v_and_or_b32 v3, v4, v5, s8 ; GFX10-NEXT: s_mov_b32 s1, s3 +; GFX10-NEXT: s_mov_b32 s2, s4 ; GFX10-NEXT: s_mov_b32 s3, s5 +; GFX10-NEXT: v_and_or_b32 v3, 0xffff, v2, v3 +; GFX10-NEXT: s_mov_b32 s4, s6 ; GFX10-NEXT: s_mov_b32 s5, s7 +; GFX10-NEXT: s_mov_b32 s6, s8 ; GFX10-NEXT: s_mov_b32 s7, s9 -; GFX10-NEXT: image_atomic_add v[0:1], v[2:3], s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_2D_ARRAY unorm glc a16 +; GFX10-NEXT: image_atomic_add v[0:1], v[3:4], s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_2D_ARRAY unorm glc a16 ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: ; return to shader part epilog main_body: @@ -1431,39 +1396,34 @@ define amdgpu_ps <2 x float> @atomic_add_i64_2dmsaa(<8 x i32> inreg %rsrc, i64 %data, i16 %s, i16 %t, i16 %fragid) { ; GFX9-LABEL: atomic_add_i64_2dmsaa: ; GFX9: ; %bb.0: ; %main_body -; GFX9-NEXT: s_mov_b32 s0, s2 -; GFX9-NEXT: s_mov_b32 s2, s4 -; GFX9-NEXT: s_mov_b32 s4, s6 -; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: v_mov_b32_e32 v5, 0xffff ; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v3 -; GFX9-NEXT: s_lshl_b32 s8, s0, 16 +; GFX9-NEXT: s_mov_b32 s0, s2 ; GFX9-NEXT: s_mov_b32 s1, s3 +; GFX9-NEXT: s_mov_b32 s2, s4 ; GFX9-NEXT: s_mov_b32 s3, s5 +; GFX9-NEXT: s_mov_b32 s4, s6 ; GFX9-NEXT: s_mov_b32 s5, s7 +; GFX9-NEXT: s_mov_b32 s6, s8 ; GFX9-NEXT: s_mov_b32 s7, s9 -; GFX9-NEXT: v_and_or_b32 v2, v2, v5, v3 -; GFX9-NEXT: v_and_or_b32 v3, v4, v5, s8 -; GFX9-NEXT: image_atomic_add v[0:1], v[2:3], s[0:7] dmask:0x3 unorm glc a16 +; GFX9-NEXT: v_and_or_b32 v3, v2, v5, v3 +; GFX9-NEXT: image_atomic_add v[0:1], v[3:4], s[0:7] dmask:0x3 unorm glc a16 ; GFX9-NEXT: s_waitcnt vmcnt(0) ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: atomic_add_i64_2dmsaa: ; GFX10: ; %bb.0: ; %main_body -; GFX10-NEXT: v_mov_b32_e32 v5, 0xffff ; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v3 ; GFX10-NEXT: s_mov_b32 s0, s2 -; GFX10-NEXT: s_mov_b32 s2, s4 -; GFX10-NEXT: s_mov_b32 s4, s6 -; GFX10-NEXT: s_mov_b32 s6, s8 -; GFX10-NEXT: s_lshl_b32 s8, s0, 16 -; GFX10-NEXT: v_and_or_b32 v2, v2, v5, v3 -; GFX10-NEXT: v_and_or_b32 v3, v4, v5, s8 ; GFX10-NEXT: s_mov_b32 s1, s3 +; GFX10-NEXT: s_mov_b32 s2, s4 ; GFX10-NEXT: s_mov_b32 s3, s5 +; GFX10-NEXT: v_and_or_b32 v3, 0xffff, v2, v3 +; GFX10-NEXT: s_mov_b32 s4, s6 ; GFX10-NEXT: s_mov_b32 s5, s7 +; GFX10-NEXT: s_mov_b32 
s6, s8
 ; GFX10-NEXT: s_mov_b32 s7, s9
-; GFX10-NEXT: image_atomic_add v[0:1], v[2:3], s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_2D_MSAA unorm glc a16
+; GFX10-NEXT: image_atomic_add v[0:1], v[3:4], s[0:7] dmask:0x3 dim:SQ_RSRC_IMG_2D_MSAA unorm glc a16
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: ; return to shader part epilog
 main_body:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.a16.dim.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.a16.dim.ll
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.a16.dim.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.gather4.a16.dim.ll
@@ -60,24 +60,22 @@
 ; GFX9-NEXT: s_mov_b64 s[14:15], exec
 ; GFX9-NEXT: s_mov_b32 s0, s2
 ; GFX9-NEXT: s_wqm_b64 exec, exec
-; GFX9-NEXT: s_mov_b32 s2, s4
-; GFX9-NEXT: s_mov_b32 s4, s6
-; GFX9-NEXT: s_mov_b32 s6, s8
-; GFX9-NEXT: s_mov_b32 s8, s10
-; GFX9-NEXT: s_mov_b32 s10, s12
 ; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX9-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX9-NEXT: s_mov_b32 s1, s3
+; GFX9-NEXT: s_mov_b32 s2, s4
 ; GFX9-NEXT: s_mov_b32 s3, s5
+; GFX9-NEXT: s_mov_b32 s4, s6
 ; GFX9-NEXT: s_mov_b32 s5, s7
+; GFX9-NEXT: s_mov_b32 s6, s8
 ; GFX9-NEXT: s_mov_b32 s7, s9
+; GFX9-NEXT: s_mov_b32 s8, s10
 ; GFX9-NEXT: s_mov_b32 s9, s11
+; GFX9-NEXT: s_mov_b32 s10, s12
 ; GFX9-NEXT: s_mov_b32 s11, s13
-; GFX9-NEXT: v_and_or_b32 v0, v0, v3, v1
-; GFX9-NEXT: v_and_or_b32 v1, v2, v3, s12
+; GFX9-NEXT: v_and_or_b32 v1, v0, v3, v1
 ; GFX9-NEXT: s_and_b64 exec, exec, s[14:15]
-; GFX9-NEXT: image_gather4 v[0:3], v[0:1], s[0:7], s[8:11] dmask:0x1 a16 da
+; GFX9-NEXT: image_gather4 v[0:3], v[1:2], s[0:7], s[8:11] dmask:0x1 a16 da
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-NEXT: ; return to shader part epilog
 ;
@@ -86,24 +84,21 @@
 ; GFX10NSA-NEXT: s_mov_b32 s14, exec_lo
 ; GFX10NSA-NEXT: s_mov_b32 s0, s2
 ; GFX10NSA-NEXT: s_wqm_b32 exec_lo, exec_lo
-; GFX10NSA-NEXT: v_mov_b32_e32 v3, 0xffff
 ; GFX10NSA-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX10NSA-NEXT: s_mov_b32 s2, s4
-; GFX10NSA-NEXT: s_mov_b32 s4, s6
-; GFX10NSA-NEXT: s_mov_b32 s6, s8
-; GFX10NSA-NEXT: s_mov_b32 s8, s10
-; GFX10NSA-NEXT: s_mov_b32 s10, s12
-; GFX10NSA-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX10NSA-NEXT: s_mov_b32 s1, s3
+; GFX10NSA-NEXT: s_mov_b32 s2, s4
 ; GFX10NSA-NEXT: s_mov_b32 s3, s5
+; GFX10NSA-NEXT: s_mov_b32 s4, s6
 ; GFX10NSA-NEXT: s_mov_b32 s5, s7
+; GFX10NSA-NEXT: s_mov_b32 s6, s8
 ; GFX10NSA-NEXT: s_mov_b32 s7, s9
+; GFX10NSA-NEXT: s_mov_b32 s8, s10
 ; GFX10NSA-NEXT: s_mov_b32 s9, s11
+; GFX10NSA-NEXT: s_mov_b32 s10, s12
 ; GFX10NSA-NEXT: s_mov_b32 s11, s13
-; GFX10NSA-NEXT: v_and_or_b32 v0, v0, v3, v1
-; GFX10NSA-NEXT: v_and_or_b32 v1, v2, v3, s12
+; GFX10NSA-NEXT: v_and_or_b32 v1, 0xffff, v0, v1
 ; GFX10NSA-NEXT: s_and_b32 exec_lo, exec_lo, s14
-; GFX10NSA-NEXT: image_gather4 v[0:3], v[0:1], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_CUBE a16
+; GFX10NSA-NEXT: image_gather4 v[0:3], v[1:2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_CUBE a16
 ; GFX10NSA-NEXT: s_waitcnt vmcnt(0)
 ; GFX10NSA-NEXT: ; return to shader part epilog
 main_body:
@@ -117,24 +112,22 @@
 ; GFX9-NEXT: s_mov_b64 s[14:15], exec
 ; GFX9-NEXT: s_mov_b32 s0, s2
 ; GFX9-NEXT: s_wqm_b64 exec, exec
-; GFX9-NEXT: s_mov_b32 s2, s4
-; GFX9-NEXT: s_mov_b32 s4, s6
-; GFX9-NEXT: s_mov_b32 s6, s8
-; GFX9-NEXT: s_mov_b32 s8, s10
-; GFX9-NEXT: s_mov_b32 s10, s12
 ; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX9-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX9-NEXT: s_mov_b32 s1, s3
+; GFX9-NEXT: s_mov_b32 s2, s4
 ; GFX9-NEXT: s_mov_b32 s3, s5
+; GFX9-NEXT: s_mov_b32 s4, s6
 ; GFX9-NEXT: s_mov_b32 s5, s7
+; GFX9-NEXT: s_mov_b32 s6, s8
 ; GFX9-NEXT: s_mov_b32 s7, s9
+; GFX9-NEXT: s_mov_b32 s8, s10
 ; GFX9-NEXT: s_mov_b32 s9, s11
+; GFX9-NEXT: s_mov_b32 s10, s12
 ; GFX9-NEXT: s_mov_b32 s11, s13
-; GFX9-NEXT: v_and_or_b32 v0, v0, v3, v1
-; GFX9-NEXT: v_and_or_b32 v1, v2, v3, s12
+; GFX9-NEXT: v_and_or_b32 v1, v0, v3, v1
 ; GFX9-NEXT: s_and_b64 exec, exec, s[14:15]
-; GFX9-NEXT: image_gather4 v[0:3], v[0:1], s[0:7], s[8:11] dmask:0x1 a16 da
+; GFX9-NEXT: image_gather4 v[0:3], v[1:2], s[0:7], s[8:11] dmask:0x1 a16 da
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-NEXT: ; return to shader part epilog
 ;
@@ -143,24 +136,21 @@
 ; GFX10NSA-NEXT: s_mov_b32 s14, exec_lo
 ; GFX10NSA-NEXT: s_mov_b32 s0, s2
 ; GFX10NSA-NEXT: s_wqm_b32 exec_lo, exec_lo
-; GFX10NSA-NEXT: v_mov_b32_e32 v3, 0xffff
 ; GFX10NSA-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX10NSA-NEXT: s_mov_b32 s2, s4
-; GFX10NSA-NEXT: s_mov_b32 s4, s6
-; GFX10NSA-NEXT: s_mov_b32 s6, s8
-; GFX10NSA-NEXT: s_mov_b32 s8, s10
-; GFX10NSA-NEXT: s_mov_b32 s10, s12
-; GFX10NSA-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX10NSA-NEXT: s_mov_b32 s1, s3
+; GFX10NSA-NEXT: s_mov_b32 s2, s4
 ; GFX10NSA-NEXT: s_mov_b32 s3, s5
+; GFX10NSA-NEXT: s_mov_b32 s4, s6
 ; GFX10NSA-NEXT: s_mov_b32 s5, s7
+; GFX10NSA-NEXT: s_mov_b32 s6, s8
 ; GFX10NSA-NEXT: s_mov_b32 s7, s9
+; GFX10NSA-NEXT: s_mov_b32 s8, s10
 ; GFX10NSA-NEXT: s_mov_b32 s9, s11
+; GFX10NSA-NEXT: s_mov_b32 s10, s12
 ; GFX10NSA-NEXT: s_mov_b32 s11, s13
-; GFX10NSA-NEXT: v_and_or_b32 v0, v0, v3, v1
-; GFX10NSA-NEXT: v_and_or_b32 v1, v2, v3, s12
+; GFX10NSA-NEXT: v_and_or_b32 v1, 0xffff, v0, v1
 ; GFX10NSA-NEXT: s_and_b32 exec_lo, exec_lo, s14
-; GFX10NSA-NEXT: image_gather4 v[0:3], v[0:1], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY a16
+; GFX10NSA-NEXT: image_gather4 v[0:3], v[1:2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY a16
 ; GFX10NSA-NEXT: s_waitcnt vmcnt(0)
 ; GFX10NSA-NEXT: ; return to shader part epilog
 main_body:
@@ -226,24 +216,22 @@
 ; GFX9-NEXT: s_mov_b64 s[14:15], exec
 ; GFX9-NEXT: s_mov_b32 s0, s2
 ; GFX9-NEXT: s_wqm_b64 exec, exec
-; GFX9-NEXT: s_mov_b32 s2, s4
-; GFX9-NEXT: s_mov_b32 s4, s6
-; GFX9-NEXT: s_mov_b32 s6, s8
-; GFX9-NEXT: s_mov_b32 s8, s10
-; GFX9-NEXT: s_mov_b32 s10, s12
 ; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX9-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX9-NEXT: s_mov_b32 s1, s3
+; GFX9-NEXT: s_mov_b32 s2, s4
 ; GFX9-NEXT: s_mov_b32 s3, s5
+; GFX9-NEXT: s_mov_b32 s4, s6
 ; GFX9-NEXT: s_mov_b32 s5, s7
+; GFX9-NEXT: s_mov_b32 s6, s8
 ; GFX9-NEXT: s_mov_b32 s7, s9
+; GFX9-NEXT: s_mov_b32 s8, s10
 ; GFX9-NEXT: s_mov_b32 s9, s11
+; GFX9-NEXT: s_mov_b32 s10, s12
 ; GFX9-NEXT: s_mov_b32 s11, s13
-; GFX9-NEXT: v_and_or_b32 v0, v0, v3, v1
-; GFX9-NEXT: v_and_or_b32 v1, v2, v3, s12
+; GFX9-NEXT: v_and_or_b32 v1, v0, v3, v1
 ; GFX9-NEXT: s_and_b64 exec, exec, s[14:15]
-; GFX9-NEXT: image_gather4_cl v[0:3], v[0:1], s[0:7], s[8:11] dmask:0x1 a16
+; GFX9-NEXT: image_gather4_cl v[0:3], v[1:2], s[0:7], s[8:11] dmask:0x1 a16
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-NEXT: ; return to shader part epilog
 ;
@@ -252,24 +240,21 @@
 ; GFX10NSA-NEXT: s_mov_b32 s14, exec_lo
 ; GFX10NSA-NEXT: s_mov_b32 s0, s2
 ; GFX10NSA-NEXT: s_wqm_b32 exec_lo, exec_lo
-; GFX10NSA-NEXT: v_mov_b32_e32 v3, 0xffff
 ; GFX10NSA-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX10NSA-NEXT: s_mov_b32 s2, s4
-; GFX10NSA-NEXT: s_mov_b32 s4, s6
-; GFX10NSA-NEXT: s_mov_b32 s6, s8
-; GFX10NSA-NEXT: s_mov_b32 s8, s10
-; GFX10NSA-NEXT: s_mov_b32 s10, s12
-; GFX10NSA-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX10NSA-NEXT: s_mov_b32 s1, s3
+; GFX10NSA-NEXT: s_mov_b32 s2, s4
 ; GFX10NSA-NEXT: s_mov_b32 s3, s5
+; GFX10NSA-NEXT: s_mov_b32 s4, s6
 ; GFX10NSA-NEXT: s_mov_b32 s5, s7
+; GFX10NSA-NEXT: s_mov_b32 s6, s8
 ; GFX10NSA-NEXT: s_mov_b32 s7, s9
+; GFX10NSA-NEXT: s_mov_b32 s8, s10
 ; GFX10NSA-NEXT: s_mov_b32 s9, s11
+; GFX10NSA-NEXT: s_mov_b32 s10, s12
 ; GFX10NSA-NEXT: s_mov_b32 s11, s13
-; GFX10NSA-NEXT: v_and_or_b32 v0, v0, v3, v1
-; GFX10NSA-NEXT: v_and_or_b32 v1, v2, v3, s12
+; GFX10NSA-NEXT: v_and_or_b32 v1, 0xffff, v0, v1
 ; GFX10NSA-NEXT: s_and_b32 exec_lo, exec_lo, s14
-; GFX10NSA-NEXT: image_gather4_cl v[0:3], v[0:1], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX10NSA-NEXT: image_gather4_cl v[0:3], v[1:2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
 ; GFX10NSA-NEXT: s_waitcnt vmcnt(0)
 ; GFX10NSA-NEXT: ; return to shader part epilog
 main_body:
@@ -283,24 +268,24 @@
 ; GFX9-NEXT: s_mov_b64 s[14:15], exec
 ; GFX9-NEXT: s_mov_b32 s0, s2
 ; GFX9-NEXT: s_wqm_b64 exec, exec
-; GFX9-NEXT: s_mov_b32 s2, s4
-; GFX9-NEXT: s_mov_b32 s4, s6
-; GFX9-NEXT: s_mov_b32 s6, s8
-; GFX9-NEXT: s_mov_b32 s8, s10
-; GFX9-NEXT: s_mov_b32 s10, s12
-; GFX9-NEXT: v_mov_b32_e32 v4, 0xffff
+; GFX9-NEXT: v_mov_b32_e32 v4, v1
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: v_mov_b32_e32 v0, 0xffff
 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX9-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX9-NEXT: s_mov_b32 s1, s3
+; GFX9-NEXT: s_mov_b32 s2, s4
 ; GFX9-NEXT: s_mov_b32 s3, s5
+; GFX9-NEXT: s_mov_b32 s4, s6
 ; GFX9-NEXT: s_mov_b32 s5, s7
+; GFX9-NEXT: s_mov_b32 s6, s8
 ; GFX9-NEXT: s_mov_b32 s7, s9
+; GFX9-NEXT: s_mov_b32 s8, s10
 ; GFX9-NEXT: s_mov_b32 s9, s11
+; GFX9-NEXT: s_mov_b32 s10, s12
 ; GFX9-NEXT: s_mov_b32 s11, s13
-; GFX9-NEXT: v_and_or_b32 v1, v1, v4, v2
-; GFX9-NEXT: v_and_or_b32 v2, v3, v4, s12
+; GFX9-NEXT: v_and_or_b32 v2, v4, v0, v2
 ; GFX9-NEXT: s_and_b64 exec, exec, s[14:15]
-; GFX9-NEXT: image_gather4_c_cl v[0:3], v[0:2], s[0:7], s[8:11] dmask:0x1 a16
+; GFX9-NEXT: image_gather4_c_cl v[0:3], v[1:3], s[0:7], s[8:11] dmask:0x1 a16
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-NEXT: ; return to shader part epilog
 ;
@@ -309,24 +294,21 @@
 ; GFX10NSA-NEXT: s_mov_b32 s14, exec_lo
 ; GFX10NSA-NEXT: s_mov_b32 s0, s2
 ; GFX10NSA-NEXT: s_wqm_b32 exec_lo, exec_lo
-; GFX10NSA-NEXT: v_mov_b32_e32 v4, 0xffff
 ; GFX10NSA-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX10NSA-NEXT: s_mov_b32 s2, s4
-; GFX10NSA-NEXT: s_mov_b32 s4, s6
-; GFX10NSA-NEXT: s_mov_b32 s6, s8
-; GFX10NSA-NEXT: s_mov_b32 s8, s10
-; GFX10NSA-NEXT: s_mov_b32 s10, s12
-; GFX10NSA-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX10NSA-NEXT: s_mov_b32 s1, s3
+; GFX10NSA-NEXT: s_mov_b32 s2, s4
 ; GFX10NSA-NEXT: s_mov_b32 s3, s5
+; GFX10NSA-NEXT: s_mov_b32 s4, s6
 ; GFX10NSA-NEXT: s_mov_b32 s5, s7
+; GFX10NSA-NEXT: s_mov_b32 s6, s8
 ; GFX10NSA-NEXT: s_mov_b32 s7, s9
+; GFX10NSA-NEXT: s_mov_b32 s8, s10
 ; GFX10NSA-NEXT: s_mov_b32 s9, s11
+; GFX10NSA-NEXT: s_mov_b32 s10, s12
 ; GFX10NSA-NEXT: s_mov_b32 s11, s13
-; GFX10NSA-NEXT: v_and_or_b32 v1, v1, v4, v2
-; GFX10NSA-NEXT: v_and_or_b32 v2, v3, v4, s12
+; GFX10NSA-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
 ; GFX10NSA-NEXT: s_and_b32 exec_lo, exec_lo, s14
-; GFX10NSA-NEXT: image_gather4_c_cl v[0:3], v[0:2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX10NSA-NEXT: image_gather4_c_cl v[0:3], [v0, v1, v3], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
 ; GFX10NSA-NEXT: s_waitcnt vmcnt(0)
 ; GFX10NSA-NEXT: ; return to shader part epilog
 main_body:
@@ -444,24 +426,24 @@
 ; GFX9-NEXT: s_mov_b64 s[14:15], exec
 ; GFX9-NEXT: s_mov_b32 s0, s2
 ; GFX9-NEXT: s_wqm_b64 exec, exec
-; GFX9-NEXT: s_mov_b32 s2, s4
-; GFX9-NEXT: s_mov_b32 s4, s6
-; GFX9-NEXT: s_mov_b32 s6, s8
-; GFX9-NEXT: s_mov_b32 s8, s10
-; GFX9-NEXT: s_mov_b32 s10, s12
-; GFX9-NEXT: v_mov_b32_e32 v4, 0xffff
+; GFX9-NEXT: v_mov_b32_e32 v4, v1
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: v_mov_b32_e32 v0, 0xffff
 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX9-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX9-NEXT: s_mov_b32 s1, s3
+; GFX9-NEXT: s_mov_b32 s2, s4
 ; GFX9-NEXT: s_mov_b32 s3, s5
+; GFX9-NEXT: s_mov_b32 s4, s6
 ; GFX9-NEXT: s_mov_b32 s5, s7
+; GFX9-NEXT: s_mov_b32 s6, s8
 ; GFX9-NEXT: s_mov_b32 s7, s9
+; GFX9-NEXT: s_mov_b32 s8, s10
 ; GFX9-NEXT: s_mov_b32 s9, s11
+; GFX9-NEXT: s_mov_b32 s10, s12
 ; GFX9-NEXT: s_mov_b32 s11, s13
-; GFX9-NEXT: v_and_or_b32 v1, v1, v4, v2
-; GFX9-NEXT: v_and_or_b32 v2, v3, v4, s12
+; GFX9-NEXT: v_and_or_b32 v2, v4, v0, v2
 ; GFX9-NEXT: s_and_b64 exec, exec, s[14:15]
-; GFX9-NEXT: image_gather4_b_cl v[0:3], v[0:2], s[0:7], s[8:11] dmask:0x1 a16
+; GFX9-NEXT: image_gather4_b_cl v[0:3], v[1:3], s[0:7], s[8:11] dmask:0x1 a16
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-NEXT: ; return to shader part epilog
 ;
@@ -470,24 +452,21 @@
 ; GFX10NSA-NEXT: s_mov_b32 s14, exec_lo
 ; GFX10NSA-NEXT: s_mov_b32 s0, s2
 ; GFX10NSA-NEXT: s_wqm_b32 exec_lo, exec_lo
-; GFX10NSA-NEXT: v_mov_b32_e32 v4, 0xffff
 ; GFX10NSA-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX10NSA-NEXT: s_mov_b32 s2, s4
-; GFX10NSA-NEXT: s_mov_b32 s4, s6
-; GFX10NSA-NEXT: s_mov_b32 s6, s8
-; GFX10NSA-NEXT: s_mov_b32 s8, s10
-; GFX10NSA-NEXT: s_mov_b32 s10, s12
-; GFX10NSA-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX10NSA-NEXT: s_mov_b32 s1, s3
+; GFX10NSA-NEXT: s_mov_b32 s2, s4
 ; GFX10NSA-NEXT: s_mov_b32 s3, s5
+; GFX10NSA-NEXT: s_mov_b32 s4, s6
 ; GFX10NSA-NEXT: s_mov_b32 s5, s7
+; GFX10NSA-NEXT: s_mov_b32 s6, s8
 ; GFX10NSA-NEXT: s_mov_b32 s7, s9
+; GFX10NSA-NEXT: s_mov_b32 s8, s10
 ; GFX10NSA-NEXT: s_mov_b32 s9, s11
+; GFX10NSA-NEXT: s_mov_b32 s10, s12
 ; GFX10NSA-NEXT: s_mov_b32 s11, s13
-; GFX10NSA-NEXT: v_and_or_b32 v1, v1, v4, v2
-; GFX10NSA-NEXT: v_and_or_b32 v2, v3, v4, s12
+; GFX10NSA-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
 ; GFX10NSA-NEXT: s_and_b32 exec_lo, exec_lo, s14
-; GFX10NSA-NEXT: image_gather4_b_cl v[0:3], v[0:2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX10NSA-NEXT: image_gather4_b_cl v[0:3], [v0, v1, v3], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
 ; GFX10NSA-NEXT: s_waitcnt vmcnt(0)
 ; GFX10NSA-NEXT: ; return to shader part epilog
 main_body:
@@ -501,22 +480,22 @@
 ; GFX9-NEXT: s_mov_b64 s[14:15], exec
 ; GFX9-NEXT: s_mov_b32 s0, s2
 ; GFX9-NEXT: s_wqm_b64 exec, exec
-; GFX9-NEXT: s_mov_b32 s2, s4
-; GFX9-NEXT: s_mov_b32 s4, s6
-; GFX9-NEXT: s_mov_b32 s6, s8
-; GFX9-NEXT: s_mov_b32 s8, s10
-; GFX9-NEXT: s_mov_b32 s10, s12
-; GFX9-NEXT: v_mov_b32_e32 v5, 0xffff
-; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX9-NEXT: s_lshl_b32 s12, s0, 16
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: v_mov_b32_e32 v3, v4
+; GFX9-NEXT: v_mov_b32_e32 v4, 0xffff
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v5
 ; GFX9-NEXT: s_mov_b32 s1, s3
+; GFX9-NEXT: s_mov_b32 s2, s4
 ; GFX9-NEXT: s_mov_b32 s3, s5
+; GFX9-NEXT: s_mov_b32 s4, s6
 ; GFX9-NEXT: s_mov_b32 s5, s7
+; GFX9-NEXT: s_mov_b32 s6, s8
 ; GFX9-NEXT: s_mov_b32 s7, s9
+; GFX9-NEXT: s_mov_b32 s8, s10
 ; GFX9-NEXT: s_mov_b32 s9, s11
+; GFX9-NEXT: s_mov_b32 s10, s12
 ; GFX9-NEXT: s_mov_b32 s11, s13
-; GFX9-NEXT: v_and_or_b32 v2, v2, v5, v3
-; GFX9-NEXT: v_and_or_b32 v3, v4, v5, s12
+; GFX9-NEXT: v_and_or_b32 v2, v2, v4, v5
 ; GFX9-NEXT: s_and_b64 exec, exec, s[14:15]
 ; GFX9-NEXT: image_gather4_c_b_cl v[0:3], v[0:3], s[0:7], s[8:11] dmask:0x1 a16
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -527,24 +506,21 @@
 ; GFX10NSA-NEXT: s_mov_b32 s14, exec_lo
 ; GFX10NSA-NEXT: s_mov_b32 s0, s2
 ; GFX10NSA-NEXT: s_wqm_b32 exec_lo, exec_lo
-; GFX10NSA-NEXT: v_mov_b32_e32 v5, 0xffff
 ; GFX10NSA-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX10NSA-NEXT: s_mov_b32 s2, s4
-; GFX10NSA-NEXT: s_mov_b32 s4, s6
-; GFX10NSA-NEXT: s_mov_b32 s6, s8
-; GFX10NSA-NEXT: s_mov_b32 s8, s10
-; GFX10NSA-NEXT: s_mov_b32 s10, s12
-; GFX10NSA-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX10NSA-NEXT: s_mov_b32 s1, s3
+; GFX10NSA-NEXT: s_mov_b32 s2, s4
 ; GFX10NSA-NEXT: s_mov_b32 s3, s5
+; GFX10NSA-NEXT: s_mov_b32 s4, s6
 ; GFX10NSA-NEXT: s_mov_b32 s5, s7
+; GFX10NSA-NEXT: s_mov_b32 s6, s8
 ; GFX10NSA-NEXT: s_mov_b32 s7, s9
+; GFX10NSA-NEXT: s_mov_b32 s8, s10
 ; GFX10NSA-NEXT: s_mov_b32 s9, s11
+; GFX10NSA-NEXT: s_mov_b32 s10, s12
 ; GFX10NSA-NEXT: s_mov_b32 s11, s13
-; GFX10NSA-NEXT: v_and_or_b32 v2, v2, v5, v3
-; GFX10NSA-NEXT: v_and_or_b32 v3, v4, v5, s12
+; GFX10NSA-NEXT: v_and_or_b32 v2, 0xffff, v2, v3
 ; GFX10NSA-NEXT: s_and_b32 exec_lo, exec_lo, s14
-; GFX10NSA-NEXT: image_gather4_c_b_cl v[0:3], v[0:3], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX10NSA-NEXT: image_gather4_c_b_cl v[0:3], [v0, v1, v2, v4], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
 ; GFX10NSA-NEXT: s_waitcnt vmcnt(0)
 ; GFX10NSA-NEXT: ; return to shader part epilog
 main_body:
@@ -555,47 +531,42 @@
 define amdgpu_ps <4 x float> @gather4_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %s, half %t, half %lod) {
 ; GFX9-LABEL: gather4_l_2d:
 ; GFX9: ; %bb.0: ; %main_body
-; GFX9-NEXT: s_mov_b32 s0, s2
-; GFX9-NEXT: s_mov_b32 s2, s4
-; GFX9-NEXT: s_mov_b32 s4, s6
-; GFX9-NEXT: s_mov_b32 s6, s8
-; GFX9-NEXT: s_mov_b32 s8, s10
-; GFX9-NEXT: s_mov_b32 s10, s12
 ; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX9-NEXT: s_lshl_b32 s12, s0, 16
+; GFX9-NEXT: s_mov_b32 s0, s2
 ; GFX9-NEXT: s_mov_b32 s1, s3
+; GFX9-NEXT: s_mov_b32 s2, s4
 ; GFX9-NEXT: s_mov_b32 s3, s5
+; GFX9-NEXT: s_mov_b32 s4, s6
 ; GFX9-NEXT: s_mov_b32 s5, s7
+; GFX9-NEXT: s_mov_b32 s6, s8
 ; GFX9-NEXT: s_mov_b32 s7, s9
+; GFX9-NEXT: s_mov_b32 s8, s10
 ; GFX9-NEXT: s_mov_b32 s9, s11
+; GFX9-NEXT: s_mov_b32 s10, s12
 ; GFX9-NEXT: s_mov_b32 s11, s13
-; GFX9-NEXT: v_and_or_b32 v0, v0, v3, v1
-; GFX9-NEXT: v_and_or_b32 v1, v2, v3, s12
-; GFX9-NEXT: image_gather4_l v[0:3], v[0:1], s[0:7], s[8:11] dmask:0x1 a16
+; GFX9-NEXT: v_and_or_b32 v1, v0, v3, v1
+; GFX9-NEXT: image_gather4_l v[0:3], v[1:2], s[0:7], s[8:11] dmask:0x1 a16
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-NEXT: ; return to shader part epilog
 ;
 ; GFX10NSA-LABEL: gather4_l_2d:
 ; GFX10NSA: ; %bb.0: ; %main_body
-; GFX10NSA-NEXT: v_mov_b32_e32 v3, 0xffff
 ; GFX10NSA-NEXT: v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10NSA-NEXT: s_mov_b32 s0, s2
-; GFX10NSA-NEXT: s_mov_b32 s2, s4
-; GFX10NSA-NEXT: s_mov_b32 s4, s6
-; GFX10NSA-NEXT: s_mov_b32 s6, s8
-; GFX10NSA-NEXT: s_mov_b32 s8, s10
-; GFX10NSA-NEXT: s_mov_b32 s10, s12
-; GFX10NSA-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10NSA-NEXT: v_and_or_b32 v0, v0, v3, v1
-; GFX10NSA-NEXT: v_and_or_b32 v1, v2, v3, s12
 ; GFX10NSA-NEXT: s_mov_b32 s1, s3
+; GFX10NSA-NEXT: s_mov_b32 s2, s4
 ; GFX10NSA-NEXT: s_mov_b32 s3, s5
+; GFX10NSA-NEXT: v_and_or_b32 v1, 0xffff, v0, v1
+; GFX10NSA-NEXT: s_mov_b32 s4, s6
 ; GFX10NSA-NEXT: s_mov_b32 s5, s7
+; GFX10NSA-NEXT: s_mov_b32 s6, s8
 ; GFX10NSA-NEXT: s_mov_b32 s7, s9
+; GFX10NSA-NEXT: s_mov_b32 s8, s10
 ; GFX10NSA-NEXT: s_mov_b32 s9, s11
+; GFX10NSA-NEXT: s_mov_b32 s10, s12
 ; GFX10NSA-NEXT: s_mov_b32 s11, s13
-; GFX10NSA-NEXT: image_gather4_l v[0:3], v[0:1], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX10NSA-NEXT: image_gather4_l v[0:3], v[1:2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
 ; GFX10NSA-NEXT: s_waitcnt vmcnt(0)
 ; GFX10NSA-NEXT: ; return to shader part epilog
 main_body:
@@ -606,47 +577,44 @@
 define amdgpu_ps <4 x float> @gather4_c_l_2d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %s, half %t, half %lod) {
 ; GFX9-LABEL: gather4_c_l_2d:
 ; GFX9: ; %bb.0: ; %main_body
-; GFX9-NEXT: s_mov_b32 s0, s2
-; GFX9-NEXT: s_mov_b32 s2, s4
-; GFX9-NEXT: s_mov_b32 s4, s6
-; GFX9-NEXT: s_mov_b32 s6, s8
-; GFX9-NEXT: s_mov_b32 s8, s10
-; GFX9-NEXT: s_mov_b32 s10, s12
-; GFX9-NEXT: v_mov_b32_e32 v4, 0xffff
+; GFX9-NEXT: v_mov_b32_e32 v4, v1
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: v_mov_b32_e32 v0, 0xffff
 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX9-NEXT: s_lshl_b32 s12, s0, 16
+; GFX9-NEXT: s_mov_b32 s0, s2
 ; GFX9-NEXT: s_mov_b32 s1, s3
+; GFX9-NEXT: s_mov_b32 s2, s4
 ; GFX9-NEXT: s_mov_b32 s3, s5
+; GFX9-NEXT: s_mov_b32 s4, s6
 ; GFX9-NEXT: s_mov_b32 s5, s7
+; GFX9-NEXT: s_mov_b32 s6, s8
 ; GFX9-NEXT: s_mov_b32 s7, s9
+; GFX9-NEXT: s_mov_b32 s8, s10
 ; GFX9-NEXT: s_mov_b32 s9, s11
+; GFX9-NEXT: s_mov_b32 s10, s12
 ; GFX9-NEXT: s_mov_b32 s11, s13
-; GFX9-NEXT: v_and_or_b32 v1, v1, v4, v2
-; GFX9-NEXT: v_and_or_b32 v2, v3, v4, s12
-; GFX9-NEXT: image_gather4_c_l v[0:3], v[0:2], s[0:7], s[8:11] dmask:0x1 a16
+; GFX9-NEXT: v_and_or_b32 v2, v4, v0, v2
+; GFX9-NEXT: image_gather4_c_l v[0:3], v[1:3], s[0:7], s[8:11] dmask:0x1 a16
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-NEXT: ; return to shader part epilog
 ;
 ; GFX10NSA-LABEL: gather4_c_l_2d:
 ; GFX10NSA: ; %bb.0: ; %main_body
-; GFX10NSA-NEXT: v_mov_b32_e32 v4, 0xffff
 ; GFX10NSA-NEXT: v_lshlrev_b32_e32 v2, 16, v2
 ; GFX10NSA-NEXT: s_mov_b32 s0, s2
-; GFX10NSA-NEXT: s_mov_b32 s2, s4
-; GFX10NSA-NEXT: s_mov_b32 s4, s6
-; GFX10NSA-NEXT: s_mov_b32 s6, s8
-; GFX10NSA-NEXT: s_mov_b32 s8, s10
-; GFX10NSA-NEXT: s_mov_b32 s10, s12
-; GFX10NSA-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10NSA-NEXT: v_and_or_b32 v1, v1, v4, v2
-; GFX10NSA-NEXT: v_and_or_b32 v2, v3, v4, s12
 ; GFX10NSA-NEXT: s_mov_b32 s1, s3
+; GFX10NSA-NEXT: s_mov_b32 s2, s4
 ; GFX10NSA-NEXT: s_mov_b32 s3, s5
+; GFX10NSA-NEXT: v_and_or_b32 v1, 0xffff, v1, v2
+; GFX10NSA-NEXT: s_mov_b32 s4, s6
 ; GFX10NSA-NEXT: s_mov_b32 s5, s7
+; GFX10NSA-NEXT: s_mov_b32 s6, s8
 ; GFX10NSA-NEXT: s_mov_b32 s7, s9
+; GFX10NSA-NEXT: s_mov_b32 s8, s10
 ; GFX10NSA-NEXT: s_mov_b32 s9, s11
+; GFX10NSA-NEXT: s_mov_b32 s10, s12
 ; GFX10NSA-NEXT: s_mov_b32 s11, s13
-; GFX10NSA-NEXT: image_gather4_c_l v[0:3], v[0:2], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
+; GFX10NSA-NEXT: image_gather4_c_l v[0:3], [v0, v1, v3], s[0:7], s[8:11] dmask:0x1 dim:SQ_RSRC_IMG_2D a16
 ; GFX10NSA-NEXT: s_waitcnt vmcnt(0)
 ; GFX10NSA-NEXT: ; return to shader part epilog
 main_body:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.load.3d.a16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.load.3d.a16.ll
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.load.3d.a16.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.load.3d.a16.ll
@@ -5,39 +5,34 @@
 define amdgpu_ps <4 x float> @load_3d_v4f32_xyzw(<8 x i32> inreg %rsrc, i16 %s, i16 %t, i16 %r) {
 ; GFX9-LABEL: load_3d_v4f32_xyzw:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s0, s2
-; GFX9-NEXT: s_mov_b32 s2, s4
-; GFX9-NEXT: s_mov_b32 s4, s6
-; GFX9-NEXT: s_mov_b32 s6, s8
 ; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX9-NEXT: s_lshl_b32 s8, s0, 16
+; GFX9-NEXT: s_mov_b32 s0, s2
 ; GFX9-NEXT: s_mov_b32 s1, s3
+; GFX9-NEXT: s_mov_b32 s2, s4
 ; GFX9-NEXT: s_mov_b32 s3, s5
+; GFX9-NEXT: s_mov_b32 s4, s6
 ; GFX9-NEXT: s_mov_b32 s5, s7
+; GFX9-NEXT: s_mov_b32 s6, s8
 ; GFX9-NEXT: s_mov_b32 s7, s9
-; GFX9-NEXT: v_and_or_b32 v0, v0, v3, v1
-; GFX9-NEXT: v_and_or_b32 v1, v2, v3, s8
-; GFX9-NEXT: image_load v[0:3], v[0:1], s[0:7] dmask:0xf unorm a16
+; GFX9-NEXT: v_and_or_b32 v1, v0, v3, v1
+; GFX9-NEXT: image_load v[0:3], v[1:2], s[0:7] dmask:0xf unorm a16
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-NEXT: ; return to shader part epilog
 ;
 ; GFX10-LABEL: load_3d_v4f32_xyzw:
 ; GFX10: ; %bb.0:
-; GFX10-NEXT: v_mov_b32_e32 v3, 0xffff
 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT: s_mov_b32 s0, s2
-; GFX10-NEXT: s_mov_b32 s2, s4
-; GFX10-NEXT: s_mov_b32 s4, s6
-; GFX10-NEXT: s_mov_b32 s6, s8
-; GFX10-NEXT: s_lshl_b32 s8, s0, 16
-; GFX10-NEXT: v_and_or_b32 v0, v0, v3, v1
-; GFX10-NEXT: v_and_or_b32 v1, v2, v3, s8
 ; GFX10-NEXT: s_mov_b32 s1, s3
+; GFX10-NEXT: s_mov_b32 s2, s4
 ; GFX10-NEXT: s_mov_b32 s3, s5
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v0, v1
+; GFX10-NEXT: s_mov_b32 s4, s6
 ; GFX10-NEXT: s_mov_b32 s5, s7
+; GFX10-NEXT: s_mov_b32 s6, s8
 ; GFX10-NEXT: s_mov_b32 s7, s9
-; GFX10-NEXT: image_load v[0:3], v[0:1], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_3D unorm a16
+; GFX10-NEXT: image_load v[0:3], v[1:2], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_3D unorm a16
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: ; return to shader part epilog
 %v = call <4 x float> @llvm.amdgcn.image.load.3d.v4f32.i16(i32 15, i16 %s, i16 %t, i16 %r, <8 x i32> %rsrc, i32 0, i32 0)
@@ -47,63 +42,60 @@
 define amdgpu_ps <4 x float> @load_3d_v4f32_xyzw_tfe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i16 %s, i16 %t, i16 %r) {
 ; GFX9-LABEL: load_3d_v4f32_xyzw_tfe:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s0, s2
-; GFX9-NEXT: s_mov_b32 s2, s4
-; GFX9-NEXT: s_mov_b32 s4, s6
-; GFX9-NEXT: s_mov_b32 s6, s8
-; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
+; GFX9-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff
 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX9-NEXT: s_lshl_b32 s8, s0, 16
-; GFX9-NEXT: v_mov_b32_e32 v5, 0
-; GFX9-NEXT: v_and_or_b32 v10, v0, v3, v1
-; GFX9-NEXT: v_and_or_b32 v11, v2, v3, s8
-; GFX9-NEXT: v_mov_b32_e32 v6, v5
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v8, v5
-; GFX9-NEXT: v_mov_b32_e32 v9, v5
-; GFX9-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: v_and_or_b32 v5, v0, v2, v1
+; GFX9-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-NEXT: v_mov_b32_e32 v9, v7
+; GFX9-NEXT: v_mov_b32_e32 v10, v7
+; GFX9-NEXT: v_mov_b32_e32 v11, v7
+; GFX9-NEXT: v_mov_b32_e32 v0, v7
+; GFX9-NEXT: s_mov_b32 s0, s2
 ; GFX9-NEXT: s_mov_b32 s1, s3
+; GFX9-NEXT: s_mov_b32 s2, s4
 ; GFX9-NEXT: s_mov_b32 s3, s5
+; GFX9-NEXT: s_mov_b32 s4, s6
 ; GFX9-NEXT: s_mov_b32 s5, s7
+; GFX9-NEXT: s_mov_b32 s6, s8
 ; GFX9-NEXT: s_mov_b32 s7, s9
-; GFX9-NEXT: v_mov_b32_e32 v1, v6
-; GFX9-NEXT: v_mov_b32_e32 v2, v7
-; GFX9-NEXT: v_mov_b32_e32 v3, v8
-; GFX9-NEXT: v_mov_b32_e32 v4, v9
-; GFX9-NEXT: image_load v[0:4], v[10:11], s[0:7] dmask:0xf unorm a16 tfe
+; GFX9-NEXT: v_mov_b32_e32 v1, v8
+; GFX9-NEXT: v_mov_b32_e32 v2, v9
+; GFX9-NEXT: v_mov_b32_e32 v3, v10
+; GFX9-NEXT: v_mov_b32_e32 v4, v11
+; GFX9-NEXT: image_load v[0:4], v[5:6], s[0:7] dmask:0xf unorm a16 tfe
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: global_store_dword v5, v4, s[10:11]
+; GFX9-NEXT: global_store_dword v7, v4, s[10:11]
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-NEXT: ; return to shader part epilog
 ;
 ; GFX10-LABEL: load_3d_v4f32_xyzw_tfe:
 ; GFX10: ; %bb.0:
-; GFX10-NEXT: v_mov_b32_e32 v5, 0
-; GFX10-NEXT: v_mov_b32_e32 v3, 0xffff
+; GFX10-NEXT: v_mov_b32_e32 v7, 0
 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_mov_b32_e32 v6, v2
 ; GFX10-NEXT: s_mov_b32 s0, s2
-; GFX10-NEXT: s_mov_b32 s2, s4
-; GFX10-NEXT: s_mov_b32 s4, s6
-; GFX10-NEXT: s_mov_b32 s6, s8
-; GFX10-NEXT: s_lshl_b32 s8, s0, 16
-; GFX10-NEXT: v_mov_b32_e32 v6, v5
-; GFX10-NEXT: v_mov_b32_e32 v7, v5
-; GFX10-NEXT: v_mov_b32_e32 v8, v5
-; GFX10-NEXT: v_mov_b32_e32 v9, v5
-; GFX10-NEXT: v_and_or_b32 v10, v0, v3, v1
-; GFX10-NEXT: v_and_or_b32 v11, v2, v3, s8
 ; GFX10-NEXT: s_mov_b32 s1, s3
+; GFX10-NEXT: v_mov_b32_e32 v8, v7
+; GFX10-NEXT: v_mov_b32_e32 v9, v7
+; GFX10-NEXT: v_mov_b32_e32 v10, v7
+; GFX10-NEXT: v_mov_b32_e32 v11, v7
+; GFX10-NEXT: v_and_or_b32 v5, 0xffff, v0, v1
+; GFX10-NEXT: s_mov_b32 s2, s4
 ; GFX10-NEXT: s_mov_b32 s3, s5
+; GFX10-NEXT: s_mov_b32 s4, s6
 ; GFX10-NEXT: s_mov_b32 s5, s7
+; GFX10-NEXT: s_mov_b32 s6, s8
 ; GFX10-NEXT: s_mov_b32 s7, s9
-; GFX10-NEXT: v_mov_b32_e32 v0, v5
-; GFX10-NEXT: v_mov_b32_e32 v1, v6
-; GFX10-NEXT: v_mov_b32_e32 v2, v7
-; GFX10-NEXT: v_mov_b32_e32 v3, v8
-; GFX10-NEXT: v_mov_b32_e32 v4, v9
-; GFX10-NEXT: image_load v[0:4], v[10:11], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_3D unorm a16 tfe
+; GFX10-NEXT: v_mov_b32_e32 v0, v7
+; GFX10-NEXT: v_mov_b32_e32 v1, v8
+; GFX10-NEXT: v_mov_b32_e32 v2, v9
+; GFX10-NEXT: v_mov_b32_e32 v3, v10
+; GFX10-NEXT: v_mov_b32_e32 v4, v11
+; GFX10-NEXT: image_load v[0:4], v[5:6], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_3D unorm a16 tfe
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: global_store_dword v5, v4, s[10:11]
+; GFX10-NEXT: global_store_dword v7, v4, s[10:11]
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT: ; return to shader part epilog
 %v = call { <4 x float>, i32 } @llvm.amdgcn.image.load.3d.sl_v4f32i32s.i16(i32 15, i16 %s, i16 %t, i16 %r, <8 x i32> %rsrc, i32 1, i32 0)
@@ -116,63 +108,60 @@
 define amdgpu_ps <4 x float> @load_3d_v4f32_xyzw_tfe_lwe(<8 x i32> inreg %rsrc, i32 addrspace(1)* inreg %out, i16 %s, i16 %t, i16 %r) {
 ; GFX9-LABEL: load_3d_v4f32_xyzw_tfe_lwe:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s0, s2
-; GFX9-NEXT: s_mov_b32 s2, s4
-; GFX9-NEXT: s_mov_b32 s4, s6
-; GFX9-NEXT: s_mov_b32 s6, s8
-; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
+; GFX9-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff
 ; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX9-NEXT: s_lshl_b32 s8, s0, 16
-; GFX9-NEXT: v_mov_b32_e32 v5, 0
-; GFX9-NEXT: v_and_or_b32 v10, v0, v3, v1
-; GFX9-NEXT: v_and_or_b32 v11, v2, v3, s8
-; GFX9-NEXT: v_mov_b32_e32 v6, v5
-; GFX9-NEXT: v_mov_b32_e32 v7, v5
-; GFX9-NEXT: v_mov_b32_e32 v8, v5
-; GFX9-NEXT: v_mov_b32_e32 v9, v5
-; GFX9-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-NEXT: v_mov_b32_e32 v7, 0
+; GFX9-NEXT: v_and_or_b32 v5, v0, v2, v1
+; GFX9-NEXT: v_mov_b32_e32 v8, v7
+; GFX9-NEXT: v_mov_b32_e32 v9, v7
+; GFX9-NEXT: v_mov_b32_e32 v10, v7
+; GFX9-NEXT: v_mov_b32_e32 v11, v7
+; GFX9-NEXT: v_mov_b32_e32 v0, v7
+; GFX9-NEXT: s_mov_b32 s0, s2
 ; GFX9-NEXT: s_mov_b32 s1, s3
+; GFX9-NEXT: s_mov_b32 s2, s4
 ; GFX9-NEXT: s_mov_b32 s3, s5
+; GFX9-NEXT: s_mov_b32 s4, s6
 ; GFX9-NEXT: s_mov_b32 s5, s7
+; GFX9-NEXT: s_mov_b32 s6, s8
 ; GFX9-NEXT: s_mov_b32 s7, s9
-; GFX9-NEXT: v_mov_b32_e32 v1, v6
-; GFX9-NEXT: v_mov_b32_e32 v2, v7
-; GFX9-NEXT: v_mov_b32_e32 v3, v8
-; GFX9-NEXT: v_mov_b32_e32 v4, v9
-; GFX9-NEXT: image_load v[0:4], v[10:11], s[0:7] dmask:0xf unorm a16 tfe lwe
+; GFX9-NEXT: v_mov_b32_e32 v1, v8
+; GFX9-NEXT: v_mov_b32_e32 v2, v9
+; GFX9-NEXT: v_mov_b32_e32 v3, v10
+; GFX9-NEXT: v_mov_b32_e32 v4, v11
+; GFX9-NEXT: image_load v[0:4], v[5:6], s[0:7] dmask:0xf unorm a16 tfe lwe
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: global_store_dword v5, v4, s[10:11]
+; GFX9-NEXT: global_store_dword v7, v4, s[10:11]
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-NEXT: ; return to shader part epilog
 ;
 ; GFX10-LABEL: load_3d_v4f32_xyzw_tfe_lwe:
 ; GFX10: ; %bb.0:
-; GFX10-NEXT: v_mov_b32_e32 v5, 0
-; GFX10-NEXT: v_mov_b32_e32 v3, 0xffff
+; GFX10-NEXT: v_mov_b32_e32 v7, 0
 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_mov_b32_e32 v6, v2
 ; GFX10-NEXT: s_mov_b32 s0, s2
-; GFX10-NEXT: s_mov_b32 s2, s4
-; GFX10-NEXT: s_mov_b32 s4, s6
-; GFX10-NEXT: s_mov_b32 s6, s8
-; GFX10-NEXT: s_lshl_b32 s8, s0, 16
-; GFX10-NEXT: v_mov_b32_e32 v6, v5
-; GFX10-NEXT: v_mov_b32_e32 v7, v5
-; GFX10-NEXT: v_mov_b32_e32 v8, v5
-; GFX10-NEXT: v_mov_b32_e32 v9, v5
-; GFX10-NEXT: v_and_or_b32 v10, v0, v3, v1
-; GFX10-NEXT: v_and_or_b32 v11, v2, v3, s8
 ; GFX10-NEXT: s_mov_b32 s1, s3
+; GFX10-NEXT: v_mov_b32_e32 v8, v7
+; GFX10-NEXT: v_mov_b32_e32 v9, v7
+; GFX10-NEXT: v_mov_b32_e32 v10, v7
+; GFX10-NEXT: v_mov_b32_e32 v11, v7
+; GFX10-NEXT: v_and_or_b32 v5, 0xffff, v0, v1
+; GFX10-NEXT: s_mov_b32 s2, s4
 ; GFX10-NEXT: s_mov_b32 s3, s5
+; GFX10-NEXT: s_mov_b32 s4, s6
 ; GFX10-NEXT: s_mov_b32 s5, s7
+; GFX10-NEXT: s_mov_b32 s6, s8
 ; GFX10-NEXT: s_mov_b32 s7, s9
-; GFX10-NEXT: v_mov_b32_e32 v0, v5
-; GFX10-NEXT: v_mov_b32_e32 v1, v6
-; GFX10-NEXT: v_mov_b32_e32 v2, v7
-; GFX10-NEXT: v_mov_b32_e32 v3, v8
-; GFX10-NEXT: v_mov_b32_e32 v4, v9
-; GFX10-NEXT: image_load v[0:4], v[10:11], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_3D unorm a16 tfe lwe
+; GFX10-NEXT: v_mov_b32_e32 v0, v7
+; GFX10-NEXT: v_mov_b32_e32 v1, v8
+; GFX10-NEXT: v_mov_b32_e32 v2, v9
+; GFX10-NEXT: v_mov_b32_e32 v3, v10
+; GFX10-NEXT: v_mov_b32_e32 v4, v11
+; GFX10-NEXT: image_load v[0:4], v[5:6], s[0:7] dmask:0xf dim:SQ_RSRC_IMG_3D unorm a16 tfe lwe
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: global_store_dword v5, v4, s[10:11]
+; GFX10-NEXT: global_store_dword v7, v4, s[10:11]
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT: ; return to shader part epilog
 %v = call { <4 x float>, i32 } @llvm.amdgcn.image.load.3d.sl_v4f32i32s.i16(i32 15, i16 %s, i16 %t, i16 %r, <8 x i32> %rsrc, i32 3, i32 0)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.g16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.g16.ll
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.g16.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.g16.ll
@@ -4,10 +4,6 @@
 define amdgpu_ps <4 x float> @sample_d_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dsdv, float %s) {
 ; GFX10-LABEL: sample_d_1d:
 ; GFX10: ; %bb.0: ; %main_body
-; GFX10-NEXT: v_mov_b32_e32 v3, 0xffff
-; GFX10-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10-NEXT: v_and_or_b32 v0, v0, v3, s12
-; GFX10-NEXT: v_and_or_b32 v1, v1, v3, s12
 ; GFX10-NEXT: image_sample_d_g16 v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: ; return to shader part epilog
@@ -35,16 +31,13 @@
 define amdgpu_ps <4 x float> @sample_d_3d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dtdh, half %drdh, half %dsdv, half %dtdv, half %drdv, float %s, float %t, float %r) {
 ; GFX10-LABEL: sample_d_3d:
 ; GFX10: ; %bb.0: ; %main_body
-; GFX10-NEXT: v_mov_b32_e32 v9, v2
-; GFX10-NEXT: v_mov_b32_e32 v10, v3
-; GFX10-NEXT: v_mov_b32_e32 v11, 0xffff
+; GFX10-NEXT: v_mov_b32_e32 v9, v3
+; GFX10-NEXT: v_mov_b32_e32 v10, 0xffff
 ; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX10-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10-NEXT: v_and_or_b32 v3, v9, v11, s12
-; GFX10-NEXT: v_and_or_b32 v2, v0, v11, v1
-; GFX10-NEXT: v_and_or_b32 v4, v10, v11, v4
-; GFX10-NEXT: v_and_or_b32 v5, v5, v11, s12
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_and_or_b32 v2, v0, v10, v1
+; GFX10-NEXT: v_and_or_b32 v4, v9, v10, v4
 ; GFX10-NEXT: image_sample_d_g16 v[0:3], v[2:8], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: ; return to shader part epilog
@@ -56,10 +49,6 @@
 define amdgpu_ps <4 x float> @sample_c_d_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dsdv, float %s) {
 ; GFX10-LABEL: sample_c_d_1d:
 ; GFX10: ; %bb.0: ; %main_body
-; GFX10-NEXT: v_mov_b32_e32 v4, 0xffff
-; GFX10-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10-NEXT: v_and_or_b32 v1, v1, v4, s12
-; GFX10-NEXT: v_and_or_b32 v2, v2, v4, s12
 ; GFX10-NEXT: image_sample_c_d_g16 v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: ; return to shader part epilog
@@ -87,10 +76,6 @@
 define amdgpu_ps <4 x float> @sample_d_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dsdv, float %s, float %clamp) {
 ; GFX10-LABEL: sample_d_cl_1d:
 ; GFX10: ; %bb.0: ; %main_body
-; GFX10-NEXT: v_mov_b32_e32 v4, 0xffff
-; GFX10-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10-NEXT: v_and_or_b32 v0, v0, v4, s12
-; GFX10-NEXT: v_and_or_b32 v1, v1, v4, s12
 ; GFX10-NEXT: image_sample_d_cl_g16 v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: ; return to shader part epilog
@@ -118,10 +103,6 @@
 define amdgpu_ps <4 x float> @sample_c_d_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dsdv, float %s, float %clamp) {
 ; GFX10-LABEL: sample_c_d_cl_1d:
 ; GFX10: ; %bb.0: ; %main_body
-; GFX10-NEXT: v_mov_b32_e32 v5, 0xffff
-; GFX10-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10-NEXT: v_and_or_b32 v1, v1, v5, s12
-; GFX10-NEXT: v_and_or_b32 v2, v2, v5, s12
 ; GFX10-NEXT: image_sample_c_d_cl_g16 v[0:3], v[0:4], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: ; return to shader part epilog
@@ -152,10 +133,6 @@
 define amdgpu_ps <4 x float> @sample_cd_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dsdv, float %s) {
 ; GFX10-LABEL: sample_cd_1d:
 ; GFX10: ; %bb.0: ; %main_body
-; GFX10-NEXT: v_mov_b32_e32 v3, 0xffff
-; GFX10-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10-NEXT: v_and_or_b32 v0, v0, v3, s12
-; GFX10-NEXT: v_and_or_b32 v1, v1, v3, s12
 ; GFX10-NEXT: image_sample_cd_g16 v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: ; return to shader part epilog
@@ -183,10 +160,6 @@
 define amdgpu_ps <4 x float> @sample_c_cd_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dsdv, float %s) {
 ; GFX10-LABEL: sample_c_cd_1d:
 ; GFX10: ; %bb.0: ; %main_body
-; GFX10-NEXT: v_mov_b32_e32 v4, 0xffff
-; GFX10-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10-NEXT: v_and_or_b32 v1, v1, v4, s12
-; GFX10-NEXT: v_and_or_b32 v2, v2, v4, s12
 ; GFX10-NEXT: image_sample_c_cd_g16 v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: ; return to shader part epilog
@@ -214,10 +187,6 @@
 define amdgpu_ps <4 x float> @sample_cd_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, half %dsdh, half %dsdv, float %s, float %clamp) {
 ; GFX10-LABEL: sample_cd_cl_1d:
 ; GFX10: ; %bb.0: ; %main_body
-; GFX10-NEXT: v_mov_b32_e32 v4, 0xffff
-; GFX10-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10-NEXT: v_and_or_b32 v0, v0, v4, s12
-; GFX10-NEXT: v_and_or_b32 v1, v1, v4, s12
 ; GFX10-NEXT: image_sample_cd_cl_g16 v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: ; return to shader part epilog
@@ -245,10 +214,6 @@
 define amdgpu_ps <4 x float> @sample_c_cd_cl_1d(<8 x i32> inreg %rsrc, <4 x i32> inreg %samp, float %zcompare, half %dsdh, half %dsdv, float %s, float %clamp) {
 ; GFX10-LABEL: sample_c_cd_cl_1d:
 ; GFX10: ; %bb.0: ; %main_body
-; GFX10-NEXT: v_mov_b32_e32 v5, 0xffff
-; GFX10-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10-NEXT: v_and_or_b32 v1, v1, v5, s12
-; GFX10-NEXT: v_and_or_b32 v2, v2, v5, s12
 ; GFX10-NEXT: image_sample_c_cd_cl_g16 v[0:3], v[0:4], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: ; return to shader part epilog
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.ltolz.a16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.ltolz.a16.ll
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.ltolz.a16.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.image.sample.ltolz.a16.ll
@@ -93,20 +93,17 @@
 ; GFX9-LABEL: sample_c_l_1d:
 ; GFX9: ; %bb.0: ; %main_body
 ; GFX9-NEXT: s_mov_b32 s0, s2
-; GFX9-NEXT: s_mov_b32 s2, s4
-; GFX9-NEXT: s_mov_b32 s4, s6
-; GFX9-NEXT: s_mov_b32 s6, s8
-; GFX9-NEXT: s_mov_b32 s8, s10
-; GFX9-NEXT: s_mov_b32 s10, s12
-; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff
-; GFX9-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX9-NEXT: s_mov_b32 s1, s3
+; GFX9-NEXT: s_mov_b32 s2, s4
 ; GFX9-NEXT: s_mov_b32 s3, s5
+; GFX9-NEXT: s_mov_b32 s4, s6
 ; GFX9-NEXT: s_mov_b32 s5, s7
+; GFX9-NEXT: s_mov_b32 s6, s8
 ; GFX9-NEXT: s_mov_b32 s7, s9
+; GFX9-NEXT: s_mov_b32 s8, s10
 ; GFX9-NEXT: s_mov_b32 s9, s11
+; GFX9-NEXT: s_mov_b32 s10, s12
 ; GFX9-NEXT: s_mov_b32 s11, s13
-; GFX9-NEXT: v_and_or_b32 v1, v1, v2, s12
 ; GFX9-NEXT: image_sample_c_lz v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf a16
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-NEXT: ; return to shader part epilog
@@ -114,18 +111,16 @@
 ; GFX10-LABEL: sample_c_l_1d:
 ; GFX10: ; %bb.0: ; %main_body
 ; GFX10-NEXT: s_mov_b32 s0, s2
-; GFX10-NEXT: s_mov_b32 s2, s4
-; GFX10-NEXT: s_mov_b32 s4, s6
-; GFX10-NEXT: s_mov_b32 s6, s8
-; GFX10-NEXT: s_mov_b32 s8, s10
-; GFX10-NEXT: s_mov_b32 s10, s12
-; GFX10-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX10-NEXT: s_mov_b32 s1, s3
-; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, s12
+; GFX10-NEXT: s_mov_b32 s2, s4
 ; GFX10-NEXT: s_mov_b32 s3, s5
+; GFX10-NEXT: s_mov_b32 s4, s6
 ; GFX10-NEXT: s_mov_b32 s5, s7
+; GFX10-NEXT: s_mov_b32 s6, s8
 ; GFX10-NEXT: s_mov_b32 s7, s9
+; GFX10-NEXT: s_mov_b32 s8, s10
 ; GFX10-NEXT: s_mov_b32 s9, s11
+; GFX10-NEXT: s_mov_b32 s10, s12
 ; GFX10-NEXT: s_mov_b32 s11, s13
 ; GFX10-NEXT: image_sample_c_lz v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D a16
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -185,20 +180,17 @@
 ; GFX9-LABEL: sample_l_o_1d:
 ; GFX9: ; %bb.0: ; %main_body
 ; GFX9-NEXT: s_mov_b32 s0, s2
-; GFX9-NEXT: s_mov_b32 s2, s4
-; GFX9-NEXT: s_mov_b32 s4, s6
-; GFX9-NEXT: s_mov_b32 s6, s8
-; GFX9-NEXT: s_mov_b32 s8, s10
-; GFX9-NEXT: s_mov_b32 s10, s12
-; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff
-; GFX9-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX9-NEXT: s_mov_b32 s1, s3
+; GFX9-NEXT: s_mov_b32 s2, s4
 ; GFX9-NEXT: s_mov_b32 s3, s5
+; GFX9-NEXT: s_mov_b32 s4, s6
 ; GFX9-NEXT: s_mov_b32 s5, s7
+; GFX9-NEXT: s_mov_b32 s6, s8
 ; GFX9-NEXT: s_mov_b32 s7, s9
+; GFX9-NEXT: s_mov_b32 s8, s10
 ; GFX9-NEXT: s_mov_b32 s9, s11
+; GFX9-NEXT: s_mov_b32 s10, s12
 ; GFX9-NEXT: s_mov_b32 s11, s13
-; GFX9-NEXT: v_and_or_b32 v1, v1, v2, s12
 ; GFX9-NEXT: image_sample_lz_o v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf a16
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-NEXT: ; return to shader part epilog
@@ -206,18 +198,16 @@
 ; GFX10-LABEL: sample_l_o_1d:
 ; GFX10: ; %bb.0: ; %main_body
 ; GFX10-NEXT: s_mov_b32 s0, s2
-; GFX10-NEXT: s_mov_b32 s2, s4
-; GFX10-NEXT: s_mov_b32 s4, s6
-; GFX10-NEXT: s_mov_b32 s6, s8
-; GFX10-NEXT: s_mov_b32 s8, s10
-; GFX10-NEXT: s_mov_b32 s10, s12
-; GFX10-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX10-NEXT: s_mov_b32 s1, s3
-; GFX10-NEXT: v_and_or_b32 v1, 0xffff, v1, s12
+; GFX10-NEXT: s_mov_b32 s2, s4
 ; GFX10-NEXT: s_mov_b32 s3, s5
+; GFX10-NEXT: s_mov_b32 s4, s6
 ; GFX10-NEXT: s_mov_b32 s5, s7
+; GFX10-NEXT: s_mov_b32 s6, s8
 ; GFX10-NEXT: s_mov_b32 s7, s9
+; GFX10-NEXT: s_mov_b32 s8, s10
 ; GFX10-NEXT: s_mov_b32 s9, s11
+; GFX10-NEXT: s_mov_b32 s10, s12
 ; GFX10-NEXT: s_mov_b32 s11, s13
 ; GFX10-NEXT: image_sample_lz_o v[0:3], v[0:1], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D a16
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -277,20 +267,17 @@
 ; GFX9-LABEL: sample_c_l_o_1d:
 ; GFX9: ; %bb.0: ; %main_body
 ; GFX9-NEXT: s_mov_b32 s0, s2
-; GFX9-NEXT: s_mov_b32 s2, s4
-; GFX9-NEXT: s_mov_b32 s4, s6
-; GFX9-NEXT: s_mov_b32 s6, s8
-; GFX9-NEXT: s_mov_b32 s8, s10
-; GFX9-NEXT: s_mov_b32 s10, s12
-; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
-; GFX9-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX9-NEXT: s_mov_b32 s1, s3
+; GFX9-NEXT: s_mov_b32 s2, s4
 ; GFX9-NEXT: s_mov_b32 s3, s5
+; GFX9-NEXT: s_mov_b32 s4, s6
 ; GFX9-NEXT: s_mov_b32 s5, s7
+; GFX9-NEXT: s_mov_b32 s6, s8
 ; GFX9-NEXT: s_mov_b32 s7, s9
+; GFX9-NEXT: s_mov_b32 s8, s10
 ; GFX9-NEXT: s_mov_b32 s9, s11
+; GFX9-NEXT: s_mov_b32 s10, s12
 ; GFX9-NEXT: s_mov_b32 s11, s13
-; GFX9-NEXT: v_and_or_b32 v2, v2, v3, s12
 ; GFX9-NEXT: image_sample_c_lz_o v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf a16
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-NEXT: ; return to shader part epilog
@@ -298,18 +285,16 @@
 ; GFX10-LABEL: sample_c_l_o_1d:
 ; GFX10: ; %bb.0: ; %main_body
 ; GFX10-NEXT: s_mov_b32 s0, s2
-; GFX10-NEXT: s_mov_b32 s2, s4
-; GFX10-NEXT: s_mov_b32 s4, s6
-; GFX10-NEXT: s_mov_b32 s6, s8
-; GFX10-NEXT: s_mov_b32 s8, s10
-; GFX10-NEXT: s_mov_b32 s10, s12
-; GFX10-NEXT: s_lshl_b32 s12, s0, 16
 ; GFX10-NEXT: s_mov_b32 s1, s3
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff, v2, s12
+; GFX10-NEXT: s_mov_b32 s2, s4
 ; GFX10-NEXT: s_mov_b32 s3, s5
+; GFX10-NEXT: s_mov_b32 s4, s6
 ; GFX10-NEXT: s_mov_b32 s5, s7
+; GFX10-NEXT: s_mov_b32 s6, s8
 ; GFX10-NEXT: s_mov_b32 s7, s9
+; GFX10-NEXT: s_mov_b32 s8, s10
 ; GFX10-NEXT: s_mov_b32 s9, s11
+; GFX10-NEXT: s_mov_b32 s10, s12
 ; GFX10-NEXT: s_mov_b32 s11, s13
 ; GFX10-NEXT: image_sample_c_lz_o v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D a16
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.g16.a16.dim.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.g16.a16.dim.ll
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.g16.a16.dim.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.g16.a16.dim.ll
@@ -13,8 +13,6 @@
 ;
 ; GFX10GISEL-LABEL: sample_d_1d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v2, 0xffff, v2, s12
 ; GFX10GISEL-NEXT: image_sample_d v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D a16
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -62,11 +60,10 @@
 ;
 ; GFX10GISEL-LABEL: sample_d_3d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v9, 0xffff
-; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v6, v6, v9, v7
-; GFX10GISEL-NEXT: v_and_or_b32 v7, v8, v9, s12
+; GFX10GISEL-NEXT: v_mov_b32_e32 v9, v7
+; GFX10GISEL-NEXT: v_mov_b32_e32 v7, v8
+; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v8, 16, v9
+; GFX10GISEL-NEXT: v_and_or_b32 v6, 0xffff, v6, v8
 ; GFX10GISEL-NEXT: image_sample_d v[0:3], v[0:7], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D a16
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -84,8 +81,6 @@
 ;
 ; GFX10GISEL-LABEL: sample_c_d_1d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v3, 0xffff, v3, s12
 ; GFX10GISEL-NEXT: image_sample_c_d v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D a16
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -152,11 +147,10 @@
 ;
 ; GFX10GISEL-LABEL: sample_d_cl_2d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v7, 0xffff
-; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v4, v4, v7, v5
-; GFX10GISEL-NEXT: v_and_or_b32 v5, v6, v7, s12
+; GFX10GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX10GISEL-NEXT: v_mov_b32_e32 v5, v6
+; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v6, 16, v7
+; GFX10GISEL-NEXT: v_and_or_b32 v4, 0xffff, v4, v6
 ; GFX10GISEL-NEXT: image_sample_d_cl v[0:3], v[0:5], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -203,11 +197,10 @@
 ;
 ; GFX10GISEL-LABEL: sample_c_d_cl_2d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v8, 0xffff
-; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v5, v5, v8, v6
-; GFX10GISEL-NEXT: v_and_or_b32 v6, v7, v8, s12
+; GFX10GISEL-NEXT: v_mov_b32_e32 v8, v6
+; GFX10GISEL-NEXT: v_mov_b32_e32 v6, v7
+; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v7, 16, v8
+; GFX10GISEL-NEXT: v_and_or_b32 v5, 0xffff, v5, v7
 ; GFX10GISEL-NEXT: image_sample_c_d_cl v[0:3], v[0:6], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -225,8 +218,6 @@
 ;
 ; GFX10GISEL-LABEL: sample_cd_1d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v2, 0xffff, v2, s12
 ; GFX10GISEL-NEXT: image_sample_cd v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D a16
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -265,8 +256,6 @@
 ;
 ; GFX10GISEL-LABEL: sample_c_cd_1d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v3, 0xffff, v3, s12
 ; GFX10GISEL-NEXT: image_sample_c_cd v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D a16
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -333,11 +322,10 @@
 ;
 ; GFX10GISEL-LABEL: sample_cd_cl_2d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v7, 0xffff
-; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v4, v4, v7, v5
-; GFX10GISEL-NEXT: v_and_or_b32 v5, v6, v7, s12
+; GFX10GISEL-NEXT: v_mov_b32_e32 v7, v5
+; GFX10GISEL-NEXT: v_mov_b32_e32 v5, v6
+; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v6, 16, v7
+; GFX10GISEL-NEXT: v_and_or_b32 v4, 0xffff, v4, v6
 ; GFX10GISEL-NEXT: image_sample_cd_cl v[0:3], v[0:5], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -384,11 +372,10 @@
 ;
 ; GFX10GISEL-LABEL: sample_c_cd_cl_2d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v8, 0xffff
-; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v5, v5, v8, v6
-; GFX10GISEL-NEXT: v_and_or_b32 v6, v7, v8, s12
+; GFX10GISEL-NEXT: v_mov_b32_e32 v8, v6
+; GFX10GISEL-NEXT: v_mov_b32_e32 v6, v7
+; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v7, 16, v8
+; GFX10GISEL-NEXT: v_and_or_b32 v5, 0xffff, v5, v7
 ; GFX10GISEL-NEXT: image_sample_c_cd_cl v[0:3], v[0:6], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_2D a16
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -415,11 +402,10 @@
 ;
 ; GFX10GISEL-LABEL: sample_c_d_o_2darray_V1:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v9, 0xffff
-; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v6, v6, v9, v7
-; GFX10GISEL-NEXT: v_and_or_b32 v7, v8, v9, s12
+; GFX10GISEL-NEXT: v_mov_b32_e32 v9, v7
+; GFX10GISEL-NEXT: v_mov_b32_e32 v7, v8
+; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v8, 16, v9
+; GFX10GISEL-NEXT: v_and_or_b32 v6, 0xffff, v6, v8
 ; GFX10GISEL-NEXT: image_sample_c_d_o v0, v[0:7], s[0:7], s[8:11] dmask:0x4 dim:SQ_RSRC_IMG_2D_ARRAY a16
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -446,11 +432,10 @@
 ;
 ; GFX10GISEL-LABEL: sample_c_d_o_2darray_V2:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v9, 0xffff
-; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v6, v6, v9, v7
-; GFX10GISEL-NEXT: v_and_or_b32 v7, v8, v9, s12
+; GFX10GISEL-NEXT: v_mov_b32_e32 v9, v7
+; GFX10GISEL-NEXT: v_mov_b32_e32 v7, v8
+; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v8, 16, v9
+; GFX10GISEL-NEXT: v_and_or_b32 v6, 0xffff, v6, v8
 ; GFX10GISEL-NEXT: image_sample_c_d_o v[0:1], v[0:7], s[0:7], s[8:11] dmask:0x6 dim:SQ_RSRC_IMG_2D_ARRAY a16
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -490,10 +475,6 @@
 ;
 ; GFX10GISEL-LABEL: sample_g16_noa16_d_1d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v3, 0xffff
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v0, v0, v3, s12
-; GFX10GISEL-NEXT: v_and_or_b32 v1, v1, v3, s12
 ; GFX10GISEL-NEXT: image_sample_d_g16 v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -545,16 +526,13 @@
 ;
 ; GFX10GISEL-LABEL: sample_g16_noa16_d_3d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v9, v2
-; GFX10GISEL-NEXT: v_mov_b32_e32 v10, v3
-; GFX10GISEL-NEXT: v_mov_b32_e32 v11, 0xffff
+; GFX10GISEL-NEXT: v_mov_b32_e32 v9, v3
+; GFX10GISEL-NEXT: v_mov_b32_e32 v10, 0xffff
 ; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10GISEL-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v3, v9, v11, s12
-; GFX10GISEL-NEXT: v_and_or_b32 v2, v0, v11, v1
-; GFX10GISEL-NEXT: v_and_or_b32 v4, v10, v11, v4
-; GFX10GISEL-NEXT: v_and_or_b32 v5, v5, v11, s12
+; GFX10GISEL-NEXT: v_mov_b32_e32 v3, v2
+; GFX10GISEL-NEXT: v_and_or_b32 v2, v0, v10, v1
+; GFX10GISEL-NEXT: v_and_or_b32 v4, v9, v10, v4
 ; GFX10GISEL-NEXT: image_sample_d_g16 v[0:3], v[2:8], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_3D
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -572,10 +550,6 @@
 ;
 ; GFX10GISEL-LABEL: sample_g16_noa16_c_d_1d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v4, 0xffff
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v1, v1, v4, s12
-; GFX10GISEL-NEXT: v_and_or_b32 v2, v2, v4, s12
 ; GFX10GISEL-NEXT: image_sample_c_d_g16 v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -620,10 +594,6 @@
 ;
 ; GFX10GISEL-LABEL: sample_g16_noa16_d_cl_1d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v4, 0xffff
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v0, v0, v4, s12
-; GFX10GISEL-NEXT: v_and_or_b32 v1, v1, v4, s12
 ; GFX10GISEL-NEXT: image_sample_d_cl_g16 v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -668,10 +638,6 @@
 ;
 ; GFX10GISEL-LABEL: sample_g16_noa16_c_d_cl_1d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v5, 0xffff
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v1, v1, v5, s12
-; GFX10GISEL-NEXT: v_and_or_b32 v2, v2, v5, s12
 ; GFX10GISEL-NEXT: image_sample_c_d_cl_g16 v[0:3], v[0:4], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -721,10 +687,6 @@
 ;
 ; GFX10GISEL-LABEL: sample_g16_noa16_cd_1d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v3, 0xffff
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v0, v0, v3, s12
-; GFX10GISEL-NEXT: v_and_or_b32 v1, v1, v3, s12
 ; GFX10GISEL-NEXT: image_sample_cd_g16 v[0:3], v[0:2], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -769,10 +731,6 @@
 ;
 ; GFX10GISEL-LABEL: sample_g16_noa16_c_cd_1d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v4, 0xffff
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v1, v1, v4, s12
-; GFX10GISEL-NEXT: v_and_or_b32 v2, v2, v4, s12
 ; GFX10GISEL-NEXT: image_sample_c_cd_g16 v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -817,10 +775,6 @@
 ;
 ; GFX10GISEL-LABEL: sample_g16_noa16_cd_cl_1d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v4, 0xffff
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v0, v0, v4, s12
-; GFX10GISEL-NEXT: v_and_or_b32 v1, v1, v4, s12
 ; GFX10GISEL-NEXT: image_sample_cd_cl_g16 v[0:3], v[0:3], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
@@ -865,10 +819,6 @@
 ;
 ; GFX10GISEL-LABEL: sample_g16_noa16_c_cd_cl_1d:
 ; GFX10GISEL: ; %bb.0: ; %main_body
-; GFX10GISEL-NEXT: v_mov_b32_e32 v5, 0xffff
-; GFX10GISEL-NEXT: s_lshl_b32 s12, s0, 16
-; GFX10GISEL-NEXT: v_and_or_b32 v1, v1, v5, s12
-; GFX10GISEL-NEXT: v_and_or_b32 v2, v2, v5, s12
 ; GFX10GISEL-NEXT: image_sample_c_cd_cl_g16 v[0:3], v[0:4], s[0:7], s[8:11] dmask:0xf dim:SQ_RSRC_IMG_1D
 ; GFX10GISEL-NEXT: s_waitcnt vmcnt(0)
 ; GFX10GISEL-NEXT: ; return to shader part epilog
diff --git a/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll
@@ -7,11 +7,11 @@
 ; CHECK-LABEL: smulo_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 7
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 7
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y)
 %b = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 0
@@ -26,11 +26,11 @@
 ; CHECK-LABEL: smulo_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 7
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 7
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y)
 %b = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 0
@@ -45,11 +45,11 @@
 ; CHECK-LABEL: smulo_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 7
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 7
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y)
 %b = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 0
@@ -64,11 +64,11 @@
 ; CHECK-LABEL: smulo_nxv8i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 7
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 7
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y)
 %b = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 0
@@ -83,11 +83,11 @@
 ; CHECK-LABEL: smulo_nxv16i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vmulh.vv v26, v8, v10
-; CHECK-NEXT: vmul.vv v28, v8, v10
-; CHECK-NEXT: vsra.vi v30, v28, 7
-; CHECK-NEXT: vmsne.vv v0, v26, v30
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vmulh.vv v12, v8, v10
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vsra.vi v10, v8, 7
+; CHECK-NEXT: vmsne.vv v0, v12, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
 %b = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 0
@@ -102,10 +102,10 @@
 ; CHECK-LABEL: smulo_nxv32i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
-; CHECK-NEXT: vmulh.vv v28, v8, v12
+; CHECK-NEXT: vmulh.vv v16, v8, v12
 ; CHECK-NEXT: vmul.vv v8, v8, v12
 ; CHECK-NEXT: vsra.vi v12, v8, 7
-; CHECK-NEXT: vmsne.vv v0, v28, v12
+; CHECK-NEXT: vmsne.vv v0, v16, v12
 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y)
@@ -140,11 +140,11 @@
 ; CHECK-LABEL: smulo_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 15
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 15
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y)
 %b = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 0
@@ -159,11 +159,11 @@
 ; CHECK-LABEL: smulo_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 15
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 15
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y)
 %b = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 0
@@ -178,11 +178,11 @@
 ; CHECK-LABEL: smulo_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 15
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 15
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y)
 %b = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 0
@@ -197,11 +197,11 @@
 ; CHECK-LABEL: smulo_nxv8i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT: vmulh.vv v26, v8, v10
-; CHECK-NEXT: vmul.vv v28, v8, v10
-; CHECK-NEXT: vsra.vi v30, v28, 15
-; CHECK-NEXT: vmsne.vv v0, v26, v30
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vmulh.vv v12, v8, v10
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vsra.vi v10, v8, 15
+; CHECK-NEXT: vmsne.vv v0, v12, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
 %b = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 0
@@ -216,10 +216,10 @@
 ; CHECK-LABEL: smulo_nxv16i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT: vmulh.vv v28, v8, v12
+; CHECK-NEXT: vmulh.vv v16, v8, v12
 ; CHECK-NEXT: vmul.vv v8, v8, v12
 ; CHECK-NEXT: vsra.vi v12, v8, 15
-; CHECK-NEXT: vmsne.vv v0, v28, v12
+; CHECK-NEXT: vmsne.vv v0, v16, v12
 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y)
@@ -254,11 +254,11 @@
 ; CHECK-LABEL: smulo_nxv1i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 31
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 31
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y)
 %b = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 0
@@ -273,11 +273,11 @@
 ; CHECK-LABEL: smulo_nxv2i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 31
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 31
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
 %b = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 0
@@ -292,11 +292,11 @@
 ; CHECK-LABEL: smulo_nxv4i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT: vmulh.vv v26, v8, v10
-; CHECK-NEXT: vmul.vv v28, v8, v10
-; CHECK-NEXT: vsra.vi v30, v28, 31
-; CHECK-NEXT: vmsne.vv v0, v26, v30
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vmulh.vv v12, v8, v10
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vsra.vi v10, v8, 31
+; CHECK-NEXT: vmsne.vv v0, v12, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
 %b = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 0
@@ -311,10 +311,10 @@
 ; CHECK-LABEL: smulo_nxv8i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT: vmulh.vv v28, v8, v12
+; CHECK-NEXT: vmulh.vv v16, v8, v12
 ; CHECK-NEXT: vmul.vv v8, v8, v12
 ; CHECK-NEXT: vsra.vi v12, v8, 31
-; CHECK-NEXT: vmsne.vv v0, v28, v12
+; CHECK-NEXT: vmsne.vv v0, v16, v12
 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y)
@@ -349,12 +349,12 @@
 ; CHECK-LABEL: smulo_nxv1i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
 ; CHECK-NEXT: addi a0, zero, 63
-; CHECK-NEXT: vsra.vx v27, v26, a0
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vsra.vx v9, v8, a0
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y)
 %b = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 0
@@ -369,12 +369,12 @@
 ; CHECK-LABEL: smulo_nxv2i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmulh.vv v26, v8, v10
-; CHECK-NEXT: vmul.vv v28, v8, v10
+; CHECK-NEXT: vmulh.vv v12, v8, v10
+; CHECK-NEXT: vmul.vv v8, v8, v10
 ; CHECK-NEXT: addi a0, zero, 63
-; CHECK-NEXT: vsra.vx v30, v28, a0
-; CHECK-NEXT: vmsne.vv v0, v26, v30
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vsra.vx v10, v8, a0
+; CHECK-NEXT: vmsne.vv v0, v12, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y)
 %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 0
@@ -389,11 +389,11 @@
 ; CHECK-LABEL: smulo_nxv4i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmulh.vv v28, v8, v12
+; CHECK-NEXT: vmulh.vv v16, v8, v12
 ; CHECK-NEXT: vmul.vv v8, v8, v12
 ; CHECK-NEXT: addi a0, zero, 63
 ; CHECK-NEXT: vsra.vx v12, v8, a0
-; CHECK-NEXT: vmsne.vv v0, v28, v12
+; CHECK-NEXT: vmsne.vv v0, v16, v12
 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y)
diff --git a/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll
@@ -7,10 +7,10 @@
 ; CHECK-LABEL: umulo_nxv1i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y)
 %b = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 0
@@ -25,10 +25,10 @@
 ; CHECK-LABEL: umulo_nxv2i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y)
 %b = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 0
@@ -43,10 +43,10 @@
 ; CHECK-LABEL: umulo_nxv4i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y)
 %b = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 0
@@ -61,10 +61,10 @@
 ; CHECK-LABEL: umulo_nxv8i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y)
 %b = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 0
@@ -79,10 +79,10 @@
 ; CHECK-LABEL: umulo_nxv16i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vmulhu.vv v26, v8, v10
-; CHECK-NEXT: vmsne.vi v0, v26, 0
-; CHECK-NEXT: vmul.vv v26, v8, v10
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulhu.vv v12, v8, v10
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
 %b = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 0
@@ -97,10 +97,10 @@
 ; CHECK-LABEL: umulo_nxv32i8:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
-; CHECK-NEXT: vmulhu.vv v28, v8, v12
-; CHECK-NEXT: vmsne.vi v0, v28, 0
-; CHECK-NEXT: vmul.vv v28, v8, v12
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vmulhu.vv v16, v8, v12
+; CHECK-NEXT: vmsne.vi v0, v16, 0
+; CHECK-NEXT: vmul.vv v8, v8, v12
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y)
 %b = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 0
@@ -133,10 +133,10 @@
 ; CHECK-LABEL: umulo_nxv1i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y)
 %b = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 0
@@ -151,10 +151,10 @@
 ; CHECK-LABEL: umulo_nxv2i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y)
 %b = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 0
@@ -169,10 +169,10 @@
 ; CHECK-LABEL: umulo_nxv4i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y)
 %b = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 0
@@ -187,10 +187,10 @@
 ; CHECK-LABEL: umulo_nxv8i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT: vmulhu.vv v26, v8, v10
-; CHECK-NEXT: vmsne.vi v0, v26, 0
-; CHECK-NEXT: vmul.vv v26, v8, v10
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulhu.vv v12, v8, v10
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
 %b = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 0
@@ -205,10 +205,10 @@
 ; CHECK-LABEL: umulo_nxv16i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT: vmulhu.vv v28, v8, v12
-; CHECK-NEXT: vmsne.vi v0, v28, 0
-; CHECK-NEXT: vmul.vv v28, v8, v12
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vmulhu.vv v16, v8, v12
+; CHECK-NEXT: vmsne.vi v0, v16, 0
+; CHECK-NEXT: vmul.vv v8, v8, v12
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y)
 %b = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 0
@@ -241,10 +241,10 @@
 ; CHECK-LABEL: umulo_nxv1i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y)
 %b = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 0
@@ -259,10 +259,10 @@
 ; CHECK-LABEL: umulo_nxv2i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
 %b = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 0
@@ -277,10 +277,10 @@
 ; CHECK-LABEL: umulo_nxv4i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT: vmulhu.vv v26, v8, v10
-; CHECK-NEXT: vmsne.vi v0, v26, 0
-; CHECK-NEXT: vmul.vv v26, v8, v10
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulhu.vv v12, v8, v10
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
 %b = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 0
@@ -295,10 +295,10 @@
 ; CHECK-LABEL: umulo_nxv8i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT: vmulhu.vv v28, v8, v12
-; CHECK-NEXT: vmsne.vi v0, v28, 0
-; CHECK-NEXT: vmul.vv v28, v8, v12
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vmulhu.vv v16, v8, v12
+; CHECK-NEXT: vmsne.vi v0, v16, 0
+; CHECK-NEXT: vmul.vv v8, v8, v12
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y)
 %b = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 0
@@ -331,10 +331,10 @@
 ; CHECK-LABEL: umulo_nxv1i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y)
 %b = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 0
@@ -349,10 +349,10 @@
 ; CHECK-LABEL: umulo_nxv2i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmulhu.vv v26, v8, v10
-; CHECK-NEXT: vmsne.vi v0, v26, 0
-; CHECK-NEXT: vmul.vv v26, v8, v10
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulhu.vv v12, v8, v10
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y)
 %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 0
@@ -367,10 +367,10 @@
 ; CHECK-LABEL: umulo_nxv4i64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmulhu.vv v28, v8, v12
-; CHECK-NEXT: vmsne.vi v0, v28, 0
-; CHECK-NEXT: vmul.vv v28, v8, v12
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vmulhu.vv v16, v8, v12
+; CHECK-NEXT: vmsne.vi v0, v16, 0
+; CHECK-NEXT: vmul.vv v8, v8, v12
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
 ; CHECK-NEXT: ret
 %a = call { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y)
 %b = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 0