diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -6073,7 +6073,7 @@
   } else {
     SatVal = MIRBuilder.buildConstant(Ty, APInt::getMaxValue(BW));
   }
-  auto Ov = MIRBuilder.buildICmp(CmpInst::ICMP_NE, Ty, LHS, Orig);
+  auto Ov = MIRBuilder.buildICmp(CmpInst::ICMP_NE, BoolTy, LHS, Orig);
   MIRBuilder.buildSelect(Res, Ov, SatVal, Result);
 
   MI.eraseFromParent();
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sshlsat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sshlsat.mir
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sshlsat.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-sshlsat.mir
@@ -25,55 +25,53 @@
     ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s32), [[C4]]
     ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C2]], [[C3]]
-    ; GFX6: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL]](s32), [[ASHR]]
-    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[SELECT]], [[SHL1]]
+    ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[ASHR]]
+    ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL1]]
     ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SELECT1]], [[C1]](s32)
     ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
     ; GFX6: $vgpr0 = COPY [[COPY4]](s32)
     ; GFX8-LABEL: name: sshlsat_s7
     ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-    ; GFX8: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32)
-    ; GFX8: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32)
-    ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7)
-    ; GFX8: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[TRUNC1]](s7)
-    ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9
-    ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
-    ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[ZEXT]](s16)
-    ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[ZEXT]](s16)
-    ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
-    ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
-    ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
-    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s16), [[C3]]
-    ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[C2]]
-    ; GFX8: [[ICMP1:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[SHL]](s16), [[ASHR]]
-    ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s16), [[SELECT]], [[SHL1]]
-    ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SELECT1]], [[C]](s16)
-    ; GFX8: [[TRUNC2:%[0-9]+]]:_(s7) = G_TRUNC [[ASHR1]](s16)
-    ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s7)
-    ; GFX8: $vgpr0 = COPY [[ANYEXT1]](s32)
+    ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+    ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 127
+    ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+    ; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
+    ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 9
+    ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C1]](s16)
+    ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16)
+    ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[AND]](s16)
+    ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+    ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+    ; GFX8: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+    ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s16), [[C4]]
+    ; GFX8: [[SELECT:%[0-9]+]]:_(s16)
= G_SELECT [[ICMP]](s1), [[C2]], [[C3]] + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s16), [[ASHR]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL1]] + ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SELECT1]], [[C1]](s16) + ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR1]](s16) + ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32) ; GFX9-LABEL: name: sshlsat_s7 ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 - ; GFX9: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32) - ; GFX9: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32) - ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7) - ; GFX9: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[TRUNC1]](s7) - ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9 - ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16) - ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[ZEXT]](s16) - ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[ZEXT]](s16) - ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 - ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 - ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 - ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s16), [[C3]] - ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[C2]] - ; GFX9: [[ICMP1:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[SHL]](s16), [[ASHR]] - ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s16), [[SELECT]], [[SHL1]] - ; GFX9: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SELECT1]], [[C]](s16) - ; GFX9: [[TRUNC2:%[0-9]+]]:_(s7) = G_TRUNC [[ASHR1]](s16) - ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s7) - ; GFX9: $vgpr0 = COPY [[ANYEXT1]](s32) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 127 + ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]] + ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 9 + ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C1]](s16) + ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16) + ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[AND]](s16) + ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 + ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 + ; GFX9: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s16), [[C4]] + ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C2]], [[C3]] + ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s16), [[ASHR]] + ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL1]] + ; GFX9: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SELECT1]], [[C1]](s16) + ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR1]](s16) + ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32) %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $vgpr1 %2:_(s7) = G_TRUNC %0 @@ -105,55 +103,53 @@ ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s32), [[C4]] ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C2]], [[C3]] - ; GFX6: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL]](s32), [[ASHR]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[SELECT]], [[SHL1]] + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[ASHR]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL1]] ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SELECT1]], [[C1]](s32) ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY 
[[ASHR1]](s32) ; GFX6: $vgpr0 = COPY [[COPY4]](s32) ; GFX8-LABEL: name: sshlsat_s8 ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 - ; GFX8: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32) - ; GFX8: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32) - ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8) - ; GFX8: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[TRUNC1]](s8) - ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 - ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16) - ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[ZEXT]](s16) - ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[ZEXT]](s16) - ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 - ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 - ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 - ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s16), [[C3]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[C2]] - ; GFX8: [[ICMP1:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[SHL]](s16), [[ASHR]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s16), [[SELECT]], [[SHL1]] - ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SELECT1]], [[C]](s16) - ; GFX8: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[ASHR1]](s16) - ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s8) - ; GFX8: $vgpr0 = COPY [[ANYEXT1]](s32) + ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 + ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]] + ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 + ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C1]](s16) + ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16) + ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[AND]](s16) + ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 + ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 + ; GFX8: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s16), [[C4]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C2]], [[C3]] + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s16), [[ASHR]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL1]] + ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SELECT1]], [[C1]](s16) + ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR1]](s16) + ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32) ; GFX9-LABEL: name: sshlsat_s8 ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 - ; GFX9: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32) - ; GFX9: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32) - ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8) - ; GFX9: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[TRUNC1]](s8) - ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 - ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16) - ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[ZEXT]](s16) - ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[ZEXT]](s16) - ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 - ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 - ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 - ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s16), [[C3]] - ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[C2]] - ; GFX9: [[ICMP1:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[SHL]](s16), [[ASHR]] - ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT 
[[ICMP1]](s16), [[SELECT]], [[SHL1]] - ; GFX9: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SELECT1]], [[C]](s16) - ; GFX9: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[ASHR1]](s16) - ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s8) - ; GFX9: $vgpr0 = COPY [[ANYEXT1]](s32) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 + ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]] + ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 + ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C1]](s16) + ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16) + ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[AND]](s16) + ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 + ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 + ; GFX9: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s16), [[C4]] + ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C2]], [[C3]] + ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s16), [[ASHR]] + ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL1]] + ; GFX9: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SELECT1]], [[C1]](s16) + ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR1]](s16) + ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32) %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $vgpr1 %2:_(s8) = G_TRUNC %0 @@ -193,8 +189,8 @@ ; GFX6: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s32), [[C6]] ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C4]], [[C5]] - ; GFX6: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL]](s32), [[ASHR]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[SELECT]], [[SHL1]] + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[ASHR]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL1]] ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SELECT1]], [[C2]](s32) ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) @@ -204,8 +200,8 @@ ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL3]], [[AND1]](s32) ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL2]](s32), [[C6]] ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C4]], [[C5]] - ; GFX6: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL2]](s32), [[ASHR2]] - ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s32), [[SELECT2]], [[SHL3]] + ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL2]](s32), [[ASHR2]] + ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL3]] ; GFX6: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[SELECT3]], [[C2]](s32) ; GFX6: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR1]](s32) @@ -221,63 +217,97 @@ ; GFX8-LABEL: name: sshlsat_v2s8 ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 + ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32) + ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32) + ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 + ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32) + ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32) + ; GFX8: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], 
[[C1]](s32) + ; GFX8: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32) ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) - ; GFX8: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16) - ; GFX8: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16) - ; GFX8: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>) - ; GFX8: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>) - ; GFX8: [[SSHLSAT:%[0-9]+]]:_(s8) = G_SSHLSAT [[UV]], [[UV2]](s8) - ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8) - ; GFX8: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[UV3]](s8) - ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 - ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16) - ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[ZEXT]](s16) - ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[ZEXT]](s16) - ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 - ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 - ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 - ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s16), [[C3]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[C2]] - ; GFX8: [[ICMP1:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[SHL]](s16), [[ASHR]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s16), [[SELECT]], [[SHL1]] - ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SELECT1]], [[C]](s16) - ; GFX8: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[ASHR1]](s16) - ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[SSHLSAT]](s8), [[TRUNC2]](s8) - ; GFX8: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>) - ; GFX8: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8) - ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16) - ; GFX8: $vgpr0 = COPY [[ANYEXT1]](s32) + ; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C3]] + ; GFX8: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 + ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C4]](s16) + ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16) + ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[AND]](s16) + ; GFX8: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 + ; GFX8: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 + ; GFX8: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s16), [[C7]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C5]], [[C6]] + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s16), [[ASHR]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL1]] + ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SELECT1]], [[C4]](s16) + ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32) + ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32) + ; GFX8: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C3]] + ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C4]](s16) + ; GFX8: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[SHL2]], [[AND1]](s16) + ; GFX8: [[ASHR2:%[0-9]+]]:_(s16) = G_ASHR [[SHL3]], [[AND1]](s16) + ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL2]](s16), [[C7]] + ; GFX8: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[ICMP2]](s1), [[C5]], [[C6]] + ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL2]](s16), [[ASHR2]] + ; GFX8: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL3]] + ; GFX8: 
[[ASHR3:%[0-9]+]]:_(s16) = G_ASHR [[SELECT3]], [[C4]](s16) + ; GFX8: [[COPY2:%[0-9]+]]:_(s16) = COPY [[ASHR1]](s16) + ; GFX8: [[AND2:%[0-9]+]]:_(s16) = G_AND [[COPY2]], [[C3]] + ; GFX8: [[COPY3:%[0-9]+]]:_(s16) = COPY [[ASHR3]](s16) + ; GFX8: [[AND3:%[0-9]+]]:_(s16) = G_AND [[COPY3]], [[C3]] + ; GFX8: [[SHL4:%[0-9]+]]:_(s16) = G_SHL [[AND3]], [[C4]](s16) + ; GFX8: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[SHL4]] + ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16) + ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32) ; GFX9-LABEL: name: sshlsat_v2s8 ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32) + ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32) + ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 + ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32) + ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32) + ; GFX9: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32) + ; GFX9: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32) ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) - ; GFX9: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16) - ; GFX9: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16) - ; GFX9: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>) - ; GFX9: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>) - ; GFX9: [[SSHLSAT:%[0-9]+]]:_(s8) = G_SSHLSAT [[UV]], [[UV2]](s8) - ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8) - ; GFX9: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[UV3]](s8) - ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 - ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16) - ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[ZEXT]](s16) - ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[ZEXT]](s16) - ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 - ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 - ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 - ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s16), [[C3]] - ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[C2]] - ; GFX9: [[ICMP1:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[SHL]](s16), [[ASHR]] - ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s16), [[SELECT]], [[SHL1]] - ; GFX9: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SELECT1]], [[C]](s16) - ; GFX9: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[ASHR1]](s16) - ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[SSHLSAT]](s8), [[TRUNC2]](s8) - ; GFX9: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>) - ; GFX9: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8) - ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16) - ; GFX9: $vgpr0 = COPY [[ANYEXT1]](s32) + ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C3]] + ; GFX9: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 + ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C4]](s16) + ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16) + ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[AND]](s16) + ; GFX9: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 + ; GFX9: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 + ; GFX9: [[C7:%[0-9]+]]:_(s16) = 
G_CONSTANT i16 0 + ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s16), [[C7]] + ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C5]], [[C6]] + ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s16), [[ASHR]] + ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL1]] + ; GFX9: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SELECT1]], [[C4]](s16) + ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32) + ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32) + ; GFX9: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C3]] + ; GFX9: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C4]](s16) + ; GFX9: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[SHL2]], [[AND1]](s16) + ; GFX9: [[ASHR2:%[0-9]+]]:_(s16) = G_ASHR [[SHL3]], [[AND1]](s16) + ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL2]](s16), [[C7]] + ; GFX9: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[ICMP2]](s1), [[C5]], [[C6]] + ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL2]](s16), [[ASHR2]] + ; GFX9: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL3]] + ; GFX9: [[ASHR3:%[0-9]+]]:_(s16) = G_ASHR [[SELECT3]], [[C4]](s16) + ; GFX9: [[COPY2:%[0-9]+]]:_(s16) = COPY [[ASHR1]](s16) + ; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[COPY2]], [[C3]] + ; GFX9: [[COPY3:%[0-9]+]]:_(s16) = COPY [[ASHR3]](s16) + ; GFX9: [[AND3:%[0-9]+]]:_(s16) = G_AND [[COPY3]], [[C3]] + ; GFX9: [[SHL4:%[0-9]+]]:_(s16) = G_SHL [[AND3]], [[C4]](s16) + ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[SHL4]] + ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16) + ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32) %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $vgpr1 %2:_(s16) = G_TRUNC %0 @@ -312,8 +342,8 @@ ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s32), [[C4]] ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C2]], [[C3]] - ; GFX6: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL]](s32), [[ASHR]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[SELECT]], [[SHL1]] + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[ASHR]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL1]] ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SELECT1]], [[C1]](s32) ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32) ; GFX6: $vgpr0 = COPY [[COPY4]](s32) @@ -329,8 +359,8 @@ ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC]](s16), [[C2]] ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX8: [[ICMP1:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[TRUNC]](s16), [[ASHR]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s16), [[SELECT]], [[SHL]] + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC]](s16), [[ASHR]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT1]](s16) ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32) ; GFX9-LABEL: name: sshlsat_s16 @@ -345,8 +375,8 @@ ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC]](s16), [[C2]] ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX9: [[ICMP1:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[TRUNC]](s16), [[ASHR]] - ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s16), [[SELECT]], [[SHL]] + ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC]](s16), [[ASHR]] 
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT1]](s16) ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32) %0:_(s32) = COPY $vgpr0 @@ -384,8 +414,8 @@ ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s32), [[C4]] ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C2]], [[C3]] - ; GFX6: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL]](s32), [[ASHR]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[SELECT]], [[SHL1]] + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[ASHR]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL1]] ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SELECT1]], [[C]](s32) ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) @@ -395,8 +425,8 @@ ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL3]], [[AND1]](s32) ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL2]](s32), [[C4]] ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C3]] - ; GFX6: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL2]](s32), [[ASHR2]] - ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s32), [[SELECT2]], [[SHL3]] + ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL2]](s32), [[ASHR2]] + ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL3]] ; GFX6: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[SELECT3]], [[C]](s32) ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32) ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C1]] @@ -409,37 +439,67 @@ ; GFX8-LABEL: name: sshlsat_v2s16 ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 ; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1 - ; GFX8: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>) - ; GFX8: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>) - ; GFX8: [[SSHLSAT:%[0-9]+]]:_(s16) = G_SSHLSAT [[UV]], [[UV2]](s16) - ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[UV1]], [[UV3]](s16) - ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[UV3]](s16) - ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 - ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 - ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 - ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV1]](s16), [[C2]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX8: [[ICMP1:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[UV1]](s16), [[ASHR]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s16), [[SELECT]], [[SHL]] - ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[SSHLSAT]](s16), [[SELECT1]](s16) - ; GFX8: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>) + ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>) + ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32) + ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32) + ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>) + ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32) + ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32) + ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC2]](s16) + ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], 
[[TRUNC2]](s16) + ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 + ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 + ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC]](s16), [[C3]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[C2]] + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC]](s16), [[ASHR]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] + ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC3]](s16) + ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[TRUNC3]](s16) + ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC1]](s16), [[C3]] + ; GFX8: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[ICMP2]](s1), [[C1]], [[C2]] + ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC1]](s16), [[ASHR1]] + ; GFX8: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL1]] + ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT1]](s16) + ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT3]](s16) + ; GFX8: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32) + ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]] + ; GFX8: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) + ; GFX8: $vgpr0 = COPY [[BITCAST2]](<2 x s16>) ; GFX9-LABEL: name: sshlsat_v2s16 ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1 - ; GFX9: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>) - ; GFX9: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>) - ; GFX9: [[SSHLSAT:%[0-9]+]]:_(s16) = G_SSHLSAT [[UV]], [[UV2]](s16) - ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[UV1]], [[UV3]](s16) - ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[UV3]](s16) - ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 - ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 - ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 - ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV1]](s16), [[C2]] - ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX9: [[ICMP1:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[UV1]](s16), [[ASHR]] - ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s16), [[SELECT]], [[SHL]] - ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[SSHLSAT]](s16), [[SELECT1]](s16) - ; GFX9: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32) + ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>) + ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32) + ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32) + ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC2]](s16) + ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[TRUNC2]](s16) + ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 + ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 + ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC]](s16), [[C3]] + ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[C2]] + ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP 
intpred(ne), [[TRUNC]](s16), [[ASHR]] + ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] + ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC3]](s16) + ; GFX9: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[TRUNC3]](s16) + ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC1]](s16), [[C3]] + ; GFX9: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[ICMP2]](s1), [[C1]], [[C2]] + ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC1]](s16), [[ASHR1]] + ; GFX9: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL1]] + ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT1]](s16) + ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT3]](s16) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) + ; GFX9: $vgpr0 = COPY [[BUILD_VECTOR_TRUNC]](<2 x s16>) %0:_(<2 x s16>) = COPY $vgpr0 %1:_(<2 x s16>) = COPY $vgpr1 %2:_(<2 x s16>) = G_SSHLSAT %0, %1 @@ -477,8 +537,8 @@ ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s32), [[C4]] ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C2]], [[C3]] - ; GFX6: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL]](s32), [[ASHR]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[SELECT]], [[SHL1]] + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[ASHR]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL1]] ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SELECT1]], [[C]](s32) ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32) @@ -488,8 +548,8 @@ ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL3]], [[AND1]](s32) ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL2]](s32), [[C4]] ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C3]] - ; GFX6: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL2]](s32), [[ASHR2]] - ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s32), [[SELECT2]], [[SHL3]] + ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL2]](s32), [[ASHR2]] + ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL3]] ; GFX6: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[SELECT3]], [[C]](s32) ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32) ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) @@ -499,8 +559,8 @@ ; GFX6: [[ASHR4:%[0-9]+]]:_(s32) = G_ASHR [[SHL5]], [[AND2]](s32) ; GFX6: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL4]](s32), [[C4]] ; GFX6: [[SELECT4:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s1), [[C2]], [[C3]] - ; GFX6: [[ICMP5:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL4]](s32), [[ASHR4]] - ; GFX6: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s32), [[SELECT4]], [[SHL5]] + ; GFX6: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL4]](s32), [[ASHR4]] + ; GFX6: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[SELECT4]], [[SHL5]] ; GFX6: [[ASHR5:%[0-9]+]]:_(s32) = G_ASHR [[SELECT5]], [[C]](s32) ; GFX6: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF ; GFX6: [[DEF1:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF @@ -536,50 +596,134 @@ ; GFX6: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>) ; GFX8-LABEL: name: sshlsat_v3s16 ; GFX8: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2 - ; GFX8: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>) - ; GFX8: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), 
[[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>) - ; GFX8: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>) - ; GFX8: [[SSHLSAT:%[0-9]+]]:_(s16) = G_SSHLSAT [[UV2]], [[UV5]](s16) - ; GFX8: [[SSHLSAT1:%[0-9]+]]:_(s16) = G_SSHLSAT [[UV3]], [[UV6]](s16) - ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[UV4]], [[UV7]](s16) - ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[UV7]](s16) - ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 - ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 - ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 - ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV4]](s16), [[C2]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX8: [[ICMP1:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[UV4]](s16), [[ASHR]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s16), [[SELECT]], [[SHL]] - ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[SSHLSAT]](s16), [[SSHLSAT1]](s16), [[SELECT1]](s16) - ; GFX8: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF - ; GFX8: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF - ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[DEF]](<4 x s16>), [[DEF1]](<4 x s16>), [[DEF1]](<4 x s16>) - ; GFX8: [[UV8:%[0-9]+]]:_(<3 x s16>), [[UV9:%[0-9]+]]:_(<3 x s16>), [[UV10:%[0-9]+]]:_(<3 x s16>), [[UV11:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<12 x s16>) - ; GFX8: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[UV8]](<3 x s16>) - ; GFX8: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>) + ; GFX8: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>) + ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>) + ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32) + ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32) + ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>) + ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32) + ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX8: [[UV3:%[0-9]+]]:_(<2 x s16>), [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>) + ; GFX8: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>) + ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32) + ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32) + ; GFX8: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>) + ; GFX8: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32) + ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32) + ; GFX8: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32) + ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC3]](s16) + ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[TRUNC3]](s16) + ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 + ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 + ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC]](s16), [[C3]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[C2]] + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC]](s16), [[ASHR]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] + ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = 
G_SHL [[TRUNC1]], [[TRUNC4]](s16) + ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[TRUNC4]](s16) + ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC1]](s16), [[C3]] + ; GFX8: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[ICMP2]](s1), [[C1]], [[C2]] + ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC1]](s16), [[ASHR1]] + ; GFX8: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL1]] + ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[TRUNC5]](s16) + ; GFX8: [[ASHR2:%[0-9]+]]:_(s16) = G_ASHR [[SHL2]], [[TRUNC5]](s16) + ; GFX8: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC2]](s16), [[C3]] + ; GFX8: [[SELECT4:%[0-9]+]]:_(s16) = G_SELECT [[ICMP4]](s1), [[C1]], [[C2]] + ; GFX8: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC2]](s16), [[ASHR2]] + ; GFX8: [[SELECT5:%[0-9]+]]:_(s16) = G_SELECT [[ICMP5]](s1), [[SELECT4]], [[SHL2]] + ; GFX8: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF + ; GFX8: [[DEF1:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF + ; GFX8: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF + ; GFX8: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF + ; GFX8: [[UV6:%[0-9]+]]:_(<2 x s16>), [[UV7:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF2]](<4 x s16>) + ; GFX8: [[BITCAST4:%[0-9]+]]:_(s32) = G_BITCAST [[UV6]](<2 x s16>) + ; GFX8: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST4]], [[C]](s32) + ; GFX8: [[BITCAST5:%[0-9]+]]:_(s32) = G_BITCAST [[UV7]](<2 x s16>) + ; GFX8: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST5]], [[C]](s32) + ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT1]](s16) + ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT3]](s16) + ; GFX8: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32) + ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL3]] + ; GFX8: [[BITCAST6:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) + ; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT5]](s16) + ; GFX8: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 + ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST4]](s32) + ; GFX8: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C4]] + ; GFX8: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND]], [[C]](s32) + ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL4]] + ; GFX8: [[BITCAST7:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32) + ; GFX8: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) + ; GFX8: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C4]] + ; GFX8: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST5]](s32) + ; GFX8: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C4]] + ; GFX8: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C]](s32) + ; GFX8: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND1]], [[SHL5]] + ; GFX8: [[BITCAST8:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32) + ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST6]](<2 x s16>), [[BITCAST7]](<2 x s16>), [[BITCAST8]](<2 x s16>) + ; GFX8: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>) ; GFX9-LABEL: name: sshlsat_v3s16 ; GFX9: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2 - ; GFX9: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>) - ; GFX9: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>) - ; GFX9: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>) - ; GFX9: [[SSHLSAT:%[0-9]+]]:_(s16) = G_SSHLSAT [[UV2]], [[UV5]](s16) - ; GFX9: [[SSHLSAT1:%[0-9]+]]:_(s16) = G_SSHLSAT [[UV3]], [[UV6]](s16) - ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[UV4]], [[UV7]](s16) - ; GFX9: 
[[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[UV7]](s16) - ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 - ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 - ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 - ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV4]](s16), [[C2]] - ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX9: [[ICMP1:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[UV4]](s16), [[ASHR]] - ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s16), [[SELECT]], [[SHL]] - ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[SSHLSAT]](s16), [[SSHLSAT1]](s16), [[SELECT1]](s16) - ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF - ; GFX9: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF - ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[DEF]](<4 x s16>), [[DEF1]](<4 x s16>), [[DEF1]](<4 x s16>) - ; GFX9: [[UV8:%[0-9]+]]:_(<3 x s16>), [[UV9:%[0-9]+]]:_(<3 x s16>), [[UV10:%[0-9]+]]:_(<3 x s16>), [[UV11:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<12 x s16>) - ; GFX9: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[UV8]](<3 x s16>) - ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>) + ; GFX9: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32) + ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>) + ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32) + ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[UV3:%[0-9]+]]:_(<2 x s16>), [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>) + ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>) + ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32) + ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32) + ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>) + ; GFX9: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32) + ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32) + ; GFX9: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32) + ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC3]](s16) + ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[TRUNC3]](s16) + ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 + ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 + ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC]](s16), [[C3]] + ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[C2]] + ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC]](s16), [[ASHR]] + ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] + ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC4]](s16) + ; GFX9: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[TRUNC4]](s16) + ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC1]](s16), [[C3]] + ; GFX9: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[ICMP2]](s1), [[C1]], [[C2]] + ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC1]](s16), [[ASHR1]] + ; GFX9: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT 
[[ICMP3]](s1), [[SELECT2]], [[SHL1]] + ; GFX9: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[TRUNC5]](s16) + ; GFX9: [[ASHR2:%[0-9]+]]:_(s16) = G_ASHR [[SHL2]], [[TRUNC5]](s16) + ; GFX9: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC2]](s16), [[C3]] + ; GFX9: [[SELECT4:%[0-9]+]]:_(s16) = G_SELECT [[ICMP4]](s1), [[C1]], [[C2]] + ; GFX9: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC2]](s16), [[ASHR2]] + ; GFX9: [[SELECT5:%[0-9]+]]:_(s16) = G_SELECT [[ICMP5]](s1), [[SELECT4]], [[SHL2]] + ; GFX9: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF + ; GFX9: [[DEF1:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF + ; GFX9: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF + ; GFX9: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF + ; GFX9: [[UV6:%[0-9]+]]:_(<2 x s16>), [[UV7:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF2]](<4 x s16>) + ; GFX9: [[BITCAST4:%[0-9]+]]:_(s32) = G_BITCAST [[UV6]](<2 x s16>) + ; GFX9: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST4]], [[C]](s32) + ; GFX9: [[BITCAST5:%[0-9]+]]:_(s32) = G_BITCAST [[UV7]](<2 x s16>) + ; GFX9: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST5]], [[C]](s32) + ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT1]](s16) + ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT3]](s16) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) + ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT5]](s16) + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST4]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT2]](s32), [[COPY1]](s32) + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32) + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST5]](s32) + ; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY2]](s32), [[COPY3]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), [[BUILD_VECTOR_TRUNC2]](<2 x s16>) + ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>) %0:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2 %1:_(<3 x s16>), %2:_(<3 x s16>) = G_UNMERGE_VALUES %0 %3:_(<3 x s16>) = G_SSHLSAT %1, %2 @@ -620,8 +764,8 @@ ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s32), [[C4]] ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C2]], [[C3]] - ; GFX6: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL]](s32), [[ASHR]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[SELECT]], [[SHL1]] + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[ASHR]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL1]] ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SELECT1]], [[C]](s32) ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32) @@ -631,8 +775,8 @@ ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL3]], [[AND1]](s32) ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL2]](s32), [[C4]] ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[C3]] - ; GFX6: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL2]](s32), [[ASHR2]] - ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s32), [[SELECT2]], [[SHL3]] + ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL2]](s32), [[ASHR2]] + ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL3]] ; GFX6: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[SELECT3]], [[C]](s32) ; GFX6: 
[[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32) ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32) @@ -642,8 +786,8 @@ ; GFX6: [[ASHR4:%[0-9]+]]:_(s32) = G_ASHR [[SHL5]], [[AND2]](s32) ; GFX6: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL4]](s32), [[C4]] ; GFX6: [[SELECT4:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s1), [[C2]], [[C3]] - ; GFX6: [[ICMP5:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL4]](s32), [[ASHR4]] - ; GFX6: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s32), [[SELECT4]], [[SHL5]] + ; GFX6: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL4]](s32), [[ASHR4]] + ; GFX6: [[SELECT5:%[0-9]+]]:_(s32) = G_SELECT [[ICMP5]](s1), [[SELECT4]], [[SHL5]] ; GFX6: [[ASHR5:%[0-9]+]]:_(s32) = G_ASHR [[SELECT5]], [[C]](s32) ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) ; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) @@ -653,8 +797,8 @@ ; GFX6: [[ASHR6:%[0-9]+]]:_(s32) = G_ASHR [[SHL7]], [[AND3]](s32) ; GFX6: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL6]](s32), [[C4]] ; GFX6: [[SELECT6:%[0-9]+]]:_(s32) = G_SELECT [[ICMP6]](s1), [[C2]], [[C3]] - ; GFX6: [[ICMP7:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL6]](s32), [[ASHR6]] - ; GFX6: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP7]](s32), [[SELECT6]], [[SHL7]] + ; GFX6: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL6]](s32), [[ASHR6]] + ; GFX6: [[SELECT7:%[0-9]+]]:_(s32) = G_SELECT [[ICMP7]](s1), [[SELECT6]], [[SHL7]] ; GFX6: [[ASHR7:%[0-9]+]]:_(s32) = G_ASHR [[SELECT7]], [[C]](s32) ; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32) ; GFX6: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C1]] @@ -675,41 +819,121 @@ ; GFX8-LABEL: name: sshlsat_v4s16 ; GFX8: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1 ; GFX8: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3 - ; GFX8: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>) - ; GFX8: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>) - ; GFX8: [[SSHLSAT:%[0-9]+]]:_(s16) = G_SSHLSAT [[UV]], [[UV4]](s16) - ; GFX8: [[SSHLSAT1:%[0-9]+]]:_(s16) = G_SSHLSAT [[UV1]], [[UV5]](s16) - ; GFX8: [[SSHLSAT2:%[0-9]+]]:_(s16) = G_SSHLSAT [[UV2]], [[UV6]](s16) - ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[UV3]], [[UV7]](s16) - ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[UV7]](s16) - ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 - ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 - ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 - ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV3]](s16), [[C2]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX8: [[ICMP1:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[UV3]](s16), [[ASHR]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s16), [[SELECT]], [[SHL]] - ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[SSHLSAT]](s16), [[SSHLSAT1]](s16), [[SSHLSAT2]](s16), [[SELECT1]](s16) - ; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>) + ; GFX8: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>) + ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>) + ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32) + ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32) + ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST 
[[UV1]](<2 x s16>) + ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32) + ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32) + ; GFX8: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>) + ; GFX8: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>) + ; GFX8: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32) + ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32) + ; GFX8: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32) + ; GFX8: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>) + ; GFX8: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32) + ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32) + ; GFX8: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32) + ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC4]](s16) + ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[TRUNC4]](s16) + ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 + ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 + ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC]](s16), [[C3]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[C2]] + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC]](s16), [[ASHR]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] + ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC5]](s16) + ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[TRUNC5]](s16) + ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC1]](s16), [[C3]] + ; GFX8: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[ICMP2]](s1), [[C1]], [[C2]] + ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC1]](s16), [[ASHR1]] + ; GFX8: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL1]] + ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[TRUNC6]](s16) + ; GFX8: [[ASHR2:%[0-9]+]]:_(s16) = G_ASHR [[SHL2]], [[TRUNC6]](s16) + ; GFX8: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC2]](s16), [[C3]] + ; GFX8: [[SELECT4:%[0-9]+]]:_(s16) = G_SELECT [[ICMP4]](s1), [[C1]], [[C2]] + ; GFX8: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC2]](s16), [[ASHR2]] + ; GFX8: [[SELECT5:%[0-9]+]]:_(s16) = G_SELECT [[ICMP5]](s1), [[SELECT4]], [[SHL2]] + ; GFX8: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[TRUNC7]](s16) + ; GFX8: [[ASHR3:%[0-9]+]]:_(s16) = G_ASHR [[SHL3]], [[TRUNC7]](s16) + ; GFX8: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC3]](s16), [[C3]] + ; GFX8: [[SELECT6:%[0-9]+]]:_(s16) = G_SELECT [[ICMP6]](s1), [[C1]], [[C2]] + ; GFX8: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC3]](s16), [[ASHR3]] + ; GFX8: [[SELECT7:%[0-9]+]]:_(s16) = G_SELECT [[ICMP7]](s1), [[SELECT6]], [[SHL3]] + ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT1]](s16) + ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT3]](s16) + ; GFX8: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32) + ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL4]] + ; GFX8: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) + ; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT5]](s16) + ; GFX8: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT7]](s16) + ; GFX8: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[ZEXT3]], [[C]](s32) + ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL5]] + ; GFX8: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32) + ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = 
G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>) + ; GFX8: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>) ; GFX9-LABEL: name: sshlsat_v4s16 ; GFX9: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3 - ; GFX9: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>) - ; GFX9: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>) - ; GFX9: [[SSHLSAT:%[0-9]+]]:_(s16) = G_SSHLSAT [[UV]], [[UV4]](s16) - ; GFX9: [[SSHLSAT1:%[0-9]+]]:_(s16) = G_SSHLSAT [[UV1]], [[UV5]](s16) - ; GFX9: [[SSHLSAT2:%[0-9]+]]:_(s16) = G_SSHLSAT [[UV2]], [[UV6]](s16) - ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[UV3]], [[UV7]](s16) - ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[UV7]](s16) - ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 - ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 - ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 - ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV3]](s16), [[C2]] - ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX9: [[ICMP1:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[UV3]](s16), [[ASHR]] - ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s16), [[SELECT]], [[SHL]] - ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[SSHLSAT]](s16), [[SSHLSAT1]](s16), [[SSHLSAT2]](s16), [[SELECT1]](s16) - ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>) + ; GFX9: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32) + ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>) + ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32) + ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32) + ; GFX9: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>) + ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>) + ; GFX9: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32) + ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32) + ; GFX9: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32) + ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>) + ; GFX9: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32) + ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32) + ; GFX9: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32) + ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC4]](s16) + ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[TRUNC4]](s16) + ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768 + ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767 + ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0 + ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC]](s16), [[C3]] + ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[C2]] + ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC]](s16), [[ASHR]] + ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] + ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL 
[[TRUNC1]], [[TRUNC5]](s16) + ; GFX9: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SHL1]], [[TRUNC5]](s16) + ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC1]](s16), [[C3]] + ; GFX9: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[ICMP2]](s1), [[C1]], [[C2]] + ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC1]](s16), [[ASHR1]] + ; GFX9: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL1]] + ; GFX9: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[TRUNC6]](s16) + ; GFX9: [[ASHR2:%[0-9]+]]:_(s16) = G_ASHR [[SHL2]], [[TRUNC6]](s16) + ; GFX9: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC2]](s16), [[C3]] + ; GFX9: [[SELECT4:%[0-9]+]]:_(s16) = G_SELECT [[ICMP4]](s1), [[C1]], [[C2]] + ; GFX9: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC2]](s16), [[ASHR2]] + ; GFX9: [[SELECT5:%[0-9]+]]:_(s16) = G_SELECT [[ICMP5]](s1), [[SELECT4]], [[SHL2]] + ; GFX9: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[TRUNC7]](s16) + ; GFX9: [[ASHR3:%[0-9]+]]:_(s16) = G_ASHR [[SHL3]], [[TRUNC7]](s16) + ; GFX9: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[TRUNC3]](s16), [[C3]] + ; GFX9: [[SELECT6:%[0-9]+]]:_(s16) = G_SELECT [[ICMP6]](s1), [[C1]], [[C2]] + ; GFX9: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC3]](s16), [[ASHR3]] + ; GFX9: [[SELECT7:%[0-9]+]]:_(s16) = G_SELECT [[ICMP7]](s1), [[SELECT6]], [[SHL3]] + ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT1]](s16) + ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT3]](s16) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) + ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT5]](s16) + ; GFX9: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT7]](s16) + ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT2]](s32), [[ANYEXT3]](s32) + ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>) + ; GFX9: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>) %0:_(<4 x s16>) = COPY $vgpr0_vgpr1 %1:_(<4 x s16>) = COPY $vgpr2_vgpr3 %2:_(<4 x s16>) = G_SSHLSAT %0, %1 @@ -732,8 +956,8 @@ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY]](s32), [[C2]] ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX6: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[COPY]](s32), [[ASHR]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[SELECT]], [[SHL]] + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[ASHR]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] ; GFX6: $vgpr0 = COPY [[SELECT1]](s32) ; GFX8-LABEL: name: sshlsat_s32 ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 @@ -745,8 +969,8 @@ ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY]](s32), [[C2]] ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX8: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[COPY]](s32), [[ASHR]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[SELECT]], [[SHL]] + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[ASHR]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] ; GFX8: $vgpr0 = COPY [[SELECT1]](s32) ; GFX9-LABEL: name: sshlsat_s32 ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 @@ -758,8 +982,8 @@ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; GFX9: 
[[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY]](s32), [[C2]] ; GFX9: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX9: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[COPY]](s32), [[ASHR]] - ; GFX9: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[SELECT]], [[SHL]] + ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[ASHR]] + ; GFX9: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] ; GFX9: $vgpr0 = COPY [[SELECT1]](s32) %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $vgpr1 @@ -785,14 +1009,14 @@ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV]](s32), [[C2]] ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX6: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UV]](s32), [[ASHR]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[SELECT]], [[SHL]] + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[ASHR]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[UV3]](s32) ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[UV3]](s32) ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV1]](s32), [[C2]] ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C]], [[C1]] - ; GFX6: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UV1]](s32), [[ASHR1]] - ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s32), [[SELECT2]], [[SHL1]] + ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[ASHR1]] + ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL1]] ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SELECT1]](s32), [[SELECT3]](s32) ; GFX6: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>) ; GFX8-LABEL: name: sshlsat_v2s32 @@ -807,14 +1031,14 @@ ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV]](s32), [[C2]] ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX8: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UV]](s32), [[ASHR]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[SELECT]], [[SHL]] + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[ASHR]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[UV3]](s32) ; GFX8: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[UV3]](s32) ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV1]](s32), [[C2]] ; GFX8: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C]], [[C1]] - ; GFX8: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UV1]](s32), [[ASHR1]] - ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s32), [[SELECT2]], [[SHL1]] + ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[ASHR1]] + ; GFX8: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL1]] ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SELECT1]](s32), [[SELECT3]](s32) ; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>) ; GFX9-LABEL: name: sshlsat_v2s32 @@ -829,14 +1053,14 @@ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV]](s32), [[C2]] ; GFX9: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX9: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UV]](s32), [[ASHR]] - ; GFX9: 
[[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[SELECT]], [[SHL]] + ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[ASHR]] + ; GFX9: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] ; GFX9: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[UV3]](s32) ; GFX9: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[UV3]](s32) ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV1]](s32), [[C2]] ; GFX9: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C]], [[C1]] - ; GFX9: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UV1]](s32), [[ASHR1]] - ; GFX9: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s32), [[SELECT2]], [[SHL1]] + ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[ASHR1]] + ; GFX9: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL1]] ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SELECT1]](s32), [[SELECT3]](s32) ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>) %0:_(<2 x s32>) = COPY $vgpr0_vgpr1 @@ -854,41 +1078,44 @@ ; GFX6-LABEL: name: sshlsat_s64 ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 ; GFX6: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 - ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[COPY1]](s64) - ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[COPY1]](s64) + ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) + ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[TRUNC]](s32) + ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[TRUNC]](s32) ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808 ; GFX6: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 ; GFX6: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY]](s64), [[C2]] ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX6: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[COPY]](s64), [[ASHR]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s64), [[SELECT]], [[SHL]] + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s64), [[ASHR]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] ; GFX6: $vgpr0_vgpr1 = COPY [[SELECT1]](s64) ; GFX8-LABEL: name: sshlsat_s64 ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 ; GFX8: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 - ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[COPY1]](s64) - ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[COPY1]](s64) + ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) + ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[TRUNC]](s32) + ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[TRUNC]](s32) ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808 ; GFX8: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY]](s64), [[C2]] ; GFX8: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX8: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[COPY]](s64), [[ASHR]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s64), [[SELECT]], [[SHL]] + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s64), [[ASHR]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] ; GFX8: $vgpr0_vgpr1 = COPY [[SELECT1]](s64) ; GFX9-LABEL: name: sshlsat_s64 ; GFX9: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1 ; GFX9: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 - ; GFX9: 
[[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[COPY1]](s64) - ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[COPY1]](s64) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) + ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[TRUNC]](s32) + ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[TRUNC]](s32) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808 ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY]](s64), [[C2]] ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX9: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[COPY]](s64), [[ASHR]] - ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s64), [[SELECT]], [[SHL]] + ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s64), [[ASHR]] + ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] ; GFX9: $vgpr0_vgpr1 = COPY [[SELECT1]](s64) %0:_(s64) = COPY $vgpr0_vgpr1 %1:_(s64) = COPY $vgpr2_vgpr3 @@ -907,51 +1134,72 @@ ; GFX6: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7 ; GFX6: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>) ; GFX6: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>) - ; GFX6: [[SSHLSAT:%[0-9]+]]:_(s64) = G_SSHLSAT [[UV]], [[UV2]](s64) - ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[UV3]](s64) - ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[UV3]](s64) + ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[UV2]](s64) + ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[TRUNC]](s32) + ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[TRUNC]](s32) ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808 ; GFX6: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 ; GFX6: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV1]](s64), [[C2]] + ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV]](s64), [[C2]] ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX6: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[UV1]](s64), [[ASHR]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s64), [[SELECT]], [[SHL]] - ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SSHLSAT]](s64), [[SELECT1]](s64) + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s64), [[ASHR]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] + ; GFX6: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[UV3]](s64) + ; GFX6: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC1]](s32) + ; GFX6: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[TRUNC1]](s32) + ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV1]](s64), [[C2]] + ; GFX6: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[C]], [[C1]] + ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s64), [[ASHR1]] + ; GFX6: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL1]] + ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT1]](s64), [[SELECT3]](s64) ; GFX6: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>) ; GFX8-LABEL: name: sshlsat_v2s64 ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7 ; GFX8: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>) ; GFX8: 
[[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>) - ; GFX8: [[SSHLSAT:%[0-9]+]]:_(s64) = G_SSHLSAT [[UV]], [[UV2]](s64) - ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[UV3]](s64) - ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[UV3]](s64) + ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[UV2]](s64) + ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[TRUNC]](s32) + ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[TRUNC]](s32) ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808 ; GFX8: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV1]](s64), [[C2]] + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV]](s64), [[C2]] ; GFX8: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX8: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[UV1]](s64), [[ASHR]] - ; GFX8: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s64), [[SELECT]], [[SHL]] - ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SSHLSAT]](s64), [[SELECT1]](s64) + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s64), [[ASHR]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[SELECT]], [[SHL]] + ; GFX8: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[UV3]](s64) + ; GFX8: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC1]](s32) + ; GFX8: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[TRUNC1]](s32) + ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV1]](s64), [[C2]] + ; GFX8: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[C]], [[C1]] + ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s64), [[ASHR1]] + ; GFX8: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL1]] + ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT1]](s64), [[SELECT3]](s64) ; GFX8: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>) ; GFX9-LABEL: name: sshlsat_v2s64 ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7 ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>) ; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>) - ; GFX9: [[SSHLSAT:%[0-9]+]]:_(s64) = G_SSHLSAT [[UV]], [[UV2]](s64) - ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[UV3]](s64) - ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[UV3]](s64) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[UV2]](s64) + ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[TRUNC]](s32) + ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[TRUNC]](s32) ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808 ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807 ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 - ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV1]](s64), [[C2]] + ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV]](s64), [[C2]] ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[C1]] - ; GFX9: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[UV1]](s64), [[ASHR]] - ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s64), [[SELECT]], [[SHL]] - ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SSHLSAT]](s64), [[SELECT1]](s64) + ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s64), [[ASHR]] + ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT 
[[ICMP1]](s1), [[SELECT]], [[SHL]] + ; GFX9: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[UV3]](s64) + ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC1]](s32) + ; GFX9: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[TRUNC1]](s32) + ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV1]](s64), [[C2]] + ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[C]], [[C1]] + ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s64), [[ASHR1]] + ; GFX9: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[SELECT2]], [[SHL1]] + ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT1]](s64), [[SELECT3]](s64) ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>) %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 %1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7 diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ushlsat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ushlsat.mir --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ushlsat.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ushlsat.mir @@ -21,47 +21,45 @@ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SHL]], [[AND]](s32) ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[SHL1]], [[AND]](s32) ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 - ; GFX6: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL]](s32), [[LSHR]] - ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C2]], [[SHL1]] + ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[LSHR]] + ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C2]], [[SHL1]] ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SELECT]], [[C1]](s32) ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) ; GFX6: $vgpr0 = COPY [[COPY4]](s32) ; GFX8-LABEL: name: ushlsat_s7 ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 - ; GFX8: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32) - ; GFX8: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32) - ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7) - ; GFX8: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[TRUNC1]](s7) - ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9 - ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16) - ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[ZEXT]](s16) - ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[ZEXT]](s16) - ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 - ; GFX8: [[ICMP:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[SHL]](s16), [[LSHR]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s16), [[C1]], [[SHL1]] - ; GFX8: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[SELECT]], [[C]](s16) - ; GFX8: [[TRUNC2:%[0-9]+]]:_(s7) = G_TRUNC [[LSHR1]](s16) - ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s7) - ; GFX8: $vgpr0 = COPY [[ANYEXT1]](s32) + ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 127 + ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]] + ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 9 + ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C1]](s16) + ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16) + ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[AND]](s16) + ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s16), [[LSHR]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C2]], [[SHL1]] + ; GFX8: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[SELECT]], [[C1]](s16) + ; GFX8: 
[[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16) + ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32) ; GFX9-LABEL: name: ushlsat_s7 ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 - ; GFX9: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32) - ; GFX9: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32) - ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7) - ; GFX9: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[TRUNC1]](s7) - ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9 - ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16) - ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[ZEXT]](s16) - ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[ZEXT]](s16) - ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 - ; GFX9: [[ICMP:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[SHL]](s16), [[LSHR]] - ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s16), [[C1]], [[SHL1]] - ; GFX9: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[SELECT]], [[C]](s16) - ; GFX9: [[TRUNC2:%[0-9]+]]:_(s7) = G_TRUNC [[LSHR1]](s16) - ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s7) - ; GFX9: $vgpr0 = COPY [[ANYEXT1]](s32) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 127 + ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]] + ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 9 + ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C1]](s16) + ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16) + ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[AND]](s16) + ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 + ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s16), [[LSHR]] + ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C2]], [[SHL1]] + ; GFX9: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[SELECT]], [[C1]](s16) + ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16) + ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32) %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $vgpr1 %2:_(s7) = G_TRUNC %0 @@ -89,47 +87,45 @@ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SHL]], [[AND]](s32) ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[SHL1]], [[AND]](s32) ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 - ; GFX6: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL]](s32), [[LSHR]] - ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C2]], [[SHL1]] + ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[LSHR]] + ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C2]], [[SHL1]] ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SELECT]], [[C1]](s32) ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) ; GFX6: $vgpr0 = COPY [[COPY4]](s32) ; GFX8-LABEL: name: ushlsat_s8 ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 - ; GFX8: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32) - ; GFX8: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32) - ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8) - ; GFX8: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[TRUNC1]](s8) - ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 - ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16) - ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[ZEXT]](s16) - ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[ZEXT]](s16) - ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 - ; GFX8: [[ICMP:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[SHL]](s16), [[LSHR]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s16), [[C1]], [[SHL1]] - ; 
GFX8: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[SELECT]], [[C]](s16) - ; GFX8: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[LSHR1]](s16) - ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s8) - ; GFX8: $vgpr0 = COPY [[ANYEXT1]](s32) + ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 + ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]] + ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 + ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C1]](s16) + ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16) + ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[AND]](s16) + ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s16), [[LSHR]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C2]], [[SHL1]] + ; GFX8: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[SELECT]], [[C1]](s16) + ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16) + ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32) ; GFX9-LABEL: name: ushlsat_s8 ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 - ; GFX9: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32) - ; GFX9: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32) - ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8) - ; GFX9: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[TRUNC1]](s8) - ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 - ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16) - ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[ZEXT]](s16) - ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[ZEXT]](s16) - ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 - ; GFX9: [[ICMP:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[SHL]](s16), [[LSHR]] - ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s16), [[C1]], [[SHL1]] - ; GFX9: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[SELECT]], [[C]](s16) - ; GFX9: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[LSHR1]](s16) - ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s8) - ; GFX9: $vgpr0 = COPY [[ANYEXT1]](s32) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 + ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]] + ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 + ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C1]](s16) + ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16) + ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[AND]](s16) + ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 + ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s16), [[LSHR]] + ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C2]], [[SHL1]] + ; GFX9: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[SELECT]], [[C1]](s16) + ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16) + ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32) %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $vgpr1 %2:_(s8) = G_TRUNC %0 @@ -165,8 +161,8 @@ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SHL]], [[AND]](s32) ; GFX6: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[SHL1]], [[AND]](s32) ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 - ; GFX6: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL]](s32), [[LSHR6]] - ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C4]], [[SHL1]] + ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[LSHR6]] + ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), 
[[C4]], [[SHL1]] ; GFX6: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[SELECT]], [[C2]](s32) ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) @@ -174,8 +170,8 @@ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C2]](s32) ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[SHL2]], [[AND1]](s32) ; GFX6: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[SHL3]], [[AND1]](s32) - ; GFX6: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL2]](s32), [[LSHR8]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[C4]], [[SHL3]] + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL2]](s32), [[LSHR8]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C4]], [[SHL3]] ; GFX6: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[SELECT1]], [[C2]](s32) ; GFX6: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR7]](s32) @@ -191,55 +187,85 @@ ; GFX8-LABEL: name: ushlsat_v2s8 ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 + ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32) + ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32) + ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 + ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32) + ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32) + ; GFX8: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32) + ; GFX8: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32) ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) - ; GFX8: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16) - ; GFX8: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16) - ; GFX8: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>) - ; GFX8: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>) - ; GFX8: [[USHLSAT:%[0-9]+]]:_(s8) = G_USHLSAT [[UV]], [[UV2]](s8) - ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8) - ; GFX8: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[UV3]](s8) - ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 - ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16) - ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[ZEXT]](s16) - ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[ZEXT]](s16) - ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 - ; GFX8: [[ICMP:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[SHL]](s16), [[LSHR]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s16), [[C1]], [[SHL1]] - ; GFX8: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[SELECT]], [[C]](s16) - ; GFX8: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[LSHR1]](s16) - ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[USHLSAT]](s8), [[TRUNC2]](s8) - ; GFX8: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>) - ; GFX8: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8) - ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16) - ; GFX8: $vgpr0 = COPY [[ANYEXT1]](s32) + ; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C3]] + ; GFX8: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 + ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C4]](s16) + ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16) + ; GFX8: [[LSHR6:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[AND]](s16) + ; 
GFX8: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s16), [[LSHR6]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C5]], [[SHL1]] + ; GFX8: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[SELECT]], [[C4]](s16) + ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32) + ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32) + ; GFX8: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C3]] + ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C4]](s16) + ; GFX8: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[SHL2]], [[AND1]](s16) + ; GFX8: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[SHL3]], [[AND1]](s16) + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL2]](s16), [[LSHR8]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[C5]], [[SHL3]] + ; GFX8: [[LSHR9:%[0-9]+]]:_(s16) = G_LSHR [[SELECT1]], [[C4]](s16) + ; GFX8: [[COPY2:%[0-9]+]]:_(s16) = COPY [[LSHR7]](s16) + ; GFX8: [[AND2:%[0-9]+]]:_(s16) = G_AND [[COPY2]], [[C3]] + ; GFX8: [[COPY3:%[0-9]+]]:_(s16) = COPY [[LSHR9]](s16) + ; GFX8: [[AND3:%[0-9]+]]:_(s16) = G_AND [[COPY3]], [[C3]] + ; GFX8: [[SHL4:%[0-9]+]]:_(s16) = G_SHL [[AND3]], [[C4]](s16) + ; GFX8: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[SHL4]] + ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16) + ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32) ; GFX9-LABEL: name: ushlsat_v2s8 ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32) + ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32) + ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 + ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32) + ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32) + ; GFX9: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32) + ; GFX9: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32) ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 255 ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) - ; GFX9: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16) - ; GFX9: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16) - ; GFX9: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>) - ; GFX9: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>) - ; GFX9: [[USHLSAT:%[0-9]+]]:_(s8) = G_USHLSAT [[UV]], [[UV2]](s8) - ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8) - ; GFX9: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[UV3]](s8) - ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 - ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16) - ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[ZEXT]](s16) - ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[ZEXT]](s16) - ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 - ; GFX9: [[ICMP:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[SHL]](s16), [[LSHR]] - ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s16), [[C1]], [[SHL1]] - ; GFX9: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[SELECT]], [[C]](s16) - ; GFX9: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[LSHR1]](s16) - ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[USHLSAT]](s8), [[TRUNC2]](s8) - ; GFX9: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>) - ; GFX9: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8) - 
; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16) - ; GFX9: $vgpr0 = COPY [[ANYEXT1]](s32) + ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C3]] + ; GFX9: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 8 + ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C4]](s16) + ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16) + ; GFX9: [[LSHR6:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[AND]](s16) + ; GFX9: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 + ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s16), [[LSHR6]] + ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C5]], [[SHL1]] + ; GFX9: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[SELECT]], [[C4]](s16) + ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32) + ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32) + ; GFX9: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C3]] + ; GFX9: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C4]](s16) + ; GFX9: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[SHL2]], [[AND1]](s16) + ; GFX9: [[LSHR8:%[0-9]+]]:_(s16) = G_LSHR [[SHL3]], [[AND1]](s16) + ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL2]](s16), [[LSHR8]] + ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[C5]], [[SHL3]] + ; GFX9: [[LSHR9:%[0-9]+]]:_(s16) = G_LSHR [[SELECT1]], [[C4]](s16) + ; GFX9: [[COPY2:%[0-9]+]]:_(s16) = COPY [[LSHR7]](s16) + ; GFX9: [[AND2:%[0-9]+]]:_(s16) = G_AND [[COPY2]], [[C3]] + ; GFX9: [[COPY3:%[0-9]+]]:_(s16) = COPY [[LSHR9]](s16) + ; GFX9: [[AND3:%[0-9]+]]:_(s16) = G_AND [[COPY3]], [[C3]] + ; GFX9: [[SHL4:%[0-9]+]]:_(s16) = G_SHL [[AND3]], [[C4]](s16) + ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[SHL4]] + ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16) + ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32) %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $vgpr1 %2:_(s16) = G_TRUNC %0 @@ -270,8 +296,8 @@ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SHL]], [[AND]](s32) ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[SHL1]], [[AND]](s32) ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 - ; GFX6: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL]](s32), [[LSHR]] - ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C2]], [[SHL1]] + ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[LSHR]] + ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C2]], [[SHL1]] ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SELECT]], [[C1]](s32) ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) ; GFX6: $vgpr0 = COPY [[COPY4]](s32) @@ -283,8 +309,8 @@ ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC1]](s16) ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL]], [[TRUNC1]](s16) ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 - ; GFX8: [[ICMP:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[TRUNC]](s16), [[LSHR]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s16), [[C]], [[SHL]] + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC]](s16), [[LSHR]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C]], [[SHL]] ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT]](s16) ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32) ; GFX9-LABEL: name: ushlsat_s16 @@ -295,8 +321,8 @@ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC1]](s16) ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL]], [[TRUNC1]](s16) ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 - ; GFX9: [[ICMP:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[TRUNC]](s16), [[LSHR]] - ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s16), [[C]], [[SHL]] + ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), 
[[TRUNC]](s16), [[LSHR]] + ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C]], [[SHL]] ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT]](s16) ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32) %0:_(s32) = COPY $vgpr0 @@ -330,8 +356,8 @@ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SHL]], [[AND]](s32) ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[SHL1]], [[AND]](s32) ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 - ; GFX6: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL]](s32), [[LSHR2]] - ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C2]], [[SHL1]] + ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[LSHR2]] + ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C2]], [[SHL1]] ; GFX6: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[SELECT]], [[C]](s32) ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32) @@ -339,8 +365,8 @@ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32) ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[SHL2]], [[AND1]](s32) ; GFX6: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[SHL3]], [[AND1]](s32) - ; GFX6: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL2]](s32), [[LSHR4]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[C2]], [[SHL3]] + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL2]](s32), [[LSHR4]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C2]], [[SHL3]] ; GFX6: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[SELECT1]], [[C]](s32) ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C1]] @@ -353,29 +379,55 @@ ; GFX8-LABEL: name: ushlsat_v2s16 ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 ; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1 - ; GFX8: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>) - ; GFX8: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>) - ; GFX8: [[USHLSAT:%[0-9]+]]:_(s16) = G_USHLSAT [[UV]], [[UV2]](s16) - ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[UV1]], [[UV3]](s16) - ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL]], [[UV3]](s16) - ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 - ; GFX8: [[ICMP:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[UV1]](s16), [[LSHR]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s16), [[C]], [[SHL]] - ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[USHLSAT]](s16), [[SELECT]](s16) - ; GFX8: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>) + ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>) + ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32) + ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32) + ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>) + ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32) + ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32) + ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC2]](s16) + ; GFX8: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[SHL]], [[TRUNC2]](s16) + ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC]](s16), [[LSHR2]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[SHL]] + ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC3]](s16) + ; GFX8: 
[[LSHR3:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[TRUNC3]](s16) + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC1]](s16), [[LSHR3]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[C1]], [[SHL1]] + ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT]](s16) + ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT1]](s16) + ; GFX8: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32) + ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL2]] + ; GFX8: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) + ; GFX8: $vgpr0 = COPY [[BITCAST2]](<2 x s16>) ; GFX9-LABEL: name: ushlsat_v2s16 ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0 ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1 - ; GFX9: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>) - ; GFX9: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>) - ; GFX9: [[USHLSAT:%[0-9]+]]:_(s16) = G_USHLSAT [[UV]], [[UV2]](s16) - ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[UV1]], [[UV3]](s16) - ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL]], [[UV3]](s16) - ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 - ; GFX9: [[ICMP:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[UV1]](s16), [[LSHR]] - ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s16), [[C]], [[SHL]] - ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[USHLSAT]](s16), [[SELECT]](s16) - ; GFX9: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>) + ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>) + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32) + ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32) + ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>) + ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32) + ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32) + ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC2]](s16) + ; GFX9: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[SHL]], [[TRUNC2]](s16) + ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 + ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC]](s16), [[LSHR2]] + ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[SHL]] + ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC3]](s16) + ; GFX9: [[LSHR3:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[TRUNC3]](s16) + ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC1]](s16), [[LSHR3]] + ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[C1]], [[SHL1]] + ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT]](s16) + ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT1]](s16) + ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32) + ; GFX9: $vgpr0 = COPY [[BUILD_VECTOR_TRUNC]](<2 x s16>) %0:_(<2 x s16>) = COPY $vgpr0 %1:_(<2 x s16>) = COPY $vgpr1 %2:_(<2 x s16>) = G_USHLSAT %0, %1 @@ -409,8 +461,8 @@ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SHL]], [[AND]](s32) ; GFX6: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[SHL1]], [[AND]](s32) ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 - ; GFX6: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL]](s32), [[LSHR4]] - ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C2]], [[SHL1]] + ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[LSHR4]] + ; GFX6: [[SELECT:%[0-9]+]]:_(s32) 
= G_SELECT [[ICMP]](s1), [[C2]], [[SHL1]] ; GFX6: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[SELECT]], [[C]](s32) ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32) @@ -418,8 +470,8 @@ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32) ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[SHL2]], [[AND1]](s32) ; GFX6: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[SHL3]], [[AND1]](s32) - ; GFX6: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL2]](s32), [[LSHR6]] - ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[C2]], [[SHL3]] + ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL2]](s32), [[LSHR6]] + ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C2]], [[SHL3]] ; GFX6: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[SELECT1]], [[C]](s32) ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32) ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32) @@ -427,8 +479,8 @@ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C]](s32) ; GFX6: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[SHL4]], [[AND2]](s32) ; GFX6: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[SHL5]], [[AND2]](s32) - ; GFX6: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL4]](s32), [[LSHR8]] - ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s32), [[C2]], [[SHL5]] + ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL4]](s32), [[LSHR8]] + ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[SHL5]] ; GFX6: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[SELECT2]], [[C]](s32) ; GFX6: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF ; GFX6: [[DEF1:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF @@ -464,42 +516,118 @@ ; GFX6: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>) ; GFX8-LABEL: name: ushlsat_v3s16 ; GFX8: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2 - ; GFX8: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>) - ; GFX8: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>) - ; GFX8: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>) - ; GFX8: [[USHLSAT:%[0-9]+]]:_(s16) = G_USHLSAT [[UV2]], [[UV5]](s16) - ; GFX8: [[USHLSAT1:%[0-9]+]]:_(s16) = G_USHLSAT [[UV3]], [[UV6]](s16) - ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[UV4]], [[UV7]](s16) - ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL]], [[UV7]](s16) - ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 - ; GFX8: [[ICMP:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[UV4]](s16), [[LSHR]] - ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s16), [[C]], [[SHL]] - ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[USHLSAT]](s16), [[USHLSAT1]](s16), [[SELECT]](s16) - ; GFX8: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF - ; GFX8: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF - ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[DEF]](<4 x s16>), [[DEF1]](<4 x s16>), [[DEF1]](<4 x s16>) - ; GFX8: [[UV8:%[0-9]+]]:_(<3 x s16>), [[UV9:%[0-9]+]]:_(<3 x s16>), [[UV10:%[0-9]+]]:_(<3 x s16>), [[UV11:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<12 x s16>) - ; GFX8: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[UV8]](<3 x s16>) - ; GFX8: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>) + ; GFX8: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>) + ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = 
G_BITCAST [[UV]](<2 x s16>) + ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32) + ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32) + ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32) + ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>) + ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32) + ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32) + ; GFX8: [[UV3:%[0-9]+]]:_(<2 x s16>), [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>) + ; GFX8: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>) + ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32) + ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32) + ; GFX8: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>) + ; GFX8: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32) + ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32) + ; GFX8: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32) + ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC3]](s16) + ; GFX8: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[SHL]], [[TRUNC3]](s16) + ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1 + ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC]](s16), [[LSHR4]] + ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[SHL]] + ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC4]](s16) + ; GFX8: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[TRUNC4]](s16) + ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC1]](s16), [[LSHR5]] + ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[C1]], [[SHL1]] + ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[TRUNC5]](s16) + ; GFX8: [[LSHR6:%[0-9]+]]:_(s16) = G_LSHR [[SHL2]], [[TRUNC5]](s16) + ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC2]](s16), [[LSHR6]] + ; GFX8: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[ICMP2]](s1), [[C1]], [[SHL2]] + ; GFX8: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF + ; GFX8: [[DEF1:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF + ; GFX8: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF + ; GFX8: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF + ; GFX8: [[UV6:%[0-9]+]]:_(<2 x s16>), [[UV7:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF2]](<4 x s16>) + ; GFX8: [[BITCAST4:%[0-9]+]]:_(s32) = G_BITCAST [[UV6]](<2 x s16>) + ; GFX8: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST4]], [[C]](s32) + ; GFX8: [[BITCAST5:%[0-9]+]]:_(s32) = G_BITCAST [[UV7]](<2 x s16>) + ; GFX8: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST5]], [[C]](s32) + ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT]](s16) + ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT1]](s16) + ; GFX8: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32) + ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL3]] + ; GFX8: [[BITCAST6:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32) + ; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT2]](s16) + ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 + ; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST4]](s32) + ; GFX8: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C2]] + ; GFX8: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND]], [[C]](s32) + ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL4]] + ; GFX8: [[BITCAST7:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32) + ; GFX8: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32) + ; GFX8: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C2]] + ; GFX8: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST5]](s32) + ; GFX8: 
[[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C2]]
+ ; GFX8: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C]](s32)
+ ; GFX8: [[OR2:%[0-9]+]]:_(s32) = G_OR [[AND1]], [[SHL5]]
+ ; GFX8: [[BITCAST8:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
+ ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST6]](<2 x s16>), [[BITCAST7]](<2 x s16>), [[BITCAST8]](<2 x s16>)
+ ; GFX8: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
 ; GFX9-LABEL: name: ushlsat_v3s16
 ; GFX9: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
- ; GFX9: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
- ; GFX9: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>)
- ; GFX9: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>)
- ; GFX9: [[USHLSAT:%[0-9]+]]:_(s16) = G_USHLSAT [[UV2]], [[UV5]](s16)
- ; GFX9: [[USHLSAT1:%[0-9]+]]:_(s16) = G_USHLSAT [[UV3]], [[UV6]](s16)
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[UV4]], [[UV7]](s16)
- ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL]], [[UV7]](s16)
- ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
- ; GFX9: [[ICMP:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[UV4]](s16), [[LSHR]]
- ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s16), [[C]], [[SHL]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[USHLSAT]](s16), [[USHLSAT1]](s16), [[SELECT]](s16)
- ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX9: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<12 x s16>) = G_CONCAT_VECTORS [[DEF]](<4 x s16>), [[DEF1]](<4 x s16>), [[DEF1]](<4 x s16>)
- ; GFX9: [[UV8:%[0-9]+]]:_(<3 x s16>), [[UV9:%[0-9]+]]:_(<3 x s16>), [[UV10:%[0-9]+]]:_(<3 x s16>), [[UV11:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<12 x s16>)
- ; GFX9: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[UV8]](<3 x s16>)
- ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>)
+ ; GFX9: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
+ ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX9: [[UV3:%[0-9]+]]:_(<2 x s16>), [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
+ ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; GFX9: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX9: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC3]](s16)
+ ; GFX9: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[SHL]], [[TRUNC3]](s16)
+ ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC]](s16), [[LSHR4]]
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[SHL]]
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC4]](s16)
+ ; GFX9: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[TRUNC4]](s16)
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC1]](s16), [[LSHR5]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[C1]], [[SHL1]]
+ ; GFX9: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[TRUNC5]](s16)
+ ; GFX9: [[LSHR6:%[0-9]+]]:_(s16) = G_LSHR [[SHL2]], [[TRUNC5]](s16)
+ ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC2]](s16), [[LSHR6]]
+ ; GFX9: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[ICMP2]](s1), [[C1]], [[SHL2]]
+ ; GFX9: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+ ; GFX9: [[DEF1:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF
+ ; GFX9: [[DEF2:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX9: [[DEF3:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX9: [[UV6:%[0-9]+]]:_(<2 x s16>), [[UV7:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF2]](<4 x s16>)
+ ; GFX9: [[BITCAST4:%[0-9]+]]:_(s32) = G_BITCAST [[UV6]](<2 x s16>)
+ ; GFX9: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST4]], [[C]](s32)
+ ; GFX9: [[BITCAST5:%[0-9]+]]:_(s32) = G_BITCAST [[UV7]](<2 x s16>)
+ ; GFX9: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST5]], [[C]](s32)
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT]](s16)
+ ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT1]](s16)
+ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32)
+ ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT2]](s16)
+ ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST4]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT2]](s32), [[COPY1]](s32)
+ ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32)
+ ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST5]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY2]](s32), [[COPY3]](s32)
+ ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>), [[BUILD_VECTOR_TRUNC2]](<2 x s16>)
+ ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
 %0:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
 %1:_(<3 x s16>), %2:_(<3 x s16>) = G_UNMERGE_VALUES %0
 %3:_(<3 x s16>) = G_USHLSAT %1, %2
@@ -536,8 +664,8 @@
 ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SHL]], [[AND]](s32)
 ; GFX6: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[SHL1]], [[AND]](s32)
 ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
- ; GFX6: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL]](s32), [[LSHR4]]
- ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C2]], [[SHL1]]
+ ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[LSHR4]]
+ ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C2]], [[SHL1]]
 ; GFX6: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[SELECT]], [[C]](s32)
 ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
 ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
@@ -545,8 +673,8 @@
 ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
 ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[SHL2]], [[AND1]](s32)
 ; GFX6: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[SHL3]], [[AND1]](s32)
- ; GFX6: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL2]](s32), [[LSHR6]]
- ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[C2]], [[SHL3]]
+ ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL2]](s32), [[LSHR6]]
+ ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C2]], [[SHL3]]
 ; GFX6: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[SELECT1]], [[C]](s32)
 ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
 ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32)
@@ -554,8 +682,8 @@
 ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[COPY6]], [[C]](s32)
 ; GFX6: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[SHL4]], [[AND2]](s32)
 ; GFX6: [[LSHR8:%[0-9]+]]:_(s32) = G_LSHR [[SHL5]], [[AND2]](s32)
- ; GFX6: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL4]](s32), [[LSHR8]]
- ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s32), [[C2]], [[SHL5]]
+ ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL4]](s32), [[LSHR8]]
+ ; GFX6: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[C2]], [[SHL5]]
 ; GFX6: [[LSHR9:%[0-9]+]]:_(s32) = G_LSHR [[SELECT2]], [[C]](s32)
 ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
 ; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
@@ -563,8 +691,8 @@
 ; GFX6: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[COPY8]], [[C]](s32)
 ; GFX6: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[SHL6]], [[AND3]](s32)
 ; GFX6: [[LSHR10:%[0-9]+]]:_(s32) = G_LSHR [[SHL7]], [[AND3]](s32)
- ; GFX6: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SHL6]](s32), [[LSHR10]]
- ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s32), [[C2]], [[SHL7]]
+ ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL6]](s32), [[LSHR10]]
+ ; GFX6: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[C2]], [[SHL7]]
 ; GFX6: [[LSHR11:%[0-9]+]]:_(s32) = G_LSHR [[SELECT3]], [[C]](s32)
 ; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32)
 ; GFX6: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C1]]
@@ -585,33 +713,101 @@
 ; GFX8-LABEL: name: ushlsat_v4s16
 ; GFX8: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
 ; GFX8: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX8: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX8: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX8: [[USHLSAT:%[0-9]+]]:_(s16) = G_USHLSAT [[UV]], [[UV4]](s16)
- ; GFX8: [[USHLSAT1:%[0-9]+]]:_(s16) = G_USHLSAT [[UV1]], [[UV5]](s16)
- ; GFX8: [[USHLSAT2:%[0-9]+]]:_(s16) = G_USHLSAT [[UV2]], [[UV6]](s16)
- ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[UV3]], [[UV7]](s16)
- ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL]], [[UV7]](s16)
- ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
- ; GFX8: [[ICMP:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[UV3]](s16), [[LSHR]]
- ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s16), [[C]], [[SHL]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[USHLSAT]](s16), [[USHLSAT1]](s16), [[USHLSAT2]](s16), [[SELECT]](s16)
- ; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; GFX8: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX8: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX8: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX8: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX8: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX8: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX8: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX8: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC4]](s16)
+ ; GFX8: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[SHL]], [[TRUNC4]](s16)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC]](s16), [[LSHR4]]
+ ; GFX8: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[SHL]]
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC5]](s16)
+ ; GFX8: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[TRUNC5]](s16)
+ ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC1]](s16), [[LSHR5]]
+ ; GFX8: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[C1]], [[SHL1]]
+ ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[TRUNC6]](s16)
+ ; GFX8: [[LSHR6:%[0-9]+]]:_(s16) = G_LSHR [[SHL2]], [[TRUNC6]](s16)
+ ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC2]](s16), [[LSHR6]]
+ ; GFX8: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[ICMP2]](s1), [[C1]], [[SHL2]]
+ ; GFX8: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[TRUNC7]](s16)
+ ; GFX8: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[SHL3]], [[TRUNC7]](s16)
+ ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC3]](s16), [[LSHR7]]
+ ; GFX8: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT [[ICMP3]](s1), [[C1]], [[SHL3]]
+ ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT]](s16)
+ ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT1]](s16)
+ ; GFX8: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32)
+ ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL4]]
+ ; GFX8: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT2]](s16)
+ ; GFX8: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[SELECT3]](s16)
+ ; GFX8: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[ZEXT3]], [[C]](s32)
+ ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL5]]
+ ; GFX8: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX8: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
 ; GFX9-LABEL: name: ushlsat_v4s16
 ; GFX9: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
 ; GFX9: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX9: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX9: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX9: [[USHLSAT:%[0-9]+]]:_(s16) = G_USHLSAT [[UV]], [[UV4]](s16)
- ; GFX9: [[USHLSAT1:%[0-9]+]]:_(s16) = G_USHLSAT [[UV1]], [[UV5]](s16)
- ; GFX9: [[USHLSAT2:%[0-9]+]]:_(s16) = G_USHLSAT [[UV2]], [[UV6]](s16)
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[UV3]], [[UV7]](s16)
- ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SHL]], [[UV7]](s16)
- ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
- ; GFX9: [[ICMP:%[0-9]+]]:_(s16) = G_ICMP intpred(ne), [[UV3]](s16), [[LSHR]]
- ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s16), [[C]], [[SHL]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[USHLSAT]](s16), [[USHLSAT1]](s16), [[USHLSAT2]](s16), [[SELECT]](s16)
- ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; GFX9: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX9: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX9: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX9: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX9: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX9: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC4]](s16)
+ ; GFX9: [[LSHR4:%[0-9]+]]:_(s16) = G_LSHR [[SHL]], [[TRUNC4]](s16)
+ ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC]](s16), [[LSHR4]]
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[ICMP]](s1), [[C1]], [[SHL]]
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC5]](s16)
+ ; GFX9: [[LSHR5:%[0-9]+]]:_(s16) = G_LSHR [[SHL1]], [[TRUNC5]](s16)
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC1]](s16), [[LSHR5]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[ICMP1]](s1), [[C1]], [[SHL1]]
+ ; GFX9: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[TRUNC6]](s16)
+ ; GFX9: [[LSHR6:%[0-9]+]]:_(s16) = G_LSHR [[SHL2]], [[TRUNC6]](s16)
+ ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC2]](s16), [[LSHR6]]
+ ; GFX9: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[ICMP2]](s1), [[C1]], [[SHL2]]
+ ; GFX9: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[TRUNC7]](s16)
+ ; GFX9: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[SHL3]], [[TRUNC7]](s16)
+ ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[TRUNC3]](s16), [[LSHR7]]
+ ; GFX9: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT [[ICMP3]](s1), [[C1]], [[SHL3]]
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT]](s16)
+ ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT1]](s16)
+ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[ANYEXT1]](s32)
+ ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT2]](s16)
+ ; GFX9: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[SELECT3]](s16)
+ ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT2]](s32), [[ANYEXT3]](s32)
+ ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR_TRUNC]](<2 x s16>), [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
+ ; GFX9: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
 %0:_(<4 x s16>) = COPY $vgpr0_vgpr1
 %1:_(<4 x s16>) = COPY $vgpr2_vgpr3
 %2:_(<4 x s16>) = G_USHLSAT %0, %1
@@ -630,8 +826,8 @@
 ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY1]](s32)
 ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[SHL]], [[COPY1]](s32)
 ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
- ; GFX6: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[COPY]](s32), [[LSHR]]
- ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C]], [[SHL]]
+ ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[LSHR]]
+ ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[SHL]]
 ; GFX6: $vgpr0 = COPY [[SELECT]](s32)
 ; GFX8-LABEL: name: ushlsat_s32
 ; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -639,8 +835,8 @@
 ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY1]](s32)
 ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[SHL]], [[COPY1]](s32)
 ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
- ; GFX8: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[COPY]](s32), [[LSHR]]
- ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C]], [[SHL]]
+ ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[LSHR]]
+ ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[SHL]]
 ; GFX8: $vgpr0 = COPY [[SELECT]](s32)
 ; GFX9-LABEL: name: ushlsat_s32
 ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -648,8 +844,8 @@
 ; GFX9: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY1]](s32)
 ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[SHL]], [[COPY1]](s32)
 ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
- ; GFX9: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[COPY]](s32), [[LSHR]]
- ; GFX9: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C]], [[SHL]]
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[LSHR]]
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[SHL]]
 ; GFX9: $vgpr0 = COPY [[SELECT]](s32)
 %0:_(s32) = COPY $vgpr0
 %1:_(s32) = COPY $vgpr1
@@ -671,12 +867,12 @@
 ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[UV2]](s32)
 ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[SHL]], [[UV2]](s32)
 ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
- ; GFX6: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UV]](s32), [[LSHR]]
- ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C]], [[SHL]]
+ ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[LSHR]]
+ ; GFX6: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[SHL]]
 ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[UV3]](s32)
 ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SHL1]], [[UV3]](s32)
- ; GFX6: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UV1]](s32), [[LSHR1]]
- ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[C]], [[SHL1]]
+ ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[LSHR1]]
+ ; GFX6: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C]], [[SHL1]]
 ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32)
 ; GFX6: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
 ; GFX8-LABEL: name: ushlsat_v2s32
@@ -687,12 +883,12 @@
 ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[UV2]](s32)
 ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[SHL]], [[UV2]](s32)
 ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
- ; GFX8: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UV]](s32), [[LSHR]]
- ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C]], [[SHL]]
+ ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[LSHR]]
+ ; GFX8: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[SHL]]
 ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[UV3]](s32)
 ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SHL1]], [[UV3]](s32)
- ; GFX8: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UV1]](s32), [[LSHR1]]
- ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[C]], [[SHL1]]
+ ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[LSHR1]]
+ ; GFX8: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C]], [[SHL1]]
 ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32)
 ; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
 ; GFX9-LABEL: name: ushlsat_v2s32
@@ -703,12 +899,12 @@
 ; GFX9: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[UV]], [[UV2]](s32)
 ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[SHL]], [[UV2]](s32)
 ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
- ; GFX9: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UV]](s32), [[LSHR]]
- ; GFX9: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s32), [[C]], [[SHL]]
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[LSHR]]
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[SHL]]
 ; GFX9: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[UV1]], [[UV3]](s32)
 ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[SHL1]], [[UV3]](s32)
- ; GFX9: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UV1]](s32), [[LSHR1]]
- ; GFX9: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s32), [[C]], [[SHL1]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[LSHR1]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[C]], [[SHL1]]
 ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32)
 ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
@@ -726,29 +922,32 @@
 ; GFX6-LABEL: name: ushlsat_s64
 ; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
 ; GFX6: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[COPY1]](s64)
- ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[SHL]], [[COPY1]](s64)
+ ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[TRUNC]](s32)
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[SHL]], [[TRUNC]](s32)
 ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
- ; GFX6: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[COPY]](s64), [[LSHR]]
- ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s64), [[C]], [[SHL]]
+ ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s64), [[LSHR]]
+ ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[SHL]]
 ; GFX6: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
 ; GFX8-LABEL: name: ushlsat_s64
 ; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
 ; GFX8: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[COPY1]](s64)
- ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[SHL]], [[COPY1]](s64)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[TRUNC]](s32)
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[SHL]], [[TRUNC]](s32)
 ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
- ; GFX8: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[COPY]](s64), [[LSHR]]
- ; GFX8: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s64), [[C]], [[SHL]]
+ ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s64), [[LSHR]]
+ ; GFX8: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[SHL]]
 ; GFX8: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
 ; GFX9-LABEL: name: ushlsat_s64
 ; GFX9: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
 ; GFX9: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[COPY1]](s64)
- ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[SHL]], [[COPY1]](s64)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[TRUNC]](s32)
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[SHL]], [[TRUNC]](s32)
 ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
- ; GFX9: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[COPY]](s64), [[LSHR]]
- ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s64), [[C]], [[SHL]]
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s64), [[LSHR]]
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[SHL]]
 ; GFX9: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
 %0:_(s64) = COPY $vgpr0_vgpr1
 %1:_(s64) = COPY $vgpr2_vgpr3
@@ -767,39 +966,54 @@
 ; GFX6: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
 ; GFX6: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
 ; GFX6: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX6: [[USHLSAT:%[0-9]+]]:_(s64) = G_USHLSAT [[UV]], [[UV2]](s64)
- ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[UV3]](s64)
- ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[SHL]], [[UV3]](s64)
+ ; GFX6: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[UV2]](s64)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[TRUNC]](s32)
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[SHL]], [[TRUNC]](s32)
 ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
- ; GFX6: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[UV1]](s64), [[LSHR]]
- ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s64), [[C]], [[SHL]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[USHLSAT]](s64), [[SELECT]](s64)
+ ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s64), [[LSHR]]
+ ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[SHL]]
+ ; GFX6: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[UV3]](s64)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC1]](s32)
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[SHL1]], [[TRUNC1]](s32)
+ ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s64), [[LSHR1]]
+ ; GFX6: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C]], [[SHL1]]
+ ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
 ; GFX6: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
 ; GFX8-LABEL: name: ushlsat_v2s64
 ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
 ; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
 ; GFX8: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
 ; GFX8: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX8: [[USHLSAT:%[0-9]+]]:_(s64) = G_USHLSAT [[UV]], [[UV2]](s64)
- ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[UV3]](s64)
- ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[SHL]], [[UV3]](s64)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[UV2]](s64)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[TRUNC]](s32)
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[SHL]], [[TRUNC]](s32)
 ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
- ; GFX8: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[UV1]](s64), [[LSHR]]
- ; GFX8: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s64), [[C]], [[SHL]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[USHLSAT]](s64), [[SELECT]](s64)
+ ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s64), [[LSHR]]
+ ; GFX8: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[SHL]]
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[UV3]](s64)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC1]](s32)
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[SHL1]], [[TRUNC1]](s32)
+ ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s64), [[LSHR1]]
+ ; GFX8: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C]], [[SHL1]]
+ ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
 ; GFX8: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
 ; GFX9-LABEL: name: ushlsat_v2s64
 ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
 ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
 ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
 ; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX9: [[USHLSAT:%[0-9]+]]:_(s64) = G_USHLSAT [[UV]], [[UV2]](s64)
- ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[UV3]](s64)
- ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[SHL]], [[UV3]](s64)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[UV2]](s64)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[TRUNC]](s32)
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[SHL]], [[TRUNC]](s32)
 ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
- ; GFX9: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ne), [[UV1]](s64), [[LSHR]]
- ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s64), [[C]], [[SHL]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[USHLSAT]](s64), [[SELECT]](s64)
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s64), [[LSHR]]
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[SHL]]
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[UV3]](s64)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC1]](s32)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[SHL1]], [[TRUNC1]](s32)
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s64), [[LSHR1]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C]], [[SHL1]]
+ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
 ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
 %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
 %1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7