diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -169,6 +169,10 @@
 
   getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
       .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
+      .clampNumElements(0, v8s8, v16s8)
+      .clampNumElements(0, v4s16, v8s16)
+      .clampNumElements(0, v2s32, v4s32)
+      .clampNumElements(0, v2s64, v2s64)
       .lower();
 
   getActionDefinitionsBuilder(
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
@@ -42,6 +42,34 @@
     $q0 = COPY %smin
     RET_ReallyLR implicit $q0
 
+...
+---
+name: v32s8_smin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v32s8_smin
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[SMIN:%[0-9]+]]:_(<16 x s8>) = G_SMIN [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[SMIN1:%[0-9]+]]:_(<16 x s8>) = G_SMIN [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[SMIN]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[SMIN1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16)
+    %vec:_(<32 x s8>) = G_IMPLICIT_DEF
+    %vec1:_(<32 x s8>) = G_IMPLICIT_DEF
+    %smin:_(<32 x s8>) = G_SMIN %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %smin(<32 x s8>), %1(p0) :: (store (<32 x s8>))
+
 ...
 ---
 name: v4s16_smin
@@ -84,6 +112,34 @@
     $q0 = COPY %smin
     RET_ReallyLR implicit $q0
 
+...
+--- +name: v16s16_smin +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $q0, $q1 + + ; CHECK-LABEL: name: v16s16_smin + ; CHECK: liveins: $x0, $q0, $q1 + ; CHECK: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[SMIN:%[0-9]+]]:_(<8 x s16>) = G_SMIN [[BUILD_VECTOR]], [[BUILD_VECTOR2]] + ; CHECK: [[SMIN1:%[0-9]+]]:_(<8 x s16>) = G_SMIN [[BUILD_VECTOR1]], [[BUILD_VECTOR3]] + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: G_STORE [[SMIN]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK: G_STORE [[SMIN1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16) + %vec:_(<16 x s16>) = G_IMPLICIT_DEF + %vec1:_(<16 x s16>) = G_IMPLICIT_DEF + %smin:_(<16 x s16>) = G_SMIN %vec, %vec1 + %1:_(p0) = COPY $x0 + G_STORE %smin(<16 x s16>), %1(p0) :: (store (<16 x s16>)) + ... --- name: v2s32_smin @@ -126,6 +182,34 @@ $q0 = COPY %smin RET_ReallyLR implicit $q0 +... +--- +name: v8s32_smin +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $q0, $q1 + + ; CHECK-LABEL: name: v8s32_smin + ; CHECK: liveins: $x0, $q0, $q1 + ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[SMIN:%[0-9]+]]:_(<4 x s32>) = G_SMIN [[BUILD_VECTOR]], [[BUILD_VECTOR2]] + ; CHECK: [[SMIN1:%[0-9]+]]:_(<4 x s32>) = G_SMIN [[BUILD_VECTOR1]], [[BUILD_VECTOR3]] + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: G_STORE [[SMIN]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK: G_STORE [[SMIN1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16) + %vec:_(<8 x s32>) = G_IMPLICIT_DEF + %vec1:_(<8 x s32>) = G_IMPLICIT_DEF + %smin:_(<8 x s32>) = G_SMIN %vec, %vec1 + %1:_(p0) = COPY $x0 + G_STORE %smin(<8 x s32>), %1(p0) :: (store (<8 x s32>)) + ... --- name: v2s64_smin @@ -158,6 +242,50 @@ $q0 = COPY %smin RET_ReallyLR implicit $q0 +... 
+--- +name: v4s64_smin +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $q0, $q1 + + ; CHECK-LABEL: name: v4s64_smin + ; CHECK: liveins: $x0, $q0, $q1 + ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF + ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(slt), [[DEF]](<2 x s64>), [[DEF]] + ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63 + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64) + ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>) + ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>) + ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1 + ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64) + ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]] + ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR]] + ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]] + ; CHECK: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]] + ; CHECK: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(slt), [[DEF]](<2 x s64>), [[DEF]] + ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP1]](<2 x s64>) + ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64) + ; CHECK: [[SHL1:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY1]], [[BUILD_VECTOR2]](<2 x s64>) + ; CHECK: [[ASHR1:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL1]], [[BUILD_VECTOR2]](<2 x s64>) + ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64) + ; CHECK: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR1]], [[BUILD_VECTOR3]] + ; CHECK: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR1]] + ; CHECK: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]] + ; CHECK: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]] + ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: G_STORE [[OR]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>), align 32) + ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s64) + ; CHECK: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16) + %vec:_(<4 x s64>) = G_IMPLICIT_DEF + %vec1:_(<4 x s64>) = G_IMPLICIT_DEF + %smin:_(<4 x s64>) = G_SMIN %vec, %vec1 + %1:_(p0) = COPY $x0 + G_STORE %smin(<4 x s64>), %1(p0) :: (store (<4 x s64>)) + ... --- name: v8s8_umin @@ -200,6 +328,34 @@ $q0 = COPY %umin RET_ReallyLR implicit $q0 +... 
+--- +name: v32s8_umin +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $q0, $q1 + + ; CHECK-LABEL: name: v32s8_umin + ; CHECK: liveins: $x0, $q0, $q1 + ; CHECK: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8) + ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8) + ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8) + ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8) + ; CHECK: [[UMIN:%[0-9]+]]:_(<16 x s8>) = G_UMIN [[BUILD_VECTOR]], [[BUILD_VECTOR2]] + ; CHECK: [[UMIN1:%[0-9]+]]:_(<16 x s8>) = G_UMIN [[BUILD_VECTOR1]], [[BUILD_VECTOR3]] + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: G_STORE [[UMIN]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK: G_STORE [[UMIN1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16) + %vec:_(<32 x s8>) = G_IMPLICIT_DEF + %vec1:_(<32 x s8>) = G_IMPLICIT_DEF + %umin:_(<32 x s8>) = G_UMIN %vec, %vec1 + %1:_(p0) = COPY $x0 + G_STORE %umin(<32 x s8>), %1(p0) :: (store (<32 x s8>)) + ... --- name: v4s16_umin @@ -242,6 +398,34 @@ $q0 = COPY %umin RET_ReallyLR implicit $q0 +... 
+--- +name: v16s16_umin +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $q0, $q1 + + ; CHECK-LABEL: name: v16s16_umin + ; CHECK: liveins: $x0, $q0, $q1 + ; CHECK: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[UMIN:%[0-9]+]]:_(<8 x s16>) = G_UMIN [[BUILD_VECTOR]], [[BUILD_VECTOR2]] + ; CHECK: [[UMIN1:%[0-9]+]]:_(<8 x s16>) = G_UMIN [[BUILD_VECTOR1]], [[BUILD_VECTOR3]] + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: G_STORE [[UMIN]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK: G_STORE [[UMIN1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16) + %vec:_(<16 x s16>) = G_IMPLICIT_DEF + %vec1:_(<16 x s16>) = G_IMPLICIT_DEF + %umin:_(<16 x s16>) = G_UMIN %vec, %vec1 + %1:_(p0) = COPY $x0 + G_STORE %umin(<16 x s16>), %1(p0) :: (store (<16 x s16>)) + ... --- name: v2s32_umin @@ -284,6 +468,34 @@ $q0 = COPY %umin RET_ReallyLR implicit $q0 +... +--- +name: v8s32_umin +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $q0, $q1 + + ; CHECK-LABEL: name: v8s32_umin + ; CHECK: liveins: $x0, $q0, $q1 + ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[UMIN:%[0-9]+]]:_(<4 x s32>) = G_UMIN [[BUILD_VECTOR]], [[BUILD_VECTOR2]] + ; CHECK: [[UMIN1:%[0-9]+]]:_(<4 x s32>) = G_UMIN [[BUILD_VECTOR1]], [[BUILD_VECTOR3]] + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: G_STORE [[UMIN]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK: G_STORE [[UMIN1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16) + %vec:_(<8 x s32>) = G_IMPLICIT_DEF + %vec1:_(<8 x s32>) = G_IMPLICIT_DEF + %umin:_(<8 x s32>) = G_UMIN %vec, %vec1 + %1:_(p0) = COPY $x0 + G_STORE %umin(<8 x s32>), %1(p0) :: (store (<8 x s32>)) + ... --- name: v2s64_umin @@ -316,6 +528,50 @@ $q0 = COPY %umin RET_ReallyLR implicit $q0 +... 
+--- +name: v4s64_umin +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $q0, $q1 + + ; CHECK-LABEL: name: v4s64_umin + ; CHECK: liveins: $x0, $q0, $q1 + ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF + ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ult), [[DEF]](<2 x s64>), [[DEF]] + ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63 + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64) + ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>) + ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>) + ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1 + ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64) + ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]] + ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR]] + ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]] + ; CHECK: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]] + ; CHECK: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ult), [[DEF]](<2 x s64>), [[DEF]] + ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP1]](<2 x s64>) + ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64) + ; CHECK: [[SHL1:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY1]], [[BUILD_VECTOR2]](<2 x s64>) + ; CHECK: [[ASHR1:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL1]], [[BUILD_VECTOR2]](<2 x s64>) + ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64) + ; CHECK: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR1]], [[BUILD_VECTOR3]] + ; CHECK: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR1]] + ; CHECK: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]] + ; CHECK: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]] + ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: G_STORE [[OR]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>), align 32) + ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s64) + ; CHECK: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16) + %vec:_(<4 x s64>) = G_IMPLICIT_DEF + %vec1:_(<4 x s64>) = G_IMPLICIT_DEF + %umin:_(<4 x s64>) = G_UMIN %vec, %vec1 + %1:_(p0) = COPY $x0 + G_STORE %umin(<4 x s64>), %1(p0) :: (store (<4 x s64>)) + ... --- name: v8s8_smax @@ -379,6 +635,34 @@ $x0 = COPY %smax RET_ReallyLR implicit $x0 +... 
+--- +name: v32s8_smax +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $q0, $q1 + + ; CHECK-LABEL: name: v32s8_smax + ; CHECK: liveins: $x0, $q0, $q1 + ; CHECK: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8) + ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8) + ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8) + ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8) + ; CHECK: [[SMAX:%[0-9]+]]:_(<16 x s8>) = G_SMAX [[BUILD_VECTOR]], [[BUILD_VECTOR2]] + ; CHECK: [[SMAX1:%[0-9]+]]:_(<16 x s8>) = G_SMAX [[BUILD_VECTOR1]], [[BUILD_VECTOR3]] + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: G_STORE [[SMAX]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK: G_STORE [[SMAX1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16) + %vec:_(<32 x s8>) = G_IMPLICIT_DEF + %vec1:_(<32 x s8>) = G_IMPLICIT_DEF + %smax:_(<32 x s8>) = G_SMAX %vec, %vec1 + %1:_(p0) = COPY $x0 + G_STORE %smax(<32 x s8>), %1(p0) :: (store (<32 x s8>)) + ... --- name: v8s16_smax @@ -400,6 +684,34 @@ $q0 = COPY %smax RET_ReallyLR implicit $q0 +... 
+--- +name: v16s16_smax +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $q0, $q1 + + ; CHECK-LABEL: name: v16s16_smax + ; CHECK: liveins: $x0, $q0, $q1 + ; CHECK: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[SMAX:%[0-9]+]]:_(<8 x s16>) = G_SMAX [[BUILD_VECTOR]], [[BUILD_VECTOR2]] + ; CHECK: [[SMAX1:%[0-9]+]]:_(<8 x s16>) = G_SMAX [[BUILD_VECTOR1]], [[BUILD_VECTOR3]] + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: G_STORE [[SMAX]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK: G_STORE [[SMAX1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16) + %vec:_(<16 x s16>) = G_IMPLICIT_DEF + %vec1:_(<16 x s16>) = G_IMPLICIT_DEF + %smax:_(<16 x s16>) = G_SMAX %vec, %vec1 + %1:_(p0) = COPY $x0 + G_STORE %smax(<16 x s16>), %1(p0) :: (store (<16 x s16>)) + ... --- name: v2s32_smax @@ -442,6 +754,34 @@ $q0 = COPY %smax RET_ReallyLR implicit $q0 +... +--- +name: v8s32_smax +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $q0, $q1 + + ; CHECK-LABEL: name: v8s32_smax + ; CHECK: liveins: $x0, $q0, $q1 + ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[SMAX:%[0-9]+]]:_(<4 x s32>) = G_SMAX [[BUILD_VECTOR]], [[BUILD_VECTOR2]] + ; CHECK: [[SMAX1:%[0-9]+]]:_(<4 x s32>) = G_SMAX [[BUILD_VECTOR1]], [[BUILD_VECTOR3]] + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: G_STORE [[SMAX]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK: G_STORE [[SMAX1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16) + %vec:_(<8 x s32>) = G_IMPLICIT_DEF + %vec1:_(<8 x s32>) = G_IMPLICIT_DEF + %smax:_(<8 x s32>) = G_SMAX %vec, %vec1 + %1:_(p0) = COPY $x0 + G_STORE %smax(<8 x s32>), %1(p0) :: (store (<8 x s32>)) + ... --- name: v2s64_smax @@ -474,6 +814,50 @@ $q0 = COPY %smax RET_ReallyLR implicit $q0 +... 
+--- +name: v4s64_smax +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $q0, $q1 + + ; CHECK-LABEL: name: v4s64_smax + ; CHECK: liveins: $x0, $q0, $q1 + ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF + ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(sgt), [[DEF]](<2 x s64>), [[DEF]] + ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63 + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64) + ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>) + ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>) + ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1 + ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64) + ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]] + ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR]] + ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]] + ; CHECK: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]] + ; CHECK: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(sgt), [[DEF]](<2 x s64>), [[DEF]] + ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP1]](<2 x s64>) + ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64) + ; CHECK: [[SHL1:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY1]], [[BUILD_VECTOR2]](<2 x s64>) + ; CHECK: [[ASHR1:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL1]], [[BUILD_VECTOR2]](<2 x s64>) + ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64) + ; CHECK: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR1]], [[BUILD_VECTOR3]] + ; CHECK: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR1]] + ; CHECK: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]] + ; CHECK: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]] + ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: G_STORE [[OR]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>), align 32) + ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s64) + ; CHECK: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16) + %vec:_(<4 x s64>) = G_IMPLICIT_DEF + %vec1:_(<4 x s64>) = G_IMPLICIT_DEF + %smax:_(<4 x s64>) = G_SMAX %vec, %vec1 + %1:_(p0) = COPY $x0 + G_STORE %smax(<4 x s64>), %1(p0) :: (store (<4 x s64>)) + ... --- name: v8s8_umax @@ -516,6 +900,34 @@ $q0 = COPY %umax RET_ReallyLR implicit $q0 +... 
+--- +name: v32s8_umax +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $q0, $q1 + + ; CHECK-LABEL: name: v32s8_umax + ; CHECK: liveins: $x0, $q0, $q1 + ; CHECK: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8) + ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8) + ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8) + ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8) + ; CHECK: [[UMAX:%[0-9]+]]:_(<16 x s8>) = G_UMAX [[BUILD_VECTOR]], [[BUILD_VECTOR2]] + ; CHECK: [[UMAX1:%[0-9]+]]:_(<16 x s8>) = G_UMAX [[BUILD_VECTOR1]], [[BUILD_VECTOR3]] + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: G_STORE [[UMAX]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK: G_STORE [[UMAX1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16) + %vec:_(<32 x s8>) = G_IMPLICIT_DEF + %vec1:_(<32 x s8>) = G_IMPLICIT_DEF + %umax:_(<32 x s8>) = G_UMAX %vec, %vec1 + %1:_(p0) = COPY $x0 + G_STORE %umax(<32 x s8>), %1(p0) :: (store (<32 x s8>)) + ... --- name: v4s16_umax @@ -558,6 +970,34 @@ $q0 = COPY %umax RET_ReallyLR implicit $q0 +... 
+--- +name: v16s16_umax +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $q0, $q1 + + ; CHECK-LABEL: name: v16s16_umax + ; CHECK: liveins: $x0, $q0, $q1 + ; CHECK: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16) + ; CHECK: [[UMAX:%[0-9]+]]:_(<8 x s16>) = G_UMAX [[BUILD_VECTOR]], [[BUILD_VECTOR2]] + ; CHECK: [[UMAX1:%[0-9]+]]:_(<8 x s16>) = G_UMAX [[BUILD_VECTOR1]], [[BUILD_VECTOR3]] + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: G_STORE [[UMAX]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK: G_STORE [[UMAX1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16) + %vec:_(<16 x s16>) = G_IMPLICIT_DEF + %vec1:_(<16 x s16>) = G_IMPLICIT_DEF + %umax:_(<16 x s16>) = G_UMAX %vec, %vec1 + %1:_(p0) = COPY $x0 + G_STORE %umax(<16 x s16>), %1(p0) :: (store (<16 x s16>)) + ... --- name: v2s32_umax @@ -600,6 +1040,34 @@ $q0 = COPY %umax RET_ReallyLR implicit $q0 +... +--- +name: v8s32_umax +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $q0, $q1 + + ; CHECK-LABEL: name: v8s32_umax + ; CHECK: liveins: $x0, $q0, $q1 + ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32) + ; CHECK: [[UMAX:%[0-9]+]]:_(<4 x s32>) = G_UMAX [[BUILD_VECTOR]], [[BUILD_VECTOR2]] + ; CHECK: [[UMAX1:%[0-9]+]]:_(<4 x s32>) = G_UMAX [[BUILD_VECTOR1]], [[BUILD_VECTOR3]] + ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: G_STORE [[UMAX]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64) + ; CHECK: G_STORE [[UMAX1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16) + %vec:_(<8 x s32>) = G_IMPLICIT_DEF + %vec1:_(<8 x s32>) = G_IMPLICIT_DEF + %umax:_(<8 x s32>) = G_UMAX %vec, %vec1 + %1:_(p0) = COPY $x0 + G_STORE %umax(<8 x s32>), %1(p0) :: (store (<8 x s32>)) + ... --- name: v2s64_umax @@ -633,4 +1101,48 @@ RET_ReallyLR implicit $q0 ... 
+--- +name: v4s64_umax +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $q0, $q1 + + ; CHECK-LABEL: name: v4s64_umax + ; CHECK: liveins: $x0, $q0, $q1 + ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF + ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ugt), [[DEF]](<2 x s64>), [[DEF]] + ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>) + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63 + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64) + ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>) + ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>) + ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1 + ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64) + ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]] + ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR]] + ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]] + ; CHECK: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]] + ; CHECK: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ugt), [[DEF]](<2 x s64>), [[DEF]] + ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP1]](<2 x s64>) + ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64) + ; CHECK: [[SHL1:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY1]], [[BUILD_VECTOR2]](<2 x s64>) + ; CHECK: [[ASHR1:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL1]], [[BUILD_VECTOR2]](<2 x s64>) + ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64) + ; CHECK: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR1]], [[BUILD_VECTOR3]] + ; CHECK: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR1]] + ; CHECK: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]] + ; CHECK: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]] + ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x0 + ; CHECK: G_STORE [[OR]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>), align 32) + ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s64) + ; CHECK: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16) + %vec:_(<4 x s64>) = G_IMPLICIT_DEF + %vec1:_(<4 x s64>) = G_IMPLICIT_DEF + %umax:_(<4 x s64>) = G_UMAX %vec, %vec1 + %1:_(p0) = COPY $x0 + G_STORE %umax(<4 x s64>), %1(p0) :: (store (<4 x s64>)) + +... 
diff --git a/llvm/test/CodeGen/AArch64/min-max.ll b/llvm/test/CodeGen/AArch64/min-max.ll
--- a/llvm/test/CodeGen/AArch64/min-max.ll
+++ b/llvm/test/CodeGen/AArch64/min-max.ll
@@ -78,6 +78,20 @@
   ret <16 x i8> %c
 }
 
+declare <32 x i8> @llvm.smax.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
+
+define void @smax32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+; CHECK-LABEL: smax32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smax v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    smax v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <32 x i8> @llvm.smax.v32i8(<32 x i8> %a, <32 x i8> %b)
+  store <32 x i8> %c, <32 x i8>* %p
+  ret void
+}
+
 declare <4 x i16> @llvm.smax.v4i16(<4 x i16> %a, <4 x i16> %b) readnone
 
 define <4 x i16> @smax4i16(<4 x i16> %a, <4 x i16> %b) {
@@ -100,6 +114,20 @@
   ret <8 x i16> %c
 }
 
+declare <16 x i16> @llvm.smax.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
+
+define void @smax16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+; CHECK-LABEL: smax16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smax v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    smax v1.8h, v1.8h, v3.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <16 x i16> @llvm.smax.v16i16(<16 x i16> %a, <16 x i16> %b)
+  store <16 x i16> %c, <16 x i16>* %p
+  ret void
+}
+
 declare <2 x i32> @llvm.smax.v2i32(<2 x i32> %a, <2 x i32> %b) readnone
 
 define <2 x i32> @smax2i32(<2 x i32> %a, <2 x i32> %b) {
@@ -122,6 +150,20 @@
   ret <4 x i32> %c
 }
 
+declare <8 x i32> @llvm.smax.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
+
+define void @smax8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+; CHECK-LABEL: smax8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smax v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    smax v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <8 x i32>@llvm.smax.v8i32(<8 x i32> %a, <8 x i32> %b)
+  store <8 x i32> %c, <8 x i32>* %p
+  ret void
+}
+
 declare <1 x i64> @llvm.smax.v1i64(<1 x i64> %a, <1 x i64> %b) readnone
 
 define <1 x i64> @smax1i64(<1 x i64> %a, <1 x i64> %b) {
@@ -159,6 +201,38 @@
   ret <2 x i64> %c
 }
 
+declare <4 x i64> @llvm.smax.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
+
+define void @smax4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+; CHECK-LABEL: smax4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, v2.d[1]
+; CHECK-NEXT:    mov x9, v0.d[1]
+; CHECK-NEXT:    fmov x10, d2
+; CHECK-NEXT:    fmov x11, d0
+; CHECK-NEXT:    cmp x9, x8
+; CHECK-NEXT:    csel x8, x9, x8, gt
+; CHECK-NEXT:    cmp x11, x10
+; CHECK-NEXT:    mov x9, v3.d[1]
+; CHECK-NEXT:    csel x10, x11, x10, gt
+; CHECK-NEXT:    mov x11, v1.d[1]
+; CHECK-NEXT:    cmp x11, x9
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov x10, d3
+; CHECK-NEXT:    csel x9, x11, x9, gt
+; CHECK-NEXT:    fmov x11, d1
+; CHECK-NEXT:    cmp x11, x10
+; CHECK-NEXT:    csel x10, x11, x10, gt
+; CHECK-NEXT:    fmov d1, x10
+; CHECK-NEXT:    mov v0.d[1], x8
+; CHECK-NEXT:    mov v1.d[1], x9
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %a, <4 x i64> %b)
+  store <4 x i64> %c, <4 x i64>* %p
+  ret void
+}
+
 declare i8 @llvm.umax.i8(i8 %a, i8 %b) readnone
 
 define i8 @umaxi8(i8 %a, i8 %b) {
@@ -233,6 +307,20 @@
   ret <16 x i8> %c
 }
 
+declare <32 x i8> @llvm.umax.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
+
+define void @umax32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+; CHECK-LABEL: umax32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umax v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    umax v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <32 x i8> @llvm.umax.v32i8(<32 x i8> %a, <32 x i8> %b)
+  store <32 x i8> %c, <32 x i8>* %p
+  ret void
+}
+
 declare <4 x i16> @llvm.umax.v4i16(<4 x i16> %a, <4 x i16> %b) readnone
 
 define <4 x i16> @umax4i16(<4 x i16> %a, <4 x i16> %b) {
@@ -255,6 +343,20 @@
   ret <8 x i16> %c
 }
 
+declare <16 x i16> @llvm.umax.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
+
+define void @umax16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+; CHECK-LABEL: umax16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umax v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    umax v1.8h, v1.8h, v3.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <16 x i16> @llvm.umax.v16i16(<16 x i16> %a, <16 x i16> %b)
+  store <16 x i16> %c, <16 x i16>* %p
+  ret void
+}
+
 declare <2 x i32> @llvm.umax.v2i32(<2 x i32> %a, <2 x i32> %b) readnone
 
 define <2 x i32> @umax2i32(<2 x i32> %a, <2 x i32> %b) {
@@ -277,6 +379,20 @@
   ret <4 x i32> %c
 }
 
+declare <8 x i32> @llvm.umax.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
+
+define void @umax8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+; CHECK-LABEL: umax8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umax v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    umax v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <8 x i32>@llvm.umax.v8i32(<8 x i32> %a, <8 x i32> %b)
+  store <8 x i32> %c, <8 x i32>* %p
+  ret void
+}
+
 declare <1 x i64> @llvm.umax.v1i64(<1 x i64> %a, <1 x i64> %b) readnone
 
 define <1 x i64> @umax1i64(<1 x i64> %a, <1 x i64> %b) {
@@ -306,6 +422,22 @@
   ret <2 x i64> %c
 }
 
+declare <4 x i64> @llvm.umax.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
+
+define void @umax4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+; CHECK-LABEL: umax4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uqsub v2.2d, v2.2d, v0.2d
+; CHECK-NEXT:    uqsub v3.2d, v3.2d, v1.2d
+; CHECK-NEXT:    add v0.2d, v0.2d, v2.2d
+; CHECK-NEXT:    add v1.2d, v1.2d, v3.2d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %a, <4 x i64> %b)
+  store <4 x i64> %c, <4 x i64>* %p
+  ret void
+}
+
 declare i8 @llvm.smin.i8(i8 %a, i8 %b) readnone
 
 define i8 @smini8(i8 %a, i8 %b) {
@@ -380,6 +512,20 @@
   ret <16 x i8> %c
 }
 
+declare <32 x i8> @llvm.smin.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
+
+define void @smin32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+; CHECK-LABEL: smin32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smin v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    smin v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <32 x i8> @llvm.smin.v32i8(<32 x i8> %a, <32 x i8> %b)
+  store <32 x i8> %c, <32 x i8>* %p
+  ret void
+}
+
 declare <4 x i16> @llvm.smin.v4i16(<4 x i16> %a, <4 x i16> %b) readnone
 
 define <4 x i16> @smin4i16(<4 x i16> %a, <4 x i16> %b) {
@@ -402,6 +548,20 @@
   ret <8 x i16> %c
 }
 
+declare <16 x i16> @llvm.smin.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
+
+define void @smin16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+; CHECK-LABEL: smin16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smin v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    smin v1.8h, v1.8h, v3.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <16 x i16> @llvm.smin.v16i16(<16 x i16> %a, <16 x i16> %b)
+  store <16 x i16> %c, <16 x i16>* %p
+  ret void
+}
+
 declare <2 x i32> @llvm.smin.v2i32(<2 x i32> %a, <2 x i32> %b) readnone
 
 define <2 x i32> @smin2i32(<2 x i32> %a, <2 x i32> %b) {
@@ -424,6 +584,20 @@
   ret <4 x i32> %c
 }
 
+declare <8 x i32> @llvm.smin.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
+
+define void @smin8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+; CHECK-LABEL: smin8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smin v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    smin v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <8 x i32>@llvm.smin.v8i32(<8 x i32> %a, <8 x i32> %b)
+  store <8 x i32> %c, <8 x i32>* %p
+  ret void
+}
+
 declare <1 x i64> @llvm.smin.v1i64(<1 x i64> %a, <1 x i64> %b) readnone
 
 define <1 x i64> @smin1i64(<1 x i64> %a, <1 x i64> %b) {
@@ -461,6 +635,38 @@
   ret <2 x i64> %c
 }
 
+declare <4 x i64> @llvm.smin.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
+
+define void @smin4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+; CHECK-LABEL: smin4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, v2.d[1]
+; CHECK-NEXT:    mov x9, v0.d[1]
+; CHECK-NEXT:    fmov x10, d2
+; CHECK-NEXT:    fmov x11, d0
+; CHECK-NEXT:    cmp x9, x8
+; CHECK-NEXT:    csel x8, x9, x8, lt
+; CHECK-NEXT:    cmp x11, x10
+; CHECK-NEXT:    mov x9, v3.d[1]
+; CHECK-NEXT:    csel x10, x11, x10, lt
+; CHECK-NEXT:    mov x11, v1.d[1]
+; CHECK-NEXT:    cmp x11, x9
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov x10, d3
+; CHECK-NEXT:    csel x9, x11, x9, lt
+; CHECK-NEXT:    fmov x11, d1
+; CHECK-NEXT:    cmp x11, x10
+; CHECK-NEXT:    csel x10, x11, x10, lt
+; CHECK-NEXT:    fmov d1, x10
+; CHECK-NEXT:    mov v0.d[1], x8
+; CHECK-NEXT:    mov v1.d[1], x9
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %a, <4 x i64> %b)
+  store <4 x i64> %c, <4 x i64>* %p
+  ret void
+}
+
 declare i8 @llvm.umin.i8(i8 %a, i8 %b) readnone
 
 define i8 @umini8(i8 %a, i8 %b) {
@@ -535,6 +741,20 @@
   ret <16 x i8> %c
 }
 
+declare <32 x i8> @llvm.umin.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
+
+define void @umin32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+; CHECK-LABEL: umin32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umin v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    umin v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <32 x i8> @llvm.umin.v32i8(<32 x i8> %a, <32 x i8> %b)
+  store <32 x i8> %c, <32 x i8>* %p
+  ret void
+}
+
 declare <4 x i16> @llvm.umin.v4i16(<4 x i16> %a, <4 x i16> %b) readnone
 
 define <4 x i16> @umin4i16(<4 x i16> %a, <4 x i16> %b) {
@@ -557,6 +777,20 @@
   ret <8 x i16> %c
 }
 
+declare <16 x i16> @llvm.umin.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
+
+define void @umin16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+; CHECK-LABEL: umin16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umin v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    umin v1.8h, v1.8h, v3.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <16 x i16> @llvm.umin.v16i16(<16 x i16> %a, <16 x i16> %b)
+  store <16 x i16> %c, <16 x i16>* %p
+  ret void
+}
+
 declare <2 x i32> @llvm.umin.v2i32(<2 x i32> %a, <2 x i32> %b) readnone
 
 define <2 x i32> @umin2i32(<2 x i32> %a, <2 x i32> %b) {
@@ -579,6 +813,20 @@
   ret <4 x i32> %c
 }
 
+declare <8 x i32> @llvm.umin.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
+
+define void @umin8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+; CHECK-LABEL: umin8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umin v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    umin v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <8 x i32>@llvm.umin.v8i32(<8 x i32> %a, <8 x i32> %b)
+  store <8 x i32> %c, <8 x i32>* %p
+  ret void
+}
+
 declare <1 x i64> @llvm.umin.v1i64(<1 x i64> %a, <1 x i64> %b) readnone
 
 define <1 x i64> @umin1i64(<1 x i64> %a, <1 x i64> %b) {
@@ -607,3 +855,19 @@
   %c = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %a, <2 x i64> %b)
   ret <2 x i64> %c
 }
+
+declare <4 x i64> @llvm.umin.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
+
+define void @umin4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+; CHECK-LABEL: umin4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uqsub v2.2d, v0.2d, v2.2d
+; CHECK-NEXT:    uqsub v3.2d, v1.2d, v3.2d
+; CHECK-NEXT:    sub v0.2d, v0.2d, v2.2d
+; CHECK-NEXT:    sub v1.2d, v1.2d, v3.2d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %a, <4 x i64> %b)
+  store <4 x i64> %c, <4 x i64>* %p
+  ret void
+}