diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -169,6 +169,8 @@
   getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
       .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
+      .clampNumElements(0, v2s32, v4s32)
+      .clampNumElements(0, v2s64, v2s64)
       .lower();

   getActionDefinitionsBuilder(
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
@@ -126,6 +126,34 @@
     $q0 = COPY %smin
     RET_ReallyLR implicit $q0

+...
+---
+name: v8s32_smin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v8s32_smin
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[SMIN:%[0-9]+]]:_(<4 x s32>) = G_SMIN [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[SMIN1:%[0-9]+]]:_(<4 x s32>) = G_SMIN [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[SMIN]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[SMIN1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
+    %vec:_(<8 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s32>) = G_IMPLICIT_DEF
+    %smin:_(<8 x s32>) = G_SMIN %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %smin(<8 x s32>), %1(p0) :: (store (<8 x s32>))
+
 ...
 ---
 name: v2s64_smin
@@ -158,6 +186,50 @@
     $q0 = COPY %smin
     RET_ReallyLR implicit $q0

+...
+---
+name: v4s64_smin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v4s64_smin
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(slt), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]]
+    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR]]
+    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]]
+    ; CHECK: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(slt), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP1]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[ASHR1:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR1]]
+    ; CHECK: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]]
+    ; CHECK: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]]
+    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[OR]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>), align 32)
+    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s64)
+    ; CHECK: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
+    %vec:_(<4 x s64>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s64>) = G_IMPLICIT_DEF
+    %smin:_(<4 x s64>) = G_SMIN %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %smin(<4 x s64>), %1(p0) :: (store (<4 x s64>))
+
 ...
 ---
 name: v8s8_umin
@@ -284,6 +356,34 @@
     $q0 = COPY %umin
     RET_ReallyLR implicit $q0

+...
+---
+name: v8s32_umin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v8s32_umin
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[UMIN:%[0-9]+]]:_(<4 x s32>) = G_UMIN [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[UMIN1:%[0-9]+]]:_(<4 x s32>) = G_UMIN [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[UMIN]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[UMIN1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
+    %vec:_(<8 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s32>) = G_IMPLICIT_DEF
+    %umin:_(<8 x s32>) = G_UMIN %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %umin(<8 x s32>), %1(p0) :: (store (<8 x s32>))
+
 ...
 ---
 name: v2s64_umin
@@ -316,6 +416,50 @@
     $q0 = COPY %umin
     RET_ReallyLR implicit $q0

+...
+---
+name: v4s64_umin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v4s64_umin
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ult), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]]
+    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR]]
+    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]]
+    ; CHECK: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ult), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP1]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[ASHR1:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR1]]
+    ; CHECK: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]]
+    ; CHECK: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]]
+    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[OR]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>), align 32)
+    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s64)
+    ; CHECK: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
+    %vec:_(<4 x s64>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s64>) = G_IMPLICIT_DEF
+    %umin:_(<4 x s64>) = G_UMIN %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %umin(<4 x s64>), %1(p0) :: (store (<4 x s64>))
+
 ...
 ---
 name: v8s8_smax
@@ -442,6 +586,34 @@
     $q0 = COPY %smax
     RET_ReallyLR implicit $q0

+...
+---
+name: v8s32_smax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v8s32_smax
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[SMAX:%[0-9]+]]:_(<4 x s32>) = G_SMAX [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[SMAX1:%[0-9]+]]:_(<4 x s32>) = G_SMAX [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[SMAX]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[SMAX1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
+    %vec:_(<8 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s32>) = G_IMPLICIT_DEF
+    %smax:_(<8 x s32>) = G_SMAX %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %smax(<8 x s32>), %1(p0) :: (store (<8 x s32>))
+
 ...
 ---
 name: v2s64_smax
@@ -474,6 +646,50 @@
     $q0 = COPY %smax
     RET_ReallyLR implicit $q0

+...
+---
+name: v4s64_smax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v4s64_smax
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(sgt), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]]
+    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR]]
+    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]]
+    ; CHECK: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(sgt), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP1]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[ASHR1:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR1]]
+    ; CHECK: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]]
+    ; CHECK: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]]
+    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[OR]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>), align 32)
+    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s64)
+    ; CHECK: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
+    %vec:_(<4 x s64>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s64>) = G_IMPLICIT_DEF
+    %smax:_(<4 x s64>) = G_SMAX %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %smax(<4 x s64>), %1(p0) :: (store (<4 x s64>))
+
 ...
 ---
 name: v8s8_umax
@@ -600,6 +816,34 @@
     $q0 = COPY %umax
     RET_ReallyLR implicit $q0

+...
+---
+name: v8s32_umax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v8s32_umax
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[UMAX:%[0-9]+]]:_(<4 x s32>) = G_UMAX [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[UMAX1:%[0-9]+]]:_(<4 x s32>) = G_UMAX [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[UMAX]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[UMAX1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
+    %vec:_(<8 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s32>) = G_IMPLICIT_DEF
+    %umax:_(<8 x s32>) = G_UMAX %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %umax(<8 x s32>), %1(p0) :: (store (<8 x s32>))
+
 ...
 ---
 name: v2s64_umax
@@ -633,4 +877,48 @@
     RET_ReallyLR implicit $q0

 ...
+---
+name: v4s64_umax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v4s64_umax
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ugt), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]]
+    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR]]
+    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]]
+    ; CHECK: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ugt), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP1]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[ASHR1:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR1]]
+    ; CHECK: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]]
+    ; CHECK: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]]
+    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[OR]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>), align 32)
+    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s64)
+    ; CHECK: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
+    %vec:_(<4 x s64>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s64>) = G_IMPLICIT_DEF
+    %umax:_(<4 x s64>) = G_UMAX %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %umax(<4 x s64>), %1(p0) :: (store (<4 x s64>))
+
+...
diff --git a/llvm/test/CodeGen/AArch64/min-max.ll b/llvm/test/CodeGen/AArch64/min-max.ll
--- a/llvm/test/CodeGen/AArch64/min-max.ll
+++ b/llvm/test/CodeGen/AArch64/min-max.ll
@@ -122,6 +122,20 @@
   ret <4 x i32> %c
 }

+declare <8 x i32> @llvm.smax.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
+
+define void @smax8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+; CHECK-LABEL: smax8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smax v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    smax v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <8 x i32> @llvm.smax.v8i32(<8 x i32> %a, <8 x i32> %b)
+  store <8 x i32> %c, <8 x i32>* %p
+  ret void
+}
+
 declare <1 x i64> @llvm.smax.v1i64(<1 x i64> %a, <1 x i64> %b) readnone

 define <1 x i64> @smax1i64(<1 x i64> %a, <1 x i64> %b) {
@@ -159,6 +173,38 @@
   ret <2 x i64> %c
 }

+declare <4 x i64> @llvm.smax.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
+
+define void @smax4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+; CHECK-LABEL: smax4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, v2.d[1]
+; CHECK-NEXT:    mov x9, v0.d[1]
+; CHECK-NEXT:    fmov x10, d2
+; CHECK-NEXT:    fmov x11, d0
+; CHECK-NEXT:    cmp x9, x8
+; CHECK-NEXT:    csel x8, x9, x8, gt
+; CHECK-NEXT:    cmp x11, x10
+; CHECK-NEXT:    mov x9, v3.d[1]
+; CHECK-NEXT:    csel x10, x11, x10, gt
+; CHECK-NEXT:    mov x11, v1.d[1]
+; CHECK-NEXT:    cmp x11, x9
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov x10, d3
+; CHECK-NEXT:    csel x9, x11, x9, gt
+; CHECK-NEXT:    fmov x11, d1
+; CHECK-NEXT:    cmp x11, x10
+; CHECK-NEXT:    csel x10, x11, x10, gt
+; CHECK-NEXT:    fmov d1, x10
+; CHECK-NEXT:    mov v0.d[1], x8
+; CHECK-NEXT:    mov v1.d[1], x9
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %a, <4 x i64> %b)
+  store <4 x i64> %c, <4 x i64>* %p
+  ret void
+}
+
 declare i8 @llvm.umax.i8(i8 %a, i8 %b) readnone

 define i8 @umaxi8(i8 %a, i8 %b) {
@@ -277,6 +323,20 @@
   ret <4 x i32> %c
 }

+declare <8 x i32> @llvm.umax.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
+
+define void @umax8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+; CHECK-LABEL: umax8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umax v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    umax v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <8 x i32> @llvm.umax.v8i32(<8 x i32> %a, <8 x i32> %b)
+  store <8 x i32> %c, <8 x i32>* %p
+  ret void
+}
+
 declare <1 x i64> @llvm.umax.v1i64(<1 x i64> %a, <1 x i64> %b) readnone

 define <1 x i64> @umax1i64(<1 x i64> %a, <1 x i64> %b) {
@@ -306,6 +366,22 @@
   ret <2 x i64> %c
 }

+declare <4 x i64> @llvm.umax.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
+
+define void @umax4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+; CHECK-LABEL: umax4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uqsub v2.2d, v2.2d, v0.2d
+; CHECK-NEXT:    uqsub v3.2d, v3.2d, v1.2d
+; CHECK-NEXT:    add v0.2d, v0.2d, v2.2d
+; CHECK-NEXT:    add v1.2d, v1.2d, v3.2d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %a, <4 x i64> %b)
+  store <4 x i64> %c, <4 x i64>* %p
+  ret void
+}
+
 declare i8 @llvm.smin.i8(i8 %a, i8 %b) readnone

 define i8 @smini8(i8 %a, i8 %b) {
@@ -424,6 +500,20 @@
   ret <4 x i32> %c
 }

+declare <8 x i32> @llvm.smin.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
+
+define void @smin8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+; CHECK-LABEL: smin8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smin v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    smin v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %a, <8 x i32> %b)
+  store <8 x i32> %c, <8 x i32>* %p
+  ret void
+}
+
 declare <1 x i64> @llvm.smin.v1i64(<1 x i64> %a, <1 x i64> %b) readnone

 define <1 x i64> @smin1i64(<1 x i64> %a, <1 x i64> %b) {
@@ -461,6 +551,38 @@
   ret <2 x i64> %c
 }

+declare <4 x i64> @llvm.smin.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
+
+define void @smin4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+; CHECK-LABEL: smin4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, v2.d[1]
+; CHECK-NEXT:    mov x9, v0.d[1]
+; CHECK-NEXT:    fmov x10, d2
+; CHECK-NEXT:    fmov x11, d0
+; CHECK-NEXT:    cmp x9, x8
+; CHECK-NEXT:    csel x8, x9, x8, lt
+; CHECK-NEXT:    cmp x11, x10
+; CHECK-NEXT:    mov x9, v3.d[1]
+; CHECK-NEXT:    csel x10, x11, x10, lt
+; CHECK-NEXT:    mov x11, v1.d[1]
+; CHECK-NEXT:    cmp x11, x9
+; CHECK-NEXT:    fmov d0, x10
+; CHECK-NEXT:    fmov x10, d3
+; CHECK-NEXT:    csel x9, x11, x9, lt
+; CHECK-NEXT:    fmov x11, d1
+; CHECK-NEXT:    cmp x11, x10
+; CHECK-NEXT:    csel x10, x11, x10, lt
+; CHECK-NEXT:    fmov d1, x10
+; CHECK-NEXT:    mov v0.d[1], x8
+; CHECK-NEXT:    mov v1.d[1], x9
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %a, <4 x i64> %b)
+  store <4 x i64> %c, <4 x i64>* %p
+  ret void
+}
+
 declare i8 @llvm.umin.i8(i8 %a, i8 %b) readnone

 define i8 @umini8(i8 %a, i8 %b) {
@@ -579,6 +701,20 @@
   ret <4 x i32> %c
 }

+declare <8 x i32> @llvm.umin.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
+
+define void @umin8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+; CHECK-LABEL: umin8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umin v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    umin v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %a, <8 x i32> %b)
+  store <8 x i32> %c, <8 x i32>* %p
+  ret void
+}
+
 declare <1 x i64> @llvm.umin.v1i64(<1 x i64> %a, <1 x i64> %b) readnone

 define <1 x i64> @umin1i64(<1 x i64> %a, <1 x i64> %b) {
@@ -607,3 +743,19 @@
   %c = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %a, <2 x i64> %b)
   ret <2 x i64> %c
 }
+
+declare <4 x i64> @llvm.umin.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
+
+define void @umin4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+; CHECK-LABEL: umin4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uqsub v2.2d, v0.2d, v2.2d
+; CHECK-NEXT:    uqsub v3.2d, v1.2d, v3.2d
+; CHECK-NEXT:    sub v0.2d, v0.2d, v2.2d
+; CHECK-NEXT:    sub v1.2d, v1.2d, v3.2d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %a, <4 x i64> %b)
+  store <4 x i64> %c, <4 x i64>* %p
+  ret void
+}