diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -168,6 +168,8 @@
   getActionDefinitionsBuilder({G_SMULH, G_UMULH}).legalFor({s32, s64});
 
   getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
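+      // These vector types have native NEON SMIN/SMAX/UMIN/UMAX instructions.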
+      .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
       .lowerIf([=](const LegalityQuery &Q) { return Q.Types[0].isScalar(); });
 
   getActionDefinitionsBuilder(
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
@@ -0,0 +1,196 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-- -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name:            v8s8_smin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: v8s8_smin
+    ; CHECK: liveins: $x0
+    ; CHECK: %vec:_(<8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<8 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %smin:_(<8 x s8>) = G_SMIN %vec, %vec1
+    ; CHECK: $x0 = COPY %smin(<8 x s8>)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %vec:_(<8 x s8>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s8>) = G_IMPLICIT_DEF
+    %smin:_(<8 x s8>) = G_SMIN %vec, %vec1
+    $x0 = COPY %smin
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:            v16s8_smin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v16s8_smin
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %smin:_(<16 x s8>) = G_SMIN %vec, %vec1
+    ; CHECK: $q0 = COPY %smin(<16 x s8>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<16 x s8>) = G_IMPLICIT_DEF
+    %vec1:_(<16 x s8>) = G_IMPLICIT_DEF
+    %smin:_(<16 x s8>) = G_SMIN %vec, %vec1
+    $q0 = COPY %smin
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v4s16_smin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: v4s16_smin
+    ; CHECK: liveins: $x0
+    ; CHECK: %vec:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<4 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %smin:_(<4 x s16>) = G_SMIN %vec, %vec1
+    ; CHECK: $x0 = COPY %smin(<4 x s16>)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %vec:_(<4 x s16>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s16>) = G_IMPLICIT_DEF
+    %smin:_(<4 x s16>) = G_SMIN %vec, %vec1
+    $x0 = COPY %smin
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:            v8s16_smin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v8s16_smin
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %smin:_(<8 x s16>) = G_SMIN %vec, %vec1
+    ; CHECK: $q0 = COPY %smin(<8 x s16>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<8 x s16>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s16>) = G_IMPLICIT_DEF
+    %smin:_(<8 x s16>) = G_SMIN %vec, %vec1
+    $q0 = COPY %smin
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v2s32_smin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: v2s32_smin
+    ; CHECK: liveins: $x0
+    ; CHECK: %vec:_(<2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %smin:_(<2 x s32>) = G_SMIN %vec, %vec1
+    ; CHECK: $x0 = COPY %smin(<2 x s32>)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %vec:_(<2 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<2 x s32>) = G_IMPLICIT_DEF
+    %smin:_(<2 x s32>) = G_SMIN %vec, %vec1
+    $x0 = COPY %smin
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:            v4s32_smin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v4s32_smin
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<4 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %smin:_(<4 x s32>) = G_SMIN %vec, %vec1
+    ; CHECK: $q0 = COPY %smin(<4 x s32>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<4 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s32>) = G_IMPLICIT_DEF
+    %smin:_(<4 x s32>) = G_SMIN %vec, %vec1
+    $q0 = COPY %smin
+    RET_ReallyLR implicit $q0
+
+...
+
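+# Illustrative extra coverage (not autogenerated): the legalizer rule under
+# test also makes G_SMAX, G_UMIN and G_UMAX legal for these vector types, so
+# one sample test per remaining opcode follows the G_SMIN pattern above.
+---
+name:            v8s16_smax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v8s16_smax
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK: %smax:_(<8 x s16>) = G_SMAX %vec, %vec1
+    ; CHECK: $q0 = COPY %smax(<8 x s16>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<8 x s16>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s16>) = G_IMPLICIT_DEF
+    %smax:_(<8 x s16>) = G_SMAX %vec, %vec1
+    $q0 = COPY %smax
+    RET_ReallyLR implicit $q0
+
+...
+---
+name:            v2s32_umin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: v2s32_umin
+    ; CHECK: liveins: $x0
+    ; CHECK: %vec:_(<2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK: %umin:_(<2 x s32>) = G_UMIN %vec, %vec1
+    ; CHECK: $x0 = COPY %umin(<2 x s32>)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %vec:_(<2 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<2 x s32>) = G_IMPLICIT_DEF
+    %umin:_(<2 x s32>) = G_UMIN %vec, %vec1
+    $x0 = COPY %umin
+    RET_ReallyLR implicit $x0
+
+...
+---
+name:            v16s8_umax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0
+
+    ; CHECK-LABEL: name: v16s8_umax
+    ; CHECK: liveins: $q0
+    ; CHECK: %vec:_(<16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %vec1:_(<16 x s8>) = G_IMPLICIT_DEF
+    ; CHECK: %umax:_(<16 x s8>) = G_UMAX %vec, %vec1
+    ; CHECK: $q0 = COPY %umax(<16 x s8>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %vec:_(<16 x s8>) = G_IMPLICIT_DEF
+    %vec1:_(<16 x s8>) = G_IMPLICIT_DEF
+    %umax:_(<16 x s8>) = G_UMAX %vec, %vec1
+    $q0 = COPY %umax
+    RET_ReallyLR implicit $q0
+
+...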