diff --git a/llvm/include/llvm/ADT/Hashing.h b/llvm/include/llvm/ADT/Hashing.h
--- a/llvm/include/llvm/ADT/Hashing.h
+++ b/llvm/include/llvm/ADT/Hashing.h
@@ -51,6 +51,7 @@
 #include <algorithm>
 #include <cassert>
 #include <cstring>
+#include <iterator>
 #include <string>
 #include <tuple>
 #include <utility>
@@ -267,51 +268,20 @@
   /// Create a new hash_state structure and initialize it based on the
   /// seed and the first 64-byte chunk.
   /// This effectively performs the initial mix.
-  static hash_state create(const char *s, uint64_t seed) {
-    hash_state state = {
-      0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
-      seed * k1, shift_mix(seed), 0 };
-    state.h6 = hash_16_bytes(state.h4, state.h5);
-    state.mix(s);
-    return state;
-  }
+  static hash_state create(const char *s, uint64_t seed);
 
   /// Mix 32 bytes from the input sequence into the 16 bytes of 'a'
   /// and 'b', including whatever is already in 'a' and 'b'.
-  static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
-    a += fetch64(s);
-    uint64_t c = fetch64(s + 24);
-    b = rotate(b + a + c, 21);
-    uint64_t d = a;
-    a += fetch64(s + 8) + fetch64(s + 16);
-    b += rotate(a, 44) + d;
-    a += c;
-  }
+  static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b);
 
   /// Mix in a 64-byte buffer of data.
   /// We mix all 64 bytes even when the chunk length is smaller, but we
   /// record the actual length.
-  void mix(const char *s) {
-    h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
-    h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
-    h0 ^= h6;
-    h1 += h3 + fetch64(s + 40);
-    h2 = rotate(h2 + h5, 33) * k1;
-    h3 = h4 * k1;
-    h4 = h0 + h5;
-    mix_32_bytes(s, h3, h4);
-    h5 = h2 + h6;
-    h6 = h1 + fetch64(s + 16);
-    mix_32_bytes(s + 32, h5, h6);
-    std::swap(h2, h0);
-  }
+  void mix(const char *s);
 
   /// Compute the final 64-bit hash code value based on the current
   /// state and the length of bytes hashed.
-  uint64_t finalize(size_t length) {
-    return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
-                         hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
-  }
+  uint64_t finalize(size_t length);
 };
 
 
@@ -568,24 +538,7 @@
   /// The base case when combining arguments recursively is reached when all
   /// arguments have been handled. It flushes the remaining buffer and
   /// constructs a hash_code.
-  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
-    // Check whether the entire set of values fit in the buffer. If so, we'll
-    // use the optimized short hashing routine and skip state entirely.
-    if (length == 0)
-      return hash_short(buffer, buffer_ptr - buffer, seed);
-
-    // Mix the final buffer, rotating it if we did a partial fill in order to
-    // simulate doing a mix of the last 64-bytes. That is how the algorithm
-    // works when we have a contiguous byte sequence, and we want to emulate
-    // that here.
-    std::rotate(buffer, buffer_ptr, buffer_end);
-
-    // Mix this chunk into the current state.
-    state.mix(buffer);
-    length += buffer_ptr - buffer;
-
-    return state.finalize(length);
-  }
+  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end);
 };
 
 } // namespace detail
diff --git a/llvm/lib/Support/Hashing.cpp b/llvm/lib/Support/Hashing.cpp
--- a/llvm/lib/Support/Hashing.cpp
+++ b/llvm/lib/Support/Hashing.cpp
@@ -13,6 +13,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/ADT/Hashing.h"
+#include <algorithm>
+#include <utility>
 
 using namespace llvm;
 
@@ -26,3 +27,76 @@
 void llvm::set_fixed_execution_hash_seed(uint64_t fixed_value) {
   hashing::detail::fixed_seed_override = fixed_value;
 }
+
+namespace llvm::hashing::detail {
+
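+// Base case of the recursive combine: flush the buffer and finalize.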
+hash_code hash_combine_recursive_helper::combine(size_t length,
+                                                 char *buffer_ptr,
+                                                 char *buffer_end) {
+  // Check whether the entire set of values fits in the buffer. If so, use
+  // the optimized short hashing routine and skip the state entirely.
+  if (length == 0)
+    return hash_short(buffer, buffer_ptr - buffer, seed);
+
+  // Mix the final buffer, rotating it if we did a partial fill, in order to
+  // simulate a mix of the last 64 bytes. That is how the algorithm works
+  // when it is given a contiguous byte sequence, and we want to emulate
+  // that behavior here.
+  std::rotate(buffer, buffer_ptr, buffer_end);
+
+  // Mix this chunk into the current state.
+  state.mix(buffer);
+  length += buffer_ptr - buffer;
+
+  return state.finalize(length);
+}
+
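+// Seed the seven-word state and fold in the first 64-byte chunk.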
+hash_state hash_state::create(const char *s, uint64_t seed) {
+  hash_state state = {0,
+                      seed,
+                      hash_16_bytes(seed, k1),
+                      rotate(seed ^ k1, 49),
+                      seed * k1,
+                      shift_mix(seed),
+                      0};
+  state.h6 = hash_16_bytes(state.h4, state.h5);
+  state.mix(s);
+  return state;
+}
+
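+// A 32-byte mixing round; mix() calls this twice per 64-byte chunk.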
+void hash_state::mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
+  a += fetch64(s);
+  uint64_t c = fetch64(s + 24);
+  b = rotate(b + a + c, 21);
+  uint64_t d = a;
+  a += fetch64(s + 8) + fetch64(s + 16);
+  b += rotate(a, 44) + d;
+  a += c;
+}
+
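+// Mix one full 64-byte chunk into the seven-word state.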
+void hash_state::mix(const char *s) {
+  h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
+  h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
+  h0 ^= h6;
+  h1 += h3 + fetch64(s + 40);
+  h2 = rotate(h2 + h5, 33) * k1;
+  h3 = h4 * k1;
+  h4 = h0 + h5;
+  mix_32_bytes(s, h3, h4);
+  h5 = h2 + h6;
+  h6 = h1 + fetch64(s + 16);
+  mix_32_bytes(s + 32, h5, h6);
+  std::swap(h2, h0);
+}
+
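+// Collapse the state and the total byte length into the final hash.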
+uint64_t hash_state::finalize(size_t length) {
+  return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
+                       hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
+}
+
+} // namespace llvm::hashing::detail
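
For context, here is a minimal sketch of how the out-of-lined definitions are
reached through the public entry points in llvm/ADT/Hashing.h. The standalone
main() and the specific values are hypothetical; only hash_combine,
hash_combine_range, and hash_code are taken from the header.

#include "llvm/ADT/Hashing.h"
#include <cstdint>
#include <vector>

int main() {
  // A handful of small values fits in the helper's 64-byte buffer, so
  // hash_combine_recursive_helper::combine takes the length == 0 path and
  // hashes the buffer with hash_short, never touching hash_state.
  llvm::hash_code small = llvm::hash_combine(42, 3.14, 'x');

  // 32 * 8 = 256 bytes of contiguous data overflow that buffer, so the bytes
  // are run through hash_state::create, mix, and finalize -- the definitions
  // this patch moves into Hashing.cpp.
  std::vector<uint64_t> data(32, 0x0123456789abcdefULL); // hypothetical values
  llvm::hash_code large =
      llvm::hash_combine_range(data.data(), data.data() + data.size());

  (void)small; // results are unused; this only demonstrates the two paths
  (void)large;
  return 0;
}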