diff --git a/llvm/lib/Target/RISCV/RISCV.td b/llvm/lib/Target/RISCV/RISCV.td
--- a/llvm/lib/Target/RISCV/RISCV.td
+++ b/llvm/lib/Target/RISCV/RISCV.td
@@ -430,6 +430,11 @@
 def FeatureSaveRestore : SubtargetFeature<"save-restore", "EnableSaveRestore",
                                           "true", "Enable save/restore.">;
 
+def FeatureUnalignedScalarMem
+    : SubtargetFeature<"unaligned-scalar-mem", "EnableUnalignedScalarMem",
+                       "true", "Has reasonably performant unaligned scalar "
+                       "loads and stores.">;
+
 def TuneNoDefaultUnroll
     : SubtargetFeature<"no-default-unroll", "EnableDefaultUnroll", "false",
                        "Disable default unroll preference.">;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -11787,9 +11787,13 @@
 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
     bool *Fast) const {
-  if (!VT.isVector())
-    return false;
+  if (!VT.isVector()) {
+    if (Fast)
+      *Fast = false;
+    return Subtarget.enableUnalignedScalarMem();
+  }
 
+  // All vector implementations must support element-aligned accesses.
   EVT ElemVT = VT.getVectorElementType();
   if (Alignment >= ElemVT.getStoreSize()) {
     if (Fast)
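
For scalar types, the hook now keys off the new feature instead of rejecting unconditionally, while still reporting the access as not fast. A minimal sketch of how a caller consults the hook; this is an assumed, simplified caller, with TLI, VT, Alignment, and Flags standing in for values at the call site, and is not code from this patch:

    bool Fast = false;
    if (!TLI.allowsMisalignedMemoryAccesses(VT, /*AddrSpace=*/0, Alignment,
                                            Flags, &Fast)) {
      // Rejected: legalization expands the access into naturally aligned
      // narrower operations (byte loads plus shift/or on RISC-V), matching
      // the NOMISALIGN CHECK lines in the test below.
    }
    // With +unaligned-scalar-mem the scalar case returns true, but *Fast
    // stays false, so transforms that require *fast* unaligned accesses
    // (for example, widening during memcpy lowering) remain conservative.

Returning true with *Fast = false is the conventional way to say "legal but slow": explicit unaligned scalar accesses are no longer split, but the compiler should not introduce new wide unaligned accesses for speed.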
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -92,6 +92,7 @@
   bool EnableRVCHintInstrs = true;
   bool EnableDefaultUnroll = true;
   bool EnableSaveRestore = false;
+  bool EnableUnalignedScalarMem = false;
   unsigned XLen = 32;
   unsigned ZvlLen = 0;
   MVT XLenVT = MVT::i32;
@@ -182,6 +183,7 @@
   bool enableRVCHintInstrs() const { return EnableRVCHintInstrs; }
   bool enableDefaultUnroll() const { return EnableDefaultUnroll; }
   bool enableSaveRestore() const { return EnableSaveRestore; }
+  bool enableUnalignedScalarMem() const { return EnableUnalignedScalarMem; }
   MVT getXLenVT() const { return XLenVT; }
   unsigned getXLen() const { return XLen; }
   unsigned getFLen() const {
diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
--- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
@@ -1,43 +1,60 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=BOTH,RV32I %s
+; RUN:   | FileCheck -check-prefixes=ALL,NOMISALIGN,RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=BOTH,RV64I %s
+; RUN:   | FileCheck -check-prefixes=ALL,NOMISALIGN,RV64I %s
+; RUN: llc -mtriple=riscv32 -mattr=+unaligned-scalar-mem -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=ALL,MISALIGN,MISALIGN-RV32I %s
+; RUN: llc -mtriple=riscv64 -mattr=+unaligned-scalar-mem -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=ALL,MISALIGN,MISALIGN-RV64I %s
 
 ; A collection of cases showing codegen for unaligned loads and stores
 
 define i8 @load_i8(i8* %p) {
-; BOTH-LABEL: load_i8:
-; BOTH:       # %bb.0:
-; BOTH-NEXT:    lb a0, 0(a0)
-; BOTH-NEXT:    ret
+; ALL-LABEL: load_i8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    lb a0, 0(a0)
+; ALL-NEXT:    ret
   %res = load i8, i8* %p, align 1
   ret i8 %res
 }
 
 define i16 @load_i16(i16* %p) {
-; BOTH-LABEL: load_i16:
-; BOTH:       # %bb.0:
-; BOTH-NEXT:    lb a1, 1(a0)
-; BOTH-NEXT:    lbu a0, 0(a0)
-; BOTH-NEXT:    slli a1, a1, 8
-; BOTH-NEXT:    or a0, a1, a0
-; BOTH-NEXT:    ret
+; NOMISALIGN-LABEL: load_i16:
+; NOMISALIGN:       # %bb.0:
+; NOMISALIGN-NEXT:    lb a1, 1(a0)
+; NOMISALIGN-NEXT:    lbu a0, 0(a0)
+; NOMISALIGN-NEXT:    slli a1, a1, 8
+; NOMISALIGN-NEXT:    or a0, a1, a0
+; NOMISALIGN-NEXT:    ret
+;
+; MISALIGN-LABEL: load_i16:
+; MISALIGN:       # %bb.0:
+; MISALIGN-NEXT:    lh a0, 0(a0)
+; MISALIGN-NEXT:    ret
   %res = load i16, i16* %p, align 1
   ret i16 %res
 }
 
 define i24 @load_i24(i24* %p) {
-; BOTH-LABEL: load_i24:
-; BOTH:       # %bb.0:
-; BOTH-NEXT:    lbu a1, 1(a0)
-; BOTH-NEXT:    lbu a2, 0(a0)
-; BOTH-NEXT:    lb a0, 2(a0)
-; BOTH-NEXT:    slli a1, a1, 8
-; BOTH-NEXT:    or a1, a1, a2
-; BOTH-NEXT:    slli a0, a0, 16
-; BOTH-NEXT:    or a0, a1, a0
-; BOTH-NEXT:    ret
+; NOMISALIGN-LABEL: load_i24:
+; NOMISALIGN:       # %bb.0:
+; NOMISALIGN-NEXT:    lbu a1, 1(a0)
+; NOMISALIGN-NEXT:    lbu a2, 0(a0)
+; NOMISALIGN-NEXT:    lb a0, 2(a0)
+; NOMISALIGN-NEXT:    slli a1, a1, 8
+; NOMISALIGN-NEXT:    or a1, a1, a2
+; NOMISALIGN-NEXT:    slli a0, a0, 16
+; NOMISALIGN-NEXT:    or a0, a1, a0
+; NOMISALIGN-NEXT:    ret
+;
+; MISALIGN-LABEL: load_i24:
+; MISALIGN:       # %bb.0:
+; MISALIGN-NEXT:    lb a1, 2(a0)
+; MISALIGN-NEXT:    lhu a0, 0(a0)
+; MISALIGN-NEXT:    slli a1, a1, 16
+; MISALIGN-NEXT:    or a0, a0, a1
+; MISALIGN-NEXT:    ret
   %res = load i24, i24* %p, align 1
   ret i24 %res
 }
@@ -70,6 +87,11 @@
 ; RV64I-NEXT:    slli a0, a0, 16
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
+;
+; MISALIGN-LABEL: load_i32:
+; MISALIGN:       # %bb.0:
+; MISALIGN-NEXT:    lw a0, 0(a0)
+; MISALIGN-NEXT:    ret
   %res = load i32, i32* %p, align 1
   ret i32 %res
 }
@@ -125,54 +147,83 @@
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
+;
+; MISALIGN-RV32I-LABEL: load_i64:
+; MISALIGN-RV32I:       # %bb.0:
+; MISALIGN-RV32I-NEXT:    lw a2, 0(a0)
+; MISALIGN-RV32I-NEXT:    lw a1, 4(a0)
+; MISALIGN-RV32I-NEXT:    mv a0, a2
+; MISALIGN-RV32I-NEXT:    ret
+;
+; MISALIGN-RV64I-LABEL: load_i64:
+; MISALIGN-RV64I:       # %bb.0:
+; MISALIGN-RV64I-NEXT:    ld a0, 0(a0)
+; MISALIGN-RV64I-NEXT:    ret
   %res = load i64, i64* %p, align 1
   ret i64 %res
 }
 
 define void @store_i8(i8* %p, i8 %v) {
-; BOTH-LABEL: store_i8:
-; BOTH:       # %bb.0:
-; BOTH-NEXT:    sb a1, 0(a0)
-; BOTH-NEXT:    ret
+; ALL-LABEL: store_i8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    sb a1, 0(a0)
+; ALL-NEXT:    ret
   store i8 %v, i8* %p, align 1
   ret void
 }
 
 define void @store_i16(i16* %p, i16 %v) {
-; BOTH-LABEL: store_i16:
-; BOTH:       # %bb.0:
-; BOTH-NEXT:    sb a1, 0(a0)
-; BOTH-NEXT:    srli a1, a1, 8
-; BOTH-NEXT:    sb a1, 1(a0)
-; BOTH-NEXT:    ret
+; NOMISALIGN-LABEL: store_i16:
+; NOMISALIGN:       # %bb.0:
+; NOMISALIGN-NEXT:    sb a1, 0(a0)
+; NOMISALIGN-NEXT:    srli a1, a1, 8
+; NOMISALIGN-NEXT:    sb a1, 1(a0)
+; NOMISALIGN-NEXT:    ret
+;
+; MISALIGN-LABEL: store_i16:
+; MISALIGN:       # %bb.0:
+; MISALIGN-NEXT:    sh a1, 0(a0)
+; MISALIGN-NEXT:    ret
   store i16 %v, i16* %p, align 1
   ret void
 }
 
 define void @store_i24(i24* %p, i24 %v) {
-; BOTH-LABEL: store_i24:
-; BOTH:       # %bb.0:
-; BOTH-NEXT:    sb a1, 0(a0)
-; BOTH-NEXT:    srli a2, a1, 8
-; BOTH-NEXT:    sb a2, 1(a0)
-; BOTH-NEXT:    srli a1, a1, 16
-; BOTH-NEXT:    sb a1, 2(a0)
-; BOTH-NEXT:    ret
+; NOMISALIGN-LABEL: store_i24:
+; NOMISALIGN:       # %bb.0:
+; NOMISALIGN-NEXT:    sb a1, 0(a0)
+; NOMISALIGN-NEXT:    srli a2, a1, 8
+; NOMISALIGN-NEXT:    sb a2, 1(a0)
+; NOMISALIGN-NEXT:    srli a1, a1, 16
+; NOMISALIGN-NEXT:    sb a1, 2(a0)
+; NOMISALIGN-NEXT:    ret
+;
+; MISALIGN-LABEL: store_i24:
+; MISALIGN:       # %bb.0:
+; MISALIGN-NEXT:    sh a1, 0(a0)
+; MISALIGN-NEXT:    srli a1, a1, 16
+; MISALIGN-NEXT:    sb a1, 2(a0)
+; MISALIGN-NEXT:    ret
   store i24 %v, i24* %p, align 1
   ret void
 }
 
 define void @store_i32(i32* %p, i32 %v) {
-; BOTH-LABEL: store_i32:
-; BOTH:       # %bb.0:
-; BOTH-NEXT:    sb a1, 0(a0)
-; BOTH-NEXT:    srli a2, a1, 24
-; BOTH-NEXT:    sb a2, 3(a0)
-; BOTH-NEXT:    srli a2, a1, 16
-; BOTH-NEXT:    sb a2, 2(a0)
-; BOTH-NEXT:    srli a1, a1, 8
-; BOTH-NEXT:    sb a1, 1(a0)
-; BOTH-NEXT:    ret
+; NOMISALIGN-LABEL: store_i32:
+; NOMISALIGN:       # %bb.0:
+; NOMISALIGN-NEXT:    sb a1, 0(a0)
+; NOMISALIGN-NEXT:    srli a2, a1, 24
+; NOMISALIGN-NEXT:    sb a2, 3(a0)
+; NOMISALIGN-NEXT:    srli a2, a1, 16
+; NOMISALIGN-NEXT:    sb a2, 2(a0)
+; NOMISALIGN-NEXT:    srli a1, a1, 8
+; NOMISALIGN-NEXT:    sb a1, 1(a0)
+; NOMISALIGN-NEXT:    ret
+;
+; MISALIGN-LABEL: store_i32:
+; MISALIGN:       # %bb.0:
+; MISALIGN-NEXT:    sw a1, 0(a0)
+; MISALIGN-NEXT:    ret
   store i32 %v, i32* %p, align 1
   ret void
 }
@@ -214,6 +265,17 @@
 ; RV64I-NEXT:    srli a1, a1, 8
 ; RV64I-NEXT:    sb a1, 1(a0)
 ; RV64I-NEXT:    ret
+;
+; MISALIGN-RV32I-LABEL: store_i64:
+; MISALIGN-RV32I:       # %bb.0:
+; MISALIGN-RV32I-NEXT:    sw a2, 4(a0)
+; MISALIGN-RV32I-NEXT:    sw a1, 0(a0)
+; MISALIGN-RV32I-NEXT:    ret
+;
+; MISALIGN-RV64I-LABEL: store_i64:
+; MISALIGN-RV64I:       # %bb.0:
+; MISALIGN-RV64I-NEXT:    sd a1, 0(a0)
+; MISALIGN-RV64I-NEXT:    ret
   store i64 %v, i64* %p, align 1
   ret void
 }
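
As a hypothetical source-level motivation, not part of the patch: front ends commonly lower small fixed-size memcpy calls to a single unaligned load in IR, producing exactly the "load i32, i32* %p, align 1" pattern tested above. A self-contained sketch; read_u32 is a made-up name:

    #include <cstdint>
    #include <cstring>

    // Read a 32-bit value from a possibly unaligned pointer. Compilers
    // typically turn the fixed-size memcpy into a single align-1 i32 load,
    // which +unaligned-scalar-mem lets the RISC-V backend select as one lw
    // instead of four byte loads plus shifts.
    uint32_t read_u32(const uint8_t *p) {
      uint32_t v;
      std::memcpy(&v, p, sizeof v);
      return v;
    }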