diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -402,6 +402,21 @@
                                    N->getValueType(0));
 }]>;
 
+def XLenSubTrailingOnes : SDNodeXForm<imm, [{
+  uint64_t XLen = Subtarget->getXLen();
+  uint64_t TrailingOnes = N->getAPIntValue().countTrailingOnes();
+  return CurDAG->getTargetConstant(XLen - TrailingOnes, SDLoc(N),
+                                   N->getValueType(0));
+}]>;
+
+// Check if this mask is a non-empty sequence of ones starting at the least
+// significant bit with the remainder zero, and does not fit in simm12.
+def TrailingOnesMask : PatLeaf<(imm), [{
+  if (!N->hasOneUse())
+    return false;
+  return !isInt<12>(N->getSExtValue()) && isMask_64(N->getZExtValue());
+}], XLenSubTrailingOnes>;
+
 //===----------------------------------------------------------------------===//
 // Instruction Formats
 //===----------------------------------------------------------------------===//
@@ -1034,6 +1049,10 @@
 def : PatGprUimmLog2XLen<srl, SRLI>;
 def : PatGprUimmLog2XLen<sra, SRAI>;
 
+// AND with a trailing-ones mask that does not fit in simm12.
+def : Pat<(XLenVT (and GPR:$rs, TrailingOnesMask:$mask)),
+          (SRLI (SLLI $rs, TrailingOnesMask:$mask), TrailingOnesMask:$mask)>;
+
 // Match both a plain shift and one where the shift amount is masked (this is
 // typically introduced when the legalizer promotes the shift amount and
 // zero-extends it). For RISC-V, the mask is unnecessary as shifts in the base
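
Not part of the patch: a minimal standalone C++ sketch of what the TrailingOnesMask predicate and the XLenSubTrailingOnes transform above compute (the PatLeaf additionally requires the constant to have a single use, which this sketch omits; names here are illustrative):

```cpp
#include <cassert>
#include <cstdint>

// Mirrors TrailingOnesMask: the constant must not fit in simm12 and must be a
// non-empty run of ones starting at bit 0 (0xFFF, 0xFFFF, ..., all-ones).
static bool isTrailingOnesMask(int64_t SExt, uint64_t ZExt) {
  bool FitsSimm12 = SExt >= -2048 && SExt <= 2047;       // isInt<12>
  bool IsMask = ZExt != 0 && ((ZExt & (ZExt + 1)) == 0); // isMask_64
  return !FitsSimm12 && IsMask;
}

// Mirrors XLenSubTrailingOnes: shift amount = XLen - count of trailing ones.
// For a trailing-ones mask, popcount equals the trailing-ones count.
static unsigned shiftAmount(uint64_t Mask, unsigned XLen) {
  return XLen - (unsigned)__builtin_popcountll(Mask);
}

int main() {
  // RV64, mask 0xFFF: the AND is rewritten to slli 52 + srli 52.
  uint64_t X = 0x123456789ABCDEF0ull, Mask = 0xFFF;
  unsigned Sh = shiftAmount(Mask, 64);
  assert(isTrailingOnesMask((int64_t)Mask, Mask) && Sh == 52);
  assert(((X << Sh) >> Sh) == (X & Mask)); // shift pair == AND
}
```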
diff --git a/llvm/test/CodeGen/RISCV/alu16.ll b/llvm/test/CodeGen/RISCV/alu16.ll
--- a/llvm/test/CodeGen/RISCV/alu16.ll
+++ b/llvm/test/CodeGen/RISCV/alu16.ll
@@ -44,17 +44,15 @@
 define i16 @sltiu(i16 %a) nounwind {
 ; RV32I-LABEL: sltiu:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    sltiu a0, a0, 3
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: sltiu:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    sltiu a0, a0, 3
 ; RV64I-NEXT:    ret
   %1 = icmp ult i16 %a, 3
@@ -274,17 +272,15 @@
 define i16 @srl(i16 %a, i16 %b) nounwind {
 ; RV32I-LABEL: srl:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 16
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    srl a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: srl:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a2, 16
-; RV64I-NEXT:    addiw a2, a2, -1
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    srl a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = lshr i16 %a, %b
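
The alu16.ll changes fall out of i16 argument promotion: re-zero-extending the promoted value is an AND with 0xFFFF, a 16-one trailing mask, so the new pattern picks shift amounts 32 - 16 = 16 on RV32 and 64 - 16 = 48 on RV64. A quick check in C++ (illustrative only, not part of the patch):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // RV32: zext of an i16 held in a 32-bit GPR, mask 0xFFFF -> slli/srli 16.
  uint32_t A32 = 0xDEADBEEFu;
  assert(((A32 << 16) >> 16) == (A32 & 0xFFFFu));
  // RV64: the same value held in a 64-bit GPR -> slli/srli 48.
  uint64_t A64 = 0xFEEDFACEDEADBEEFull;
  assert(((A64 << 48) >> 48) == (A64 & 0xFFFFull));
}
```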
diff --git a/llvm/test/CodeGen/RISCV/and.ll b/llvm/test/CodeGen/RISCV/and.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/and.ll
@@ -0,0 +1,71 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+
+; Test for handling of AND with a constant. If the constant does not fit in
+; simm12 and is a non-empty sequence of ones starting at the least significant
+; bit with the remainder zero, we can replace the AND with SLLI + SRLI.
+
+define i32 @and32_0x7ff(i32 %x) {
+; RV32I-LABEL: and32_0x7ff:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 2047
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and32_0x7ff:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 2047
+; RV64I-NEXT:    ret
+  %a = and i32 %x, 2047
+  ret i32 %a
+}
+
+define i32 @and32_0xfff(i32 %x) {
+; RV32I-LABEL: and32_0xfff:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 20
+; RV32I-NEXT:    srli a0, a0, 20
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and32_0xfff:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 52
+; RV64I-NEXT:    srli a0, a0, 52
+; RV64I-NEXT:    ret
+  %a = and i32 %x, 4095
+  ret i32 %a
+}
+
+define i64 @and64_0x7ff(i64 %x) {
+; RV32I-LABEL: and64_0x7ff:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a0, a0, 2047
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and64_0x7ff:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 2047
+; RV64I-NEXT:    ret
+  %a = and i64 %x, 2047
+  ret i64 %a
+}
+
+define i64 @and64_0xfff(i64 %x) {
+; RV32I-LABEL: and64_0xfff:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 20
+; RV32I-NEXT:    srli a0, a0, 20
+; RV32I-NEXT:    li a1, 0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: and64_0xfff:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 52
+; RV64I-NEXT:    srli a0, a0, 52
+; RV64I-NEXT:    ret
+  %a = and i64 %x, 4095
+  ret i64 %a
+}
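
The four functions above pin down the simm12 boundary: 0x7FF (2047) is the largest trailing-ones mask a single ANDI can encode, while 0xFFF is the smallest that triggers the rewrite. A one-line check (illustrative, not part of the patch):

```cpp
#include <cstdio>

// simm12 covers [-2048, 2047]; constants inside the range stay a single ANDI.
static bool fitsSimm12(long long C) { return C >= -2048 && C <= 2047; }

int main() {
  printf("0x7ff: %d\n", fitsSimm12(0x7ff)); // 1 -> andi a0, a0, 2047
  printf("0xfff: %d\n", fitsSimm12(0xfff)); // 0 -> slli + srli
}
```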
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -6413,9 +6413,8 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    slli a1, a1, 16
+; RV32IA-NEXT:    srli a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    amoor.w a1, a1, (a2)
 ; RV32IA-NEXT:    srl a0, a1, a0
@@ -6435,9 +6434,8 @@
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    slli a1, a1, 48
+; RV64IA-NEXT:    srli a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoor.w a1, a1, (a2)
 ; RV64IA-NEXT:    srlw a0, a1, a0
@@ -6461,9 +6459,8 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    slli a1, a1, 16
+; RV32IA-NEXT:    srli a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    amoor.w.aq a1, a1, (a2)
 ; RV32IA-NEXT:    srl a0, a1, a0
@@ -6483,9 +6480,8 @@
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    slli a1, a1, 48
+; RV64IA-NEXT:    srli a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoor.w.aq a1, a1, (a2)
 ; RV64IA-NEXT:    srlw a0, a1, a0
@@ -6509,9 +6505,8 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    slli a1, a1, 16
+; RV32IA-NEXT:    srli a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    amoor.w.rl a1, a1, (a2)
 ; RV32IA-NEXT:    srl a0, a1, a0
@@ -6531,9 +6526,8 @@
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    slli a1, a1, 48
+; RV64IA-NEXT:    srli a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoor.w.rl a1, a1, (a2)
 ; RV64IA-NEXT:    srlw a0, a1, a0
@@ -6557,9 +6551,8 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    slli a1, a1, 16
+; RV32IA-NEXT:    srli a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    amoor.w.aqrl a1, a1, (a2)
 ; RV32IA-NEXT:    srl a0, a1, a0
@@ -6579,9 +6572,8 @@
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    slli a1, a1, 48
+; RV64IA-NEXT:    srli a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoor.w.aqrl a1, a1, (a2)
 ; RV64IA-NEXT:    srlw a0, a1, a0
@@ -6605,9 +6597,8 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    slli a1, a1, 16
+; RV32IA-NEXT:    srli a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    amoor.w.aqrl a1, a1, (a2)
 ; RV32IA-NEXT:    srl a0, a1, a0
@@ -6627,9 +6618,8 @@
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    slli a1, a1, 48
+; RV64IA-NEXT:    srli a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoor.w.aqrl a1, a1, (a2)
 ; RV64IA-NEXT:    srlw a0, a1, a0
@@ -6653,9 +6643,8 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    slli a1, a1, 16
+; RV32IA-NEXT:    srli a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    amoxor.w a1, a1, (a2)
 ; RV32IA-NEXT:    srl a0, a1, a0
@@ -6675,9 +6664,8 @@
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    slli a1, a1, 48
+; RV64IA-NEXT:    srli a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoxor.w a1, a1, (a2)
 ; RV64IA-NEXT:    srlw a0, a1, a0
@@ -6701,9 +6689,8 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    slli a1, a1, 16
+; RV32IA-NEXT:    srli a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    amoxor.w.aq a1, a1, (a2)
 ; RV32IA-NEXT:    srl a0, a1, a0
@@ -6723,9 +6710,8 @@
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    slli a1, a1, 48
+; RV64IA-NEXT:    srli a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoxor.w.aq a1, a1, (a2)
 ; RV64IA-NEXT:    srlw a0, a1, a0
@@ -6749,9 +6735,8 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    slli a1, a1, 16
+; RV32IA-NEXT:    srli a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    amoxor.w.rl a1, a1, (a2)
 ; RV32IA-NEXT:    srl a0, a1, a0
@@ -6771,9 +6756,8 @@
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    slli a1, a1, 48
+; RV64IA-NEXT:    srli a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoxor.w.rl a1, a1, (a2)
 ; RV64IA-NEXT:    srlw a0, a1, a0
@@ -6797,9 +6781,8 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    slli a1, a1, 16
+; RV32IA-NEXT:    srli a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    amoxor.w.aqrl a1, a1, (a2)
 ; RV32IA-NEXT:    srl a0, a1, a0
@@ -6819,9 +6802,8 @@
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    slli a1, a1, 48
+; RV64IA-NEXT:    srli a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoxor.w.aqrl a1, a1, (a2)
 ; RV64IA-NEXT:    srlw a0, a1, a0
@@ -6845,9 +6827,8 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    slli a1, a1, 16
+; RV32IA-NEXT:    srli a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    amoxor.w.aqrl a1, a1, (a2)
 ; RV32IA-NEXT:    srl a0, a1, a0
@@ -6867,9 +6848,8 @@
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    slli a1, a1, 48
+; RV64IA-NEXT:    srli a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoxor.w.aqrl a1, a1, (a2)
 ; RV64IA-NEXT:    srlw a0, a1, a0
diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll
--- a/llvm/test/CodeGen/RISCV/atomic-signext.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll
@@ -1535,9 +1535,8 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    slli a1, a1, 16
+; RV32IA-NEXT:    srli a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    amoor.w a1, a1, (a2)
 ; RV32IA-NEXT:    srl a0, a1, a0
@@ -1561,9 +1560,8 @@
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    slli a1, a1, 48
+; RV64IA-NEXT:    srli a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoor.w a1, a1, (a2)
 ; RV64IA-NEXT:    srlw a0, a1, a0
@@ -1591,9 +1589,8 @@
 ; RV32IA:       # %bb.0:
 ; RV32IA-NEXT:    andi a2, a0, -4
 ; RV32IA-NEXT:    slli a0, a0, 3
-; RV32IA-NEXT:    lui a3, 16
-; RV32IA-NEXT:    addi a3, a3, -1
-; RV32IA-NEXT:    and a1, a1, a3
+; RV32IA-NEXT:    slli a1, a1, 16
+; RV32IA-NEXT:    srli a1, a1, 16
 ; RV32IA-NEXT:    sll a1, a1, a0
 ; RV32IA-NEXT:    amoxor.w a1, a1, (a2)
 ; RV32IA-NEXT:    srl a0, a1, a0
@@ -1617,9 +1614,8 @@
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a2, a0, -4
 ; RV64IA-NEXT:    slliw a0, a0, 3
-; RV64IA-NEXT:    lui a3, 16
-; RV64IA-NEXT:    addiw a3, a3, -1
-; RV64IA-NEXT:    and a1, a1, a3
+; RV64IA-NEXT:    slli a1, a1, 48
+; RV64IA-NEXT:    srli a1, a1, 48
 ; RV64IA-NEXT:    sllw a1, a1, a0
 ; RV64IA-NEXT:    amoxor.w a1, a1, (a2)
 ; RV64IA-NEXT:    srlw a0, a1, a0
diff --git a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
--- a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
@@ -186,9 +186,8 @@
 define i16 @test_cttz_i16(i16 %a) nounwind {
 ; RV32I-LABEL: test_cttz_i16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a1, a0, a1
+; RV32I-NEXT:    slli a1, a0, 16
+; RV32I-NEXT:    srli a1, a1, 16
 ; RV32I-NEXT:    beqz a1, .LBB4_2
 ; RV32I-NEXT:  # %bb.1: # %cond.false
 ; RV32I-NEXT:    addi a1, a0, -1
@@ -221,9 +220,8 @@
 ;
 ; RV64I-LABEL: test_cttz_i16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a1, a0, 48
+; RV64I-NEXT:    srli a1, a1, 48
 ; RV64I-NEXT:    beqz a1, .LBB4_2
 ; RV64I-NEXT:  # %bb.1: # %cond.false
 ; RV64I-NEXT:    addi a1, a0, -1
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
--- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
@@ -19,9 +19,8 @@
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    lui a0, 16
-; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    slli a0, a1, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __fixsfsi@plt
 ; RV32I-NEXT:    add a0, s0, a0
@@ -36,9 +35,8 @@
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    lui a0, 16
-; RV64I-NEXT:    addiw a0, a0, -1
-; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    slli a0, a1, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call __fixsfdi@plt
 ; RV64I-NEXT:    addw a0, s0, a0
@@ -586,9 +584,8 @@
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call callee_half_ret@plt
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __fixsfsi@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -600,9 +597,8 @@
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call callee_half_ret@plt
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call __fixsfdi@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -84,11 +84,10 @@
 ; RV32I-FPELIM:       # %bb.0:
 ; RV32I-FPELIM-NEXT:    lw t0, 4(sp)
 ; RV32I-FPELIM-NEXT:    lw t1, 0(sp)
-; RV32I-FPELIM-NEXT:    andi t2, a0, 255
-; RV32I-FPELIM-NEXT:    lui a0, 16
-; RV32I-FPELIM-NEXT:    addi a0, a0, -1
-; RV32I-FPELIM-NEXT:    and a0, a1, a0
-; RV32I-FPELIM-NEXT:    add a0, t2, a0
+; RV32I-FPELIM-NEXT:    andi a0, a0, 255
+; RV32I-FPELIM-NEXT:    slli a1, a1, 16
+; RV32I-FPELIM-NEXT:    srli a1, a1, 16
+; RV32I-FPELIM-NEXT:    add a0, a0, a1
 ; RV32I-FPELIM-NEXT:    add a0, a0, a2
 ; RV32I-FPELIM-NEXT:    xor a1, a4, t1
 ; RV32I-FPELIM-NEXT:    xor a2, a3, a7
@@ -108,11 +107,10 @@
 ; RV32I-WITHFP-NEXT:    addi s0, sp, 16
 ; RV32I-WITHFP-NEXT:    lw t0, 4(s0)
 ; RV32I-WITHFP-NEXT:    lw t1, 0(s0)
-; RV32I-WITHFP-NEXT:    andi t2, a0, 255
-; RV32I-WITHFP-NEXT:    lui a0, 16
-; RV32I-WITHFP-NEXT:    addi a0, a0, -1
-; RV32I-WITHFP-NEXT:    and a0, a1, a0
-; RV32I-WITHFP-NEXT:    add a0, t2, a0
+; RV32I-WITHFP-NEXT:    andi a0, a0, 255
+; RV32I-WITHFP-NEXT:    slli a1, a1, 16
+; RV32I-WITHFP-NEXT:    srli a1, a1, 16
+; RV32I-WITHFP-NEXT:    add a0, a0, a1
 ; RV32I-WITHFP-NEXT:    add a0, a0, a2
 ; RV32I-WITHFP-NEXT:    xor a1, a4, t1
 ; RV32I-WITHFP-NEXT:    xor a2, a3, a7
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -50,11 +50,10 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lw t0, 8(sp)
 ; RV64I-NEXT:    ld t1, 0(sp)
-; RV64I-NEXT:    andi t2, a0, 255
-; RV64I-NEXT:    lui a0, 16
-; RV64I-NEXT:    addiw a0, a0, -1
-; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    addw a0, t2, a0
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    srli a1, a1, 48
+; RV64I-NEXT:    addw a0, a0, a1
 ; RV64I-NEXT:    addw a0, a0, a2
 ; RV64I-NEXT:    xor a1, a4, t1
 ; RV64I-NEXT:    xor a2, a3, a7
diff --git a/llvm/test/CodeGen/RISCV/copysign-casts.ll b/llvm/test/CodeGen/RISCV/copysign-casts.ll
--- a/llvm/test/CodeGen/RISCV/copysign-casts.ll
+++ b/llvm/test/CodeGen/RISCV/copysign-casts.ll
@@ -31,19 +31,18 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a3, 524288
 ; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    addi a3, a3, -1
-; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    or a1, a1, a2
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fold_promote_d_s:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    srli a2, a2, 1
-; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    slli a1, a1, 32
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -52,8 +51,8 @@
 ; RV32IF-NEXT:    fmv.x.w a2, fa0
 ; RV32IF-NEXT:    lui a3, 524288
 ; RV32IF-NEXT:    and a2, a2, a3
-; RV32IF-NEXT:    addi a3, a3, -1
-; RV32IF-NEXT:    and a1, a1, a3
+; RV32IF-NEXT:    slli a1, a1, 1
+; RV32IF-NEXT:    srli a1, a1, 1
 ; RV32IF-NEXT:    or a1, a1, a2
 ; RV32IF-NEXT:    ret
 ;
@@ -74,8 +73,8 @@
 ; RV32IFZFH-NEXT:    fmv.x.w a2, fa0
 ; RV32IFZFH-NEXT:    lui a3, 524288
 ; RV32IFZFH-NEXT:    and a2, a2, a3
-; RV32IFZFH-NEXT:    addi a3, a3, -1
-; RV32IFZFH-NEXT:    and a1, a1, a3
+; RV32IFZFH-NEXT:    slli a1, a1, 1
+; RV32IFZFH-NEXT:    srli a1, a1, 1
 ; RV32IFZFH-NEXT:    or a1, a1, a2
 ; RV32IFZFH-NEXT:    ret
 ;
@@ -98,35 +97,32 @@
 define double @fold_promote_d_h(double %a, half %b) nounwind {
 ; RV32I-LABEL: fold_promote_d_h:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a3, 524288
-; RV32I-NEXT:    addi a3, a3, -1
-; RV32I-NEXT:    and a1, a1, a3
 ; RV32I-NEXT:    lui a3, 8
 ; RV32I-NEXT:    and a2, a2, a3
 ; RV32I-NEXT:    slli a2, a2, 16
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    or a1, a1, a2
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fold_promote_d_h:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    srli a2, a2, 1
-; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    lui a2, 8
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    slli a1, a1, 48
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32IF-LABEL: fold_promote_d_h:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    fmv.x.w a2, fa0
-; RV32IF-NEXT:    lui a3, 524288
-; RV32IF-NEXT:    addi a3, a3, -1
-; RV32IF-NEXT:    and a1, a1, a3
 ; RV32IF-NEXT:    lui a3, 8
 ; RV32IF-NEXT:    and a2, a2, a3
 ; RV32IF-NEXT:    slli a2, a2, 16
+; RV32IF-NEXT:    slli a1, a1, 1
+; RV32IF-NEXT:    srli a1, a1, 1
 ; RV32IF-NEXT:    or a1, a1, a2
 ; RV32IF-NEXT:    ret
 ;
@@ -163,12 +159,11 @@
 ; RV32IFZFH-LABEL: fold_promote_d_h:
 ; RV32IFZFH:       # %bb.0:
 ; RV32IFZFH-NEXT:    fmv.x.h a2, fa0
-; RV32IFZFH-NEXT:    lui a3, 524288
-; RV32IFZFH-NEXT:    addi a3, a3, -1
-; RV32IFZFH-NEXT:    and a1, a1, a3
 ; RV32IFZFH-NEXT:    lui a3, 8
 ; RV32IFZFH-NEXT:    and a2, a2, a3
 ; RV32IFZFH-NEXT:    slli a2, a2, 16
+; RV32IFZFH-NEXT:    slli a1, a1, 1
+; RV32IFZFH-NEXT:    srli a1, a1, 1
 ; RV32IFZFH-NEXT:    or a1, a1, a2
 ; RV32IFZFH-NEXT:    ret
 ;
@@ -191,23 +186,21 @@
 define float @fold_promote_f_h(float %a, half %b) nounwind {
 ; RV32I-LABEL: fold_promote_f_h:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a0, a0, a2
 ; RV32I-NEXT:    lui a2, 8
 ; RV32I-NEXT:    and a1, a1, a2
 ; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fold_promote_f_h:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a2, 524288
-; RV64I-NEXT:    addiw a2, a2, -1
-; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    lui a2, 8
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    slliw a1, a1, 16
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -279,21 +272,20 @@
 ; RV32I-LABEL: fold_demote_s_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    and a2, a2, a1
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    or a0, a0, a2
+; RV32I-NEXT:    and a1, a2, a1
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fold_demote_s_d:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a2, 524288
-; RV64I-NEXT:    addiw a2, a2, -1
-; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    li a2, -1
 ; RV64I-NEXT:    slli a2, a2, 63
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    srli a1, a1, 32
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
@@ -340,38 +332,35 @@
 define half @fold_demote_h_s(half %a, float %b) nounwind {
 ; RV32I-LABEL: fold_demote_h_s:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 8
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a0, a0, a2
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    and a1, a1, a2
 ; RV32I-NEXT:    srli a1, a1, 16
+; RV32I-NEXT:    slli a0, a0, 17
+; RV32I-NEXT:    srli a0, a0, 17
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fold_demote_h_s:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a2, 8
-; RV64I-NEXT:    addiw a2, a2, -1
-; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    li a2, 1
 ; RV64I-NEXT:    slli a2, a2, 31
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    srli a1, a1, 16
+; RV64I-NEXT:    slli a0, a0, 49
+; RV64I-NEXT:    srli a0, a0, 49
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32IF-LABEL: fold_demote_h_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    fmv.x.w a0, fa1
-; RV32IF-NEXT:    fmv.x.w a1, fa0
-; RV32IF-NEXT:    lui a2, 8
-; RV32IF-NEXT:    addi a2, a2, -1
-; RV32IF-NEXT:    and a1, a1, a2
+; RV32IF-NEXT:    fmv.x.w a0, fa0
+; RV32IF-NEXT:    fmv.x.w a1, fa1
 ; RV32IF-NEXT:    lui a2, 524288
-; RV32IF-NEXT:    and a0, a0, a2
-; RV32IF-NEXT:    srli a0, a0, 16
-; RV32IF-NEXT:    or a0, a1, a0
+; RV32IF-NEXT:    and a1, a1, a2
+; RV32IF-NEXT:    srli a1, a1, 16
+; RV32IF-NEXT:    slli a0, a0, 17
+; RV32IF-NEXT:    srli a0, a0, 17
+; RV32IF-NEXT:    or a0, a0, a1
 ; RV32IF-NEXT:    lui a1, 1048560
 ; RV32IF-NEXT:    or a0, a0, a1
 ; RV32IF-NEXT:    fmv.w.x fa0, a0
@@ -379,15 +368,14 @@
 ;
 ; RV32IFD-LABEL: fold_demote_h_s:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fmv.x.w a0, fa1
-; RV32IFD-NEXT:    fmv.x.w a1, fa0
-; RV32IFD-NEXT:    lui a2, 8
-; RV32IFD-NEXT:    addi a2, a2, -1
-; RV32IFD-NEXT:    and a1, a1, a2
+; RV32IFD-NEXT:    fmv.x.w a0, fa0
+; RV32IFD-NEXT:    fmv.x.w a1, fa1
 ; RV32IFD-NEXT:    lui a2, 524288
-; RV32IFD-NEXT:    and a0, a0, a2
-; RV32IFD-NEXT:    srli a0, a0, 16
-; RV32IFD-NEXT:    or a0, a1, a0
+; RV32IFD-NEXT:    and a1, a1, a2
+; RV32IFD-NEXT:    srli a1, a1, 16
+; RV32IFD-NEXT:    slli a0, a0, 17
+; RV32IFD-NEXT:    srli a0, a0, 17
+; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    lui a1, 1048560
 ; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    fmv.w.x fa0, a0
@@ -395,15 +383,14 @@
 ;
 ; RV64IFD-LABEL: fold_demote_h_s:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.x.w a0, fa1
-; RV64IFD-NEXT:    fmv.x.w a1, fa0
-; RV64IFD-NEXT:    lui a2, 8
-; RV64IFD-NEXT:    addiw a2, a2, -1
-; RV64IFD-NEXT:    and a1, a1, a2
+; RV64IFD-NEXT:    fmv.x.w a0, fa0
+; RV64IFD-NEXT:    fmv.x.w a1, fa1
 ; RV64IFD-NEXT:    lui a2, 524288
-; RV64IFD-NEXT:    and a0, a0, a2
-; RV64IFD-NEXT:    srli a0, a0, 16
-; RV64IFD-NEXT:    or a0, a1, a0
+; RV64IFD-NEXT:    and a1, a1, a2
+; RV64IFD-NEXT:    srli a1, a1, 16
+; RV64IFD-NEXT:    slli a0, a0, 49
+; RV64IFD-NEXT:    srli a0, a0, 49
+; RV64IFD-NEXT:    or a0, a0, a1
 ; RV64IFD-NEXT:    lui a1, 1048560
 ; RV64IFD-NEXT:    or a0, a0, a1
 ; RV64IFD-NEXT:    fmv.w.x fa0, a0
@@ -434,36 +421,33 @@
 define half @fold_demote_h_d(half %a, double %b) nounwind {
 ; RV32I-LABEL: fold_demote_h_d:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 8
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
 ; RV32I-NEXT:    lui a1, 524288
 ; RV32I-NEXT:    and a1, a2, a1
 ; RV32I-NEXT:    srli a1, a1, 16
+; RV32I-NEXT:    slli a0, a0, 17
+; RV32I-NEXT:    srli a0, a0, 17
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fold_demote_h_d:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a2, 8
-; RV64I-NEXT:    addiw a2, a2, -1
-; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    li a2, -1
 ; RV64I-NEXT:    slli a2, a2, 63
 ; RV64I-NEXT:    and a1, a1, a2
 ; RV64I-NEXT:    srli a1, a1, 48
+; RV64I-NEXT:    slli a0, a0, 49
+; RV64I-NEXT:    srli a0, a0, 49
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32IF-LABEL: fold_demote_h_d:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    fmv.x.w a0, fa0
-; RV32IF-NEXT:    lui a2, 8
-; RV32IF-NEXT:    addi a2, a2, -1
-; RV32IF-NEXT:    and a0, a0, a2
 ; RV32IF-NEXT:    lui a2, 524288
 ; RV32IF-NEXT:    and a1, a1, a2
 ; RV32IF-NEXT:    srli a1, a1, 16
+; RV32IF-NEXT:    slli a0, a0, 17
+; RV32IF-NEXT:    srli a0, a0, 17
 ; RV32IF-NEXT:    or a0, a0, a1
 ; RV32IF-NEXT:    lui a1, 1048560
 ; RV32IF-NEXT:    or a0, a0, a1
@@ -474,15 +458,14 @@
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    fsd fa1, 8(sp)
-; RV32IFD-NEXT:    fmv.x.w a0, fa0
-; RV32IFD-NEXT:    lw a1, 12(sp)
-; RV32IFD-NEXT:    lui a2, 8
-; RV32IFD-NEXT:    addi a2, a2, -1
-; RV32IFD-NEXT:    and a0, a0, a2
+; RV32IFD-NEXT:    lw a0, 12(sp)
+; RV32IFD-NEXT:    fmv.x.w a1, fa0
 ; RV32IFD-NEXT:    lui a2, 524288
-; RV32IFD-NEXT:    and a1, a1, a2
-; RV32IFD-NEXT:    srli a1, a1, 16
-; RV32IFD-NEXT:    or a0, a0, a1
+; RV32IFD-NEXT:    and a0, a0, a2
+; RV32IFD-NEXT:    srli a0, a0, 16
+; RV32IFD-NEXT:    slli a1, a1, 17
+; RV32IFD-NEXT:    srli a1, a1, 17
+; RV32IFD-NEXT:    or a0, a1, a0
 ; RV32IFD-NEXT:    lui a1, 1048560
 ; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    fmv.w.x fa0, a0
@@ -491,16 +474,15 @@
 ;
 ; RV64IFD-LABEL: fold_demote_h_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.x.d a0, fa1
-; RV64IFD-NEXT:    fmv.x.w a1, fa0
-; RV64IFD-NEXT:    lui a2, 8
-; RV64IFD-NEXT:    addiw a2, a2, -1
-; RV64IFD-NEXT:    and a1, a1, a2
+; RV64IFD-NEXT:    fmv.x.w a0, fa0
+; RV64IFD-NEXT:    fmv.x.d a1, fa1
 ; RV64IFD-NEXT:    li a2, -1
 ; RV64IFD-NEXT:    slli a2, a2, 63
-; RV64IFD-NEXT:    and a0, a0, a2
-; RV64IFD-NEXT:    srli a0, a0, 48
-; RV64IFD-NEXT:    or a0, a1, a0
+; RV64IFD-NEXT:    and a1, a1, a2
+; RV64IFD-NEXT:    srli a1, a1, 48
+; RV64IFD-NEXT:    slli a0, a0, 49
+; RV64IFD-NEXT:    srli a0, a0, 49
+; RV64IFD-NEXT:    or a0, a0, a1
 ; RV64IFD-NEXT:    lui a1, 1048560
 ; RV64IFD-NEXT:    or a0, a0, a1
 ; RV64IFD-NEXT:    fmv.w.x fa0, a0
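
The copysign/fabs diffs above are the same pattern in disguise: keeping everything but the sign bit is an AND with a trailing-ones mask (31 ones for an f32 payload held in an RV64 GPR, 63 ones for f64), giving shift amounts 33 and 1. A small illustration using raw IEEE-754 bit patterns (not part of the patch):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // f32 in a 64-bit GPR: mask 0x7FFFFFFF has 31 ones -> slli 33 + srli 33.
  uint64_t F = 0xC0490FDBull; // -pi as f32 raw bits
  assert(((F << 33) >> 33) == (F & 0x7FFFFFFFull)); // +pi bits, 0x40490FDB

  // f64: mask 0x7FFFFFFFFFFFFFFF has 63 ones -> slli 1 + srli 1.
  uint64_t D = 0xBFF0000000000000ull; // -1.0 as f64 raw bits
  assert(((D << 1) >> 1) == 0x3FF0000000000000ull); // +1.0 bits
}
```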
diff --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll
--- a/llvm/test/CodeGen/RISCV/div.ll
+++ b/llvm/test/CodeGen/RISCV/div.ll
@@ -481,9 +481,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    li a1, 5
 ; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -502,9 +501,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    li a1, 5
 ; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -556,9 +554,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a1, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a1, a0, 16
 ; RV32I-NEXT:    li a0, 10
 ; RV32I-NEXT:    call __udivsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -567,9 +564,8 @@
 ;
 ; RV32IM-LABEL: udiv16_constant_lhs:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    lui a1, 16
-; RV32IM-NEXT:    addi a1, a1, -1
-; RV32IM-NEXT:    and a0, a0, a1
+; RV32IM-NEXT:    slli a0, a0, 16
+; RV32IM-NEXT:    srli a0, a0, 16
 ; RV32IM-NEXT:    li a1, 10
 ; RV32IM-NEXT:    divu a0, a1, a0
 ; RV32IM-NEXT:    ret
@@ -578,9 +574,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a1, a0, 48
 ; RV64I-NEXT:    li a0, 10
 ; RV64I-NEXT:    call __udivdi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -589,9 +584,8 @@
 ;
 ; RV64IM-LABEL: udiv16_constant_lhs:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    lui a1, 16
-; RV64IM-NEXT:    addiw a1, a1, -1
-; RV64IM-NEXT:    and a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 48
+; RV64IM-NEXT:    srli a0, a0, 48
 ; RV64IM-NEXT:    li a1, 10
 ; RV64IM-NEXT:    divuw a0, a1, a0
 ; RV64IM-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -191,19 +191,19 @@
 ; RV32I-LABEL: fsgnj_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    and a3, a3, a2
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    and a2, a3, a2
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
+; RV32I-NEXT:    or a1, a1, a2
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fsgnj_d:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    slli a3, a2, 63
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    srli a2, a2, 1
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = call double @llvm.copysign.f64(double %a, double %b)
@@ -283,8 +283,8 @@
 ; RV32I-NEXT:    not a2, a3
 ; RV32I-NEXT:    lui a3, 524288
 ; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    addi a3, a3, -1
-; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    or a1, a1, a2
 ; RV32I-NEXT:    ret
 ;
@@ -292,10 +292,10 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    slli a3, a2, 63
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    srli a2, a2, 1
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = fsub double -0.0, %b
@@ -328,9 +328,8 @@
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    mv a3, a1
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a1, a3, a1
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    call __adddf3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -343,9 +342,8 @@
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __adddf3@plt
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    li a0, -1
+; RV64I-NEXT:    slli a0, a0, 1
 ; RV64I-NEXT:    srli a0, a0, 1
-; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    call __adddf3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
--- a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
@@ -52,30 +52,26 @@
 define double @fabs(double %a) nounwind {
 ; RV32I-LABEL: fabs:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV32IFD-LABEL: fabs:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    lui a2, 524288
-; RV32IFD-NEXT:    addi a2, a2, -1
-; RV32IFD-NEXT:    and a1, a1, a2
+; RV32IFD-NEXT:    slli a1, a1, 1
+; RV32IFD-NEXT:    srli a1, a1, 1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64I-LABEL: fabs:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    srli a1, a1, 1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    ret
 ;
 ; RV64IFD-LABEL: fabs:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    li a1, -1
-; RV64IFD-NEXT:    srli a1, a1, 1
-; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    slli a0, a0, 1
+; RV64IFD-NEXT:    srli a0, a0, 1
 ; RV64IFD-NEXT:    ret
   %1 = call double @llvm.fabs.f64(double %a)
   ret double %1
@@ -93,8 +89,8 @@
 ; RV32I-NEXT:    not a2, a3
 ; RV32I-NEXT:    lui a3, 524288
 ; RV32I-NEXT:    and a2, a2, a3
-; RV32I-NEXT:    addi a3, a3, -1
-; RV32I-NEXT:    and a1, a1, a3
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    or a1, a1, a2
 ; RV32I-NEXT:    ret
 ;
@@ -118,10 +114,10 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    slli a3, a2, 63
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    srli a2, a2, 1
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -613,16 +613,14 @@
 ;
 ; RV32I-LABEL: fabs_f64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fabs_f64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    srli a1, a1, 1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    ret
   %1 = call double @llvm.fabs.f64(double %a)
   ret double %1
@@ -729,19 +727,19 @@
 ; RV32I-LABEL: copysign_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, 524288
-; RV32I-NEXT:    and a3, a3, a2
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    or a1, a1, a3
+; RV32I-NEXT:    and a2, a3, a2
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
+; RV32I-NEXT:    or a1, a1, a2
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: copysign_f64:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    li a2, -1
-; RV64I-NEXT:    slli a3, a2, 63
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    srli a2, a2, 1
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a2, a2, 63
+; RV64I-NEXT:    and a1, a1, a2
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = call double @llvm.copysign.f64(double %a, double %b)
diff --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll
--- a/llvm/test/CodeGen/RISCV/float-arith.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith.ll
@@ -192,8 +192,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -201,8 +201,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    addiw a2, a2, -1
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.copysign.f32(float %a, float %b)
@@ -284,10 +284,10 @@
 ; RV32I-NEXT:    call __addsf3@plt
 ; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    addi a2, a1, -1
-; RV32I-NEXT:    and a2, s0, a2
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    slli a1, s0, 1
+; RV32I-NEXT:    srli a1, a1, 1
+; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -302,10 +302,10 @@
 ; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    addiw a2, a1, -1
-; RV64I-NEXT:    and a2, s0, a2
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    slli a1, s0, 33
+; RV64I-NEXT:    srli a1, a1, 33
+; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -341,9 +341,8 @@
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    call __addsf3@plt
 ; RV32I-NEXT:    mv a1, a0
-; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    call __addsf3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -355,9 +354,8 @@
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    mv a1, a0
-; RV64I-NEXT:    lui a0, 524288
-; RV64I-NEXT:    addiw a0, a0, -1
-; RV64I-NEXT:    and a0, a1, a0
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    call __addsf3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
--- a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
@@ -73,9 +73,8 @@
 ; RV32F-NEXT:    mv s1, a0
 ; RV32F-NEXT:    call __adddf3@plt
 ; RV32F-NEXT:    mv a2, a0
-; RV32F-NEXT:    lui a0, 524288
-; RV32F-NEXT:    addi a0, a0, -1
-; RV32F-NEXT:    and a3, a1, a0
+; RV32F-NEXT:    slli a0, a1, 1
+; RV32F-NEXT:    srli a3, a0, 1
 ; RV32F-NEXT:    mv a0, s1
 ; RV32F-NEXT:    mv a1, s0
 ; RV32F-NEXT:    call __adddf3@plt
@@ -110,9 +109,8 @@
 ; RV64F-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64F-NEXT:    mv s0, a0
 ; RV64F-NEXT:    call __adddf3@plt
-; RV64F-NEXT:    li a1, -1
-; RV64F-NEXT:    srli a1, a1, 1
-; RV64F-NEXT:    and a1, a0, a1
+; RV64F-NEXT:    slli a0, a0, 1
+; RV64F-NEXT:    srli a1, a0, 1
 ; RV64F-NEXT:    mv a0, s0
 ; RV64F-NEXT:    call __adddf3@plt
 ; RV64F-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
--- a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
@@ -47,30 +47,26 @@
 define float @fabs(float %a) nounwind {
 ; RV32I-LABEL: fabs:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV32IF-LABEL: fabs:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    lui a1, 524288
-; RV32IF-NEXT:    addi a1, a1, -1
-; RV32IF-NEXT:    and a0, a0, a1
+; RV32IF-NEXT:    slli a0, a0, 1
+; RV32IF-NEXT:    srli a0, a0, 1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64I-LABEL: fabs:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    ret
 ;
 ; RV64IF-LABEL: fabs:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    lui a1, 524288
-; RV64IF-NEXT:    addiw a1, a1, -1
-; RV64IF-NEXT:    and a0, a0, a1
+; RV64IF-NEXT:    slli a0, a0, 33
+; RV64IF-NEXT:    srli a0, a0, 33
 ; RV64IF-NEXT:    ret
   %1 = call float @llvm.fabs.f32(float %a)
   ret float %1
@@ -88,8 +84,8 @@
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -108,8 +104,8 @@
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    addiw a2, a2, -1
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
--- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -588,16 +588,14 @@
 ;
 ; RV32I-LABEL: fabs_f32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fabs_f32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.fabs.f32(float %a)
   ret float %1
@@ -705,8 +703,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -714,8 +712,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lui a2, 524288
 ; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    addiw a2, a2, -1
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = call float @llvm.copysign.f32(float %a, float %b)
diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll
--- a/llvm/test/CodeGen/RISCV/half-arith.ll
+++ b/llvm/test/CodeGen/RISCV/half-arith.ll
@@ -294,9 +294,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call sqrtf@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -308,9 +307,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call sqrtf@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
@@ -338,9 +336,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, 1048568
 ; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    lui a2, 8
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 17
+; RV32I-NEXT:    srli a0, a0, 17
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -348,9 +345,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lui a2, 1048568
 ; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    lui a2, 8
-; RV64I-NEXT:    addiw a2, a2, -1
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 49
+; RV64I-NEXT:    srli a0, a0, 49
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = call half @llvm.copysign.f16(half %a, half %b)
@@ -485,9 +481,8 @@
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV32I-NEXT:    lui a1, 1048568
 ; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    lui a1, 8
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a1, s2, a1
+; RV32I-NEXT:    slli a1, s2, 17
+; RV32I-NEXT:    srli a1, a1, 17
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
@@ -525,9 +520,8 @@
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV64I-NEXT:    lui a1, 1048568
 ; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    lui a1, 8
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a1, s2, a1
+; RV64I-NEXT:    slli a1, s2, 49
+; RV64I-NEXT:    srli a1, a1, 49
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
@@ -583,9 +577,8 @@
 ; RV32I-NEXT:    and a0, a0, s1
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv s0, a0
-; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    and a0, s0, a0
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV32I-NEXT:    and a0, a0, s1
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
@@ -621,9 +614,8 @@
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv s0, a0
-; RV64I-NEXT:    lui a0, 524288
-; RV64I-NEXT:    addiw a0, a0, -1
-; RV64I-NEXT:    and a0, s0, a0
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV64I-NEXT:    and a0, a0, s1
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
diff --git a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
--- a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
@@ -47,30 +47,26 @@
 define half @fabs(half %a) nounwind {
 ; RV32I-LABEL: fabs:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 8
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 17
+; RV32I-NEXT:    srli a0, a0, 17
 ; RV32I-NEXT:    ret
 ;
 ; RV32IZFH-LABEL: fabs:
 ; RV32IZFH:       # %bb.0:
-; RV32IZFH-NEXT:    lui a1, 8
-; RV32IZFH-NEXT:    addi a1, a1, -1
-; RV32IZFH-NEXT:    and a0, a0, a1
+; RV32IZFH-NEXT:    slli a0, a0, 17
+; RV32IZFH-NEXT:    srli a0, a0, 17
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64I-LABEL: fabs:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 8
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 49
+; RV64I-NEXT:    srli a0, a0, 49
 ; RV64I-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: fabs:
 ; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    lui a1, 8
-; RV64IZFH-NEXT:    addiw a1, a1, -1
-; RV64IZFH-NEXT:    and a0, a0, a1
+; RV64IZFH-NEXT:    slli a0, a0, 49
+; RV64IZFH-NEXT:    srli a0, a0, 49
 ; RV64IZFH-NEXT:    ret
   %1 = call half @llvm.fabs.f16(half %a)
   ret half %1
@@ -88,9 +84,8 @@
 ; RV32I-NEXT:    not a1, a1
 ; RV32I-NEXT:    lui a2, 1048568
 ; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    lui a2, 8
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 17
+; RV32I-NEXT:    srli a0, a0, 17
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -107,9 +102,8 @@
 ; RV64I-NEXT:    not a1, a1
 ; RV64I-NEXT:    lui a2, 1048568
 ; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    lui a2, 8
-; RV64I-NEXT:    addiw a2, a2, -1
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 49
+; RV64I-NEXT:    srli a0, a0, 49
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/half-convert-strict.ll b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
--- a/llvm/test/CodeGen/RISCV/half-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
@@ -291,33 +291,29 @@
 define half @fcvt_h_ui(i16 %a) nounwind strictfp {
 ; RV32IZFH-LABEL: fcvt_h_ui:
 ; RV32IZFH:       # %bb.0:
-; RV32IZFH-NEXT:    lui a1, 16
-; RV32IZFH-NEXT:    addi a1, a1, -1
-; RV32IZFH-NEXT:    and a0, a0, a1
+; RV32IZFH-NEXT:    slli a0, a0, 16
+; RV32IZFH-NEXT:    srli a0, a0, 16
 ; RV32IZFH-NEXT:    fcvt.h.wu fa0, a0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: fcvt_h_ui:
 ; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    lui a1, 16
-; RV64IZFH-NEXT:    addiw a1, a1, -1
-; RV64IZFH-NEXT:    and a0, a0, a1
+; RV64IZFH-NEXT:    slli a0, a0, 48
+; RV64IZFH-NEXT:    srli a0, a0, 48
 ; RV64IZFH-NEXT:    fcvt.h.wu fa0, a0
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: fcvt_h_ui:
 ; RV32IDZFH:       # %bb.0:
-; RV32IDZFH-NEXT:    lui a1, 16
-; RV32IDZFH-NEXT:    addi a1, a1, -1
-; RV32IDZFH-NEXT:    and a0, a0, a1
+; RV32IDZFH-NEXT:    slli a0, a0, 16
+; RV32IDZFH-NEXT:    srli a0, a0, 16
 ; RV32IDZFH-NEXT:    fcvt.h.wu fa0, a0
 ; RV32IDZFH-NEXT:    ret
 ;
 ; RV64IDZFH-LABEL: fcvt_h_ui:
 ; RV64IDZFH:       # %bb.0:
-; RV64IDZFH-NEXT:    lui a1, 16
-; RV64IDZFH-NEXT:    addiw a1, a1, -1
-; RV64IDZFH-NEXT:    and a0, a0, a1
+; RV64IDZFH-NEXT:    slli a0, a0, 48
+; RV64IDZFH-NEXT:    srli a0, a0, 48
 ; RV64IDZFH-NEXT:    fcvt.h.wu fa0, a0
 ; RV64IDZFH-NEXT:    ret
   %1 = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -37,9 +37,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __fixsfsi@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -50,9 +49,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call __fixsfdi@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -143,9 +141,8 @@
 ; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 815104
@@ -192,9 +189,8 @@
 ; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 815104
@@ -263,9 +259,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __fixunssfsi@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -276,9 +271,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call __fixunssfdi@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -443,9 +437,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __fixsfsi@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -456,9 +449,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call __fixsfdi@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -522,9 +514,8 @@
 ; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 847872
@@ -573,9 +564,8 @@
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s4, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 847872
@@ -645,9 +635,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __fixunssfsi@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -658,9 +647,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call __fixunssfdi@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -717,9 +705,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __fixunssfsi@plt
 ; RV32I-NEXT:    mv a1, a0
@@ -736,9 +723,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call __fixunssfdi@plt
 ; RV64I-NEXT:    mv a1, a0
@@ -808,9 +794,8 @@
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    li a1, 0
@@ -847,9 +832,8 @@
 ; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    li a1, 0
@@ -917,9 +901,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __fixsfdi@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -930,9 +913,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call __fixsfdi@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1088,9 +1070,8 @@
 ; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 913408
@@ -1168,9 +1149,8 @@
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s4, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 913408
@@ -1250,9 +1230,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __fixunssfdi@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1263,9 +1242,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call __fixunssfdi@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -1394,9 +1372,8 @@
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    li a1, 0
@@ -1455,9 +1432,8 @@
 ; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    li a1, 0
@@ -1595,33 +1571,29 @@
 define half @fcvt_h_ui(i16 %a) nounwind {
 ; RV32IZFH-LABEL: fcvt_h_ui:
 ; RV32IZFH:       # %bb.0:
-; RV32IZFH-NEXT:    lui a1, 16
-; RV32IZFH-NEXT:    addi a1, a1, -1
-; RV32IZFH-NEXT:    and a0, a0, a1
+; RV32IZFH-NEXT:    slli a0, a0, 16
+; RV32IZFH-NEXT:    srli a0, a0, 16
 ; RV32IZFH-NEXT:    fcvt.h.wu fa0, a0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: fcvt_h_ui:
 ; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    lui a1, 16
-; RV64IZFH-NEXT:    addiw a1, a1, -1
-; RV64IZFH-NEXT:    and a0, a0, a1
+; RV64IZFH-NEXT:    slli a0, a0, 48
+; RV64IZFH-NEXT:    srli a0, a0, 48
 ; RV64IZFH-NEXT:    fcvt.h.wu fa0, a0
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: fcvt_h_ui:
 ; RV32IDZFH:       # %bb.0:
-; RV32IDZFH-NEXT:    lui a1, 16
-; RV32IDZFH-NEXT:    addi a1, a1, -1
-; RV32IDZFH-NEXT:    and a0, a0, a1
+; RV32IDZFH-NEXT:    slli a0, a0, 16
+; RV32IDZFH-NEXT:    srli a0, a0, 16
 ; RV32IDZFH-NEXT:    fcvt.h.wu fa0, a0
 ; RV32IDZFH-NEXT:    ret
 ;
 ; RV64IDZFH-LABEL: fcvt_h_ui:
 ; RV64IDZFH:       # %bb.0:
-; RV64IDZFH-NEXT:    lui a1, 16
-; RV64IDZFH-NEXT:    addiw a1, a1, -1
-; RV64IDZFH-NEXT:    and a0, a0, a1
+; RV64IDZFH-NEXT:    slli a0, a0, 48
+; RV64IDZFH-NEXT:    srli a0, a0, 48
 ; RV64IDZFH-NEXT:    fcvt.h.wu fa0, a0
 ; RV64IDZFH-NEXT:    ret
 ;
@@ -1629,9 +1601,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __floatunsisf@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -1642,9 +1613,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __floatunsisf@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -2061,9 +2031,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
@@ -2073,9 +2042,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
@@ -2169,9 +2137,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __extendsfdf2@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -2182,9 +2149,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
@@ -2431,9 +2397,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __fixsfsi@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -2444,9 +2409,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call __fixsfdi@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -2537,9 +2501,8 @@
 ; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 815104
@@ -2587,9 +2550,8 @@
 ; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 815104
@@ -2658,9 +2620,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __fixunssfsi@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -2671,9 +2632,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call __fixunssfdi@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -2839,9 +2799,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __fixsfsi@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -2852,9 +2811,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call __fixsfdi@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -2945,9 +2903,8 @@
 ; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    lui a1, 798720
@@ -2994,9 +2951,8 @@
 ; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    lui a1, 798720
@@ -3065,9 +3021,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call __fixunssfsi@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -3078,9 +3033,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call __fixunssfdi@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -3142,9 +3096,8 @@
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv s0, a0
 ; RV32I-NEXT:    li a1, 0
@@ -3180,9 +3133,8 @@
 ; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    li a1, 0
diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
--- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
@@ -45,9 +45,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call sqrtf@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -59,9 +58,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call sqrtf@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
@@ -127,9 +125,8 @@
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
 ; RV32I-NEXT:    mv s0, a1
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    mv a1, s0
 ; RV32I-NEXT:    call __powisf2@plt
@@ -145,9 +142,8 @@
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    sext.w a1, s0
 ; RV64I-NEXT:    call __powisf2@plt
@@ -211,9 +207,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call sinf@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -225,9 +220,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call sinf@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
@@ -289,9 +283,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call cosf@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -303,9 +296,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call cosf@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
@@ -630,9 +622,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call expf@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -644,9 +635,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call expf@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
@@ -708,9 +698,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call exp2f@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -722,9 +711,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call exp2f@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
@@ -786,9 +774,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call logf@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -800,9 +787,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call logf@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
@@ -864,9 +850,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call log10f@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -878,9 +863,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call log10f@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
@@ -942,9 +926,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call log2f@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -956,9 +939,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call log2f@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
@@ -1188,16 +1170,14 @@
 ;
 ; RV32I-LABEL: fabs_f16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 8
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 17
+; RV32I-NEXT:    srli a0, a0, 17
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: fabs_f16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 8
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 49
+; RV64I-NEXT:    srli a0, a0, 49
 ; RV64I-NEXT:    ret
   %1 = call half @llvm.fabs.f16(half %a)
   ret half %1
@@ -1403,9 +1383,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, 1048568
 ; RV32I-NEXT:    and a1, a1, a2
-; RV32I-NEXT:    lui a2, 8
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 17
+; RV32I-NEXT:    srli a0, a0, 17
 ; RV32I-NEXT:    or a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
@@ -1413,9 +1392,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lui a2, 1048568
 ; RV64I-NEXT:    and a1, a1, a2
-; RV64I-NEXT:    lui a2, 8
-; RV64I-NEXT:    addiw a2, a2, -1
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 49
+; RV64I-NEXT:    srli a0, a0, 49
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
   %1 = call half @llvm.copysign.f16(half %a, half %b)
@@ -1473,9 +1451,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call floorf@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -1487,9 +1464,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call floorf@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
@@ -1551,9 +1527,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call ceilf@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -1565,9 +1540,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call ceilf@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
@@ -1629,9 +1603,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call truncf@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -1643,9 +1616,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call truncf@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
@@ -1707,9 +1679,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call rintf@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -1721,9 +1692,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call rintf@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
@@ -1785,9 +1755,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call nearbyintf@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -1799,9 +1768,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call nearbyintf@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
@@ -1863,9 +1831,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call roundf@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -1877,9 +1844,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call roundf@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
@@ -1941,9 +1907,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV32I-NEXT:    call roundevenf@plt
 ; RV32I-NEXT:    call __gnu_f2h_ieee@plt
@@ -1955,9 +1920,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    call __gnu_h2f_ieee@plt
 ; RV64I-NEXT:    call roundevenf@plt
 ; RV64I-NEXT:    call __gnu_f2h_ieee@plt
diff --git a/llvm/test/CodeGen/RISCV/rem.ll b/llvm/test/CodeGen/RISCV/rem.ll
--- a/llvm/test/CodeGen/RISCV/rem.ll
+++ b/llvm/test/CodeGen/RISCV/rem.ll
@@ -632,9 +632,8 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a1, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a1, a0, 16
 ; RV32I-NEXT:    li a0, 10
 ; RV32I-NEXT:    call __umodsi3@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
@@ -643,9 +642,8 @@
 ;
 ; RV32IM-LABEL: urem16_constant_lhs:
 ; RV32IM:       # %bb.0:
-; RV32IM-NEXT:    lui a1, 16
-; RV32IM-NEXT:    addi a1, a1, -1
-; RV32IM-NEXT:    and a0, a0, a1
+; RV32IM-NEXT:    slli a0, a0, 16
+; RV32IM-NEXT:    srli a0, a0, 16
 ; RV32IM-NEXT:    li a1, 10
 ; RV32IM-NEXT:    remu a0, a1, a0
 ; RV32IM-NEXT:    ret
@@ -654,9 +652,8 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a1, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a1, a0, 48
 ; RV64I-NEXT:    li a0, 10
 ; RV64I-NEXT:    call __umoddi3@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
@@ -665,9 +662,8 @@
 ;
 ; RV64IM-LABEL: urem16_constant_lhs:
 ; RV64IM:       # %bb.0:
-; RV64IM-NEXT:    lui a1, 16
-; RV64IM-NEXT:    addiw a1, a1, -1
-; RV64IM-NEXT:    and a0, a0, a1
+; RV64IM-NEXT:    slli a0, a0, 48
+; RV64IM-NEXT:    srli a0, a0, 48
 ; RV64IM-NEXT:    li a1, 10
 ; RV64IM-NEXT:    remuw a0, a1, a0
 ; RV64IM-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -776,9 +776,8 @@
 define i32 @zexth_i32(i32 %a) nounwind {
 ; RV32I-LABEL: zexth_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: zexth_i32:
@@ -792,9 +791,8 @@
 define i64 @zexth_i64(i64 %a) nounwind {
 ; RV32I-LABEL: zexth_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/rv32zbp.ll b/llvm/test/CodeGen/RISCV/rv32zbp.ll
--- a/llvm/test/CodeGen/RISCV/rv32zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbp.ll
@@ -2074,9 +2074,8 @@
 ; RV32I-NEXT:    srli a1, a0, 8
 ; RV32I-NEXT:    slli a0, a0, 8
 ; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBP-LABEL: bswap_i16:
@@ -2837,9 +2836,8 @@
 define i32 @pack_i32(i32 %a, i32 %b) nounwind {
 ; RV32I-LABEL: pack_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a2, 16
-; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    and a0, a0, a2
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    slli a1, a1, 16
 ; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    ret
@@ -3004,9 +3002,8 @@
 define i32 @zexth_i32(i32 %a) nounwind {
 ; RV32I-LABEL: zexth_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBP-LABEL: zexth_i32:
@@ -3020,9 +3017,8 @@
 define i64 @zexth_i64(i64 %a) nounwind {
 ; RV32I-LABEL: zexth_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/rv32zbs.ll b/llvm/test/CodeGen/RISCV/rv32zbs.ll
--- a/llvm/test/CodeGen/RISCV/rv32zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbs.ll
@@ -486,9 +486,8 @@
 define i32 @bclri_i32_31(i32 %a) nounwind {
 ; RV32I-LABEL: bclri_i32_31:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 524288
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBS-LABEL: bclri_i32_31:
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -978,9 +978,8 @@
 define i32 @zexth_i32(i32 %a) nounwind {
 ; RV64I-LABEL: zexth_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: zexth_i32:
@@ -994,9 +993,8 @@
 define i64 @zexth_i64(i64 %a) nounwind {
 ; RV64I-LABEL: zexth_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: zexth_i64:
diff --git a/llvm/test/CodeGen/RISCV/rv64zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbp.ll
--- a/llvm/test/CodeGen/RISCV/rv64zbp.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbp.ll
@@ -1955,9 +1955,8 @@
 ; RV64I-NEXT:    srli a1, a0, 8
 ; RV64I-NEXT:    slli a0, a0, 8
 ; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBP-LABEL: bswap_i16:
@@ -2787,9 +2786,8 @@
 define signext i32 @pack_i32(i32 signext %a, i32 signext %b) nounwind {
 ; RV64I-LABEL: pack_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a2, 16
-; RV64I-NEXT:    addiw a2, a2, -1
-; RV64I-NEXT:    and a0, a0, a2
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    slliw a1, a1, 16
 ; RV64I-NEXT:    or a0, a1, a0
 ; RV64I-NEXT:    ret
@@ -2945,9 +2943,8 @@
 define i32 @zexth_i32(i32 %a) nounwind {
 ; RV64I-LABEL: zexth_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBP-LABEL: zexth_i32:
@@ -2961,9 +2958,8 @@
 define i64 @zexth_i64(i64 %a) nounwind {
 ; RV64I-LABEL: zexth_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBP-LABEL: zexth_i64:
diff --git a/llvm/test/CodeGen/RISCV/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64zbs.ll
--- a/llvm/test/CodeGen/RISCV/rv64zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbs.ll
@@ -563,16 +563,14 @@
 define signext i32 @bclri_i32_31(i32 signext %a) nounwind {
 ; RV64I-LABEL: bclri_i32_31:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 524288
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBS-LABEL: bclri_i32_31:
 ; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    lui a1, 524288
-; RV64ZBS-NEXT:    addiw a1, a1, -1
-; RV64ZBS-NEXT:    and a0, a0, a1
+; RV64ZBS-NEXT:    slli a0, a0, 33
+; RV64ZBS-NEXT:    srli a0, a0, 33
 ; RV64ZBS-NEXT:    ret
   %and = and i32 %a, -2147483649
   ret i32 %and
@@ -661,9 +659,8 @@
 define i64 @bclri_i64_63(i64 %a) nounwind {
 ; RV64I-LABEL: bclri_i64_63:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, -1
-; RV64I-NEXT:    srli a1, a1, 1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    srli a0, a0, 1
 ; RV64I-NEXT:    ret
 ;
 ; RV64ZBS-LABEL: bclri_i64_63:
diff --git a/llvm/test/CodeGen/RISCV/rv64zfh-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64zfh-half-convert.ll
--- a/llvm/test/CodeGen/RISCV/rv64zfh-half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zfh-half-convert.ll
@@ -92,9 +92,8 @@
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fadd.h ft0, fa0, fa1
 ; RV64IZFH-NEXT:    fmv.x.h a0, ft0
-; RV64IZFH-NEXT:    lui a1, 16
-; RV64IZFH-NEXT:    addiw a1, a1, -1
-; RV64IZFH-NEXT:    and a0, a0, a1
+; RV64IZFH-NEXT:    slli a0, a0, 48
+; RV64IZFH-NEXT:    srli a0, a0, 48
 ; RV64IZFH-NEXT:    ret
   %1 = fadd half %a, %b
   %2 = bitcast half %1 to i16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
@@ -268,9 +268,8 @@
 define signext i16 @vpreduce_umax_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umax_v2i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a2, 16
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a0, a0, a2
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -280,9 +279,8 @@
 ;
 ; RV64-LABEL: vpreduce_umax_v2i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    lui a2, 16
-; RV64-NEXT:    addiw a2, a2, -1
-; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.s.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -313,9 +311,8 @@
 define signext i16 @vpreduce_umin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umin_v2i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a2, 16
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a0, a0, a2
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -325,9 +322,8 @@
 ;
 ; RV64-LABEL: vpreduce_umin_v2i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    lui a2, 16
-; RV64-NEXT:    addiw a2, a2, -1
-; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.s.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -418,9 +414,8 @@
 define signext i16 @vpreduce_umax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umax_v4i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a2, 16
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a0, a0, a2
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -430,9 +425,8 @@
 ;
 ; RV64-LABEL: vpreduce_umax_v4i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    lui a2, 16
-; RV64-NEXT:    addiw a2, a2, -1
-; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.s.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -463,9 +457,8 @@
 define signext i16 @vpreduce_umin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umin_v4i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a2, 16
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a0, a0, a2
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -475,9 +468,8 @@
 ;
 ; RV64-LABEL: vpreduce_umin_v4i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    lui a2, 16
-; RV64-NEXT:    addiw a2, a2, -1
-; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.s.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
@@ -390,9 +390,8 @@
 define signext i16 @vpreduce_umax_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umax_nxv1i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a2, 16
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a0, a0, a2
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -402,9 +401,8 @@
 ;
 ; RV64-LABEL: vpreduce_umax_nxv1i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    lui a2, 16
-; RV64-NEXT:    addiw a2, a2, -1
-; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.s.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -435,9 +433,8 @@
 define signext i16 @vpreduce_umin_nxv1i16(i16 signext %s, <vscale x 1 x i16> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umin_nxv1i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a2, 16
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a0, a0, a2
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -447,9 +444,8 @@
 ;
 ; RV64-LABEL: vpreduce_umin_nxv1i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    lui a2, 16
-; RV64-NEXT:    addiw a2, a2, -1
-; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.s.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
@@ -540,9 +536,8 @@
 define signext i16 @vpreduce_umax_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umax_nxv2i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a2, 16
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a0, a0, a2
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -552,9 +547,8 @@
 ;
 ; RV64-LABEL: vpreduce_umax_nxv2i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    lui a2, 16
-; RV64-NEXT:    addiw a2, a2, -1
-; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.s.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -585,9 +579,8 @@
 define signext i16 @vpreduce_umin_nxv2i16(i16 signext %s, <vscale x 2 x i16> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umin_nxv2i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a2, 16
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a0, a0, a2
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -597,9 +590,8 @@
 ;
 ; RV64-LABEL: vpreduce_umin_nxv2i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    lui a2, 16
-; RV64-NEXT:    addiw a2, a2, -1
-; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.s.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
@@ -690,9 +682,8 @@
 define signext i16 @vpreduce_umax_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umax_nxv4i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a2, 16
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a0, a0, a2
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
@@ -702,9 +693,8 @@
 ;
 ; RV64-LABEL: vpreduce_umax_nxv4i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    lui a2, 16
-; RV64-NEXT:    addiw a2, a2, -1
-; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.s.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
@@ -735,9 +725,8 @@
 define signext i16 @vpreduce_umin_nxv4i16(i16 signext %s, <vscale x 4 x i16> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vpreduce_umin_nxv4i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a2, 16
-; RV32-NEXT:    addi a2, a2, -1
-; RV32-NEXT:    and a0, a0, a2
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV32-NEXT:    vmv.s.x v9, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
@@ -747,9 +736,8 @@
 ;
 ; RV64-LABEL: vpreduce_umin_nxv4i16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    lui a2, 16
-; RV64-NEXT:    addiw a2, a2, -1
-; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
 ; RV64-NEXT:    vmv.s.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
diff --git a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
--- a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
+++ b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
@@ -268,16 +268,14 @@
 define i32 @zext_i16_to_i32(i16 %a) nounwind {
 ; RV32I-LABEL: zext_i16_to_i32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i16_to_i32:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    ret
   %1 = zext i16 %a to i32
   ret i32 %1
@@ -286,17 +284,15 @@
 define i64 @zext_i16_to_i64(i16 %a) nounwind {
 ; RV32I-LABEL: zext_i16_to_i64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
 ; RV32I-NEXT:    li a1, 0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: zext_i16_to_i64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
 ; RV64I-NEXT:    ret
   %1 = zext i16 %a to i64
   ret i64 %1
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -17,9 +17,8 @@
 ; RV32-NEXT:    lui a1, 662
 ; RV32-NEXT:    addi a1, a1, -83
 ; RV32-NEXT:    add a0, a0, a1
-; RV32-NEXT:    lui a1, 131072
-; RV32-NEXT:    addi a1, a1, -1
-; RV32-NEXT:    and a0, a0, a1
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    srli a0, a0, 3
 ; RV32-NEXT:    lui a1, 1324
 ; RV32-NEXT:    addi a1, a1, -165
 ; RV32-NEXT:    sltu a0, a0, a1
@@ -36,10 +35,9 @@
 ; RV64-NEXT:    call __muldi3@plt
 ; RV64-NEXT:    lui a1, 662
 ; RV64-NEXT:    addiw a1, a1, -83
-; RV64-NEXT:    add a0, a0, a1
-; RV64-NEXT:    lui a1, 131072
-; RV64-NEXT:    addiw a1, a1, -1
-; RV64-NEXT:    and a0, a0, a1
+; RV64-NEXT:    addw a0, a0, a1
+; RV64-NEXT:    slli a0, a0, 35
+; RV64-NEXT:    srli a0, a0, 35
 ; RV64-NEXT:    lui a1, 1324
 ; RV64-NEXT:    addiw a1, a1, -165
 ; RV64-NEXT:    sltu a0, a0, a1
@@ -55,9 +53,8 @@
 ; RV32M-NEXT:    lui a1, 662
 ; RV32M-NEXT:    addi a1, a1, -83
 ; RV32M-NEXT:    add a0, a0, a1
-; RV32M-NEXT:    lui a1, 131072
-; RV32M-NEXT:    addi a1, a1, -1
-; RV32M-NEXT:    and a0, a0, a1
+; RV32M-NEXT:    slli a0, a0, 3
+; RV32M-NEXT:    srli a0, a0, 3
 ; RV32M-NEXT:    lui a1, 1324
 ; RV32M-NEXT:    addi a1, a1, -165
 ; RV32M-NEXT:    sltu a0, a0, a1
@@ -67,13 +64,12 @@
 ; RV64M:       # %bb.0:
 ; RV64M-NEXT:    lui a1, 128424
 ; RV64M-NEXT:    addiw a1, a1, 331
-; RV64M-NEXT:    mul a0, a0, a1
+; RV64M-NEXT:    mulw a0, a0, a1
 ; RV64M-NEXT:    lui a1, 662
 ; RV64M-NEXT:    addiw a1, a1, -83
-; RV64M-NEXT:    add a0, a0, a1
-; RV64M-NEXT:    lui a1, 131072
-; RV64M-NEXT:    addiw a1, a1, -1
-; RV64M-NEXT:    and a0, a0, a1
+; RV64M-NEXT:    addw a0, a0, a1
+; RV64M-NEXT:    slli a0, a0, 35
+; RV64M-NEXT:    srli a0, a0, 35
 ; RV64M-NEXT:    lui a1, 1324
 ; RV64M-NEXT:    addiw a1, a1, -165
 ; RV64M-NEXT:    sltu a0, a0, a1
@@ -87,9 +83,8 @@
 ; RV32MV-NEXT:    lui a1, 662
 ; RV32MV-NEXT:    addi a1, a1, -83
 ; RV32MV-NEXT:    add a0, a0, a1
-; RV32MV-NEXT:    lui a1, 131072
-; RV32MV-NEXT:    addi a1, a1, -1
-; RV32MV-NEXT:    and a0, a0, a1
+; RV32MV-NEXT:    slli a0, a0, 3
+; RV32MV-NEXT:    srli a0, a0, 3
 ; RV32MV-NEXT:    lui a1, 1324
 ; RV32MV-NEXT:    addi a1, a1, -165
 ; RV32MV-NEXT:    sltu a0, a0, a1
@@ -99,13 +94,12 @@
 ; RV64MV:       # %bb.0:
 ; RV64MV-NEXT:    lui a1, 128424
 ; RV64MV-NEXT:    addiw a1, a1, 331
-; RV64MV-NEXT:    mul a0, a0, a1
+; RV64MV-NEXT:    mulw a0, a0, a1
 ; RV64MV-NEXT:    lui a1, 662
 ; RV64MV-NEXT:    addiw a1, a1, -83
-; RV64MV-NEXT:    add a0, a0, a1
-; RV64MV-NEXT:    lui a1, 131072
-; RV64MV-NEXT:    addiw a1, a1, -1
-; RV64MV-NEXT:    and a0, a0, a1
+; RV64MV-NEXT:    addw a0, a0, a1
+; RV64MV-NEXT:    slli a0, a0, 35
+; RV64MV-NEXT:    srli a0, a0, 35
 ; RV64MV-NEXT:    lui a1, 1324
 ; RV64MV-NEXT:    addiw a1, a1, -165
 ; RV64MV-NEXT:    sltu a0, a0, a1
@@ -386,15 +380,13 @@
 ; RV64-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s4, 0(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    mv s0, a0
 ; RV64-NEXT:    lb a0, 12(a0)
 ; RV64-NEXT:    lwu a1, 8(s0)
 ; RV64-NEXT:    slli a0, a0, 32
 ; RV64-NEXT:    or a0, a1, a0
-; RV64-NEXT:    li s4, -1
-; RV64-NEXT:    srli a1, s4, 24
-; RV64-NEXT:    and a0, a0, a1
+; RV64-NEXT:    slli a0, a0, 24
+; RV64-NEXT:    srli a0, a0, 24
 ; RV64-NEXT:    ld a1, 0(s0)
 ; RV64-NEXT:    slli a2, a0, 29
 ; RV64-NEXT:    srai s1, a2, 31
@@ -434,7 +426,8 @@
 ; RV64-NEXT:    srli a3, a3, 61
 ; RV64-NEXT:    sb a3, 12(s0)
 ; RV64-NEXT:    slliw a1, a1, 2
-; RV64-NEXT:    srli a3, s4, 31
+; RV64-NEXT:    li a3, -1
+; RV64-NEXT:    srli a3, a3, 31
 ; RV64-NEXT:    and a2, a2, a3
 ; RV64-NEXT:    srli a4, a2, 31
 ; RV64-NEXT:    subw a1, a4, a1
@@ -448,7 +441,6 @@
 ; RV64-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s4, 0(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 48
 ; RV64-NEXT:    ret
 ;
@@ -540,65 +532,65 @@
 ; RV64M-NEXT:    lwu a2, 8(a0)
 ; RV64M-NEXT:    slli a1, a1, 32
 ; RV64M-NEXT:    or a1, a2, a1
-; RV64M-NEXT:    li a6, -1
-; RV64M-NEXT:    srli a3, a6, 24
-; RV64M-NEXT:    and a1, a1, a3
-; RV64M-NEXT:    ld a3, 0(a0)
-; RV64M-NEXT:    slli a4, a1, 29
-; RV64M-NEXT:    srai a4, a4, 31
+; RV64M-NEXT:    slli a1, a1, 24
+; RV64M-NEXT:    srli a1, a1, 24
+; RV64M-NEXT:    ld a2, 0(a0)
+; RV64M-NEXT:    slli a3, a1, 29
+; RV64M-NEXT:    srai a3, a3, 31
 ; RV64M-NEXT:    slli a1, a1, 31
-; RV64M-NEXT:    srli a5, a3, 33
-; RV64M-NEXT:    lui a2, %hi(.LCPI3_0)
-; RV64M-NEXT:    ld a2, %lo(.LCPI3_0)(a2)
-; RV64M-NEXT:    or a1, a5, a1
+; RV64M-NEXT:    srli a4, a2, 33
+; RV64M-NEXT:    lui a5, %hi(.LCPI3_0)
+; RV64M-NEXT:    ld a5, %lo(.LCPI3_0)(a5)
+; RV64M-NEXT:    or a1, a4, a1
 ; RV64M-NEXT:    slli a1, a1, 31
 ; RV64M-NEXT:    srai a1, a1, 31
-; RV64M-NEXT:    mulh a2, a1, a2
-; RV64M-NEXT:    srli a5, a2, 63
-; RV64M-NEXT:    srai a2, a2, 1
-; RV64M-NEXT:    add a2, a2, a5
-; RV64M-NEXT:    slli a5, a2, 3
-; RV64M-NEXT:    sub a2, a2, a5
+; RV64M-NEXT:    mulh a4, a1, a5
+; RV64M-NEXT:    srli a5, a4, 63
+; RV64M-NEXT:    srai a4, a4, 1
+; RV64M-NEXT:    add a4, a4, a5
+; RV64M-NEXT:    slli a5, a4, 3
+; RV64M-NEXT:    sub a4, a4, a5
 ; RV64M-NEXT:    lui a5, %hi(.LCPI3_1)
 ; RV64M-NEXT:    ld a5, %lo(.LCPI3_1)(a5)
-; RV64M-NEXT:    slli a3, a3, 31
-; RV64M-NEXT:    srai a3, a3, 31
-; RV64M-NEXT:    add a1, a1, a2
-; RV64M-NEXT:    mulh a2, a4, a5
-; RV64M-NEXT:    srli a5, a2, 63
-; RV64M-NEXT:    srai a2, a2, 1
-; RV64M-NEXT:    add a2, a2, a5
-; RV64M-NEXT:    slli a5, a2, 2
-; RV64M-NEXT:    add a2, a5, a2
-; RV64M-NEXT:    add a2, a4, a2
-; RV64M-NEXT:    addi a2, a2, -2
-; RV64M-NEXT:    snez a2, a2
+; RV64M-NEXT:    slli a2, a2, 31
+; RV64M-NEXT:    srai a2, a2, 31
+; RV64M-NEXT:    add a1, a1, a4
+; RV64M-NEXT:    mulh a4, a3, a5
+; RV64M-NEXT:    srli a5, a4, 63
+; RV64M-NEXT:    srai a4, a4, 1
+; RV64M-NEXT:    add a4, a4, a5
+; RV64M-NEXT:    slli a5, a4, 2
+; RV64M-NEXT:    add a4, a5, a4
+; RV64M-NEXT:    add a3, a3, a4
+; RV64M-NEXT:    addi a3, a3, -2
+; RV64M-NEXT:    snez a3, a3
 ; RV64M-NEXT:    lui a4, %hi(.LCPI3_2)
 ; RV64M-NEXT:    ld a4, %lo(.LCPI3_2)(a4)
 ; RV64M-NEXT:    lui a5, %hi(.LCPI3_3)
 ; RV64M-NEXT:    ld a5, %lo(.LCPI3_3)(a5)
 ; RV64M-NEXT:    addi a1, a1, -1
 ; RV64M-NEXT:    snez a1, a1
-; RV64M-NEXT:    mul a3, a3, a4
-; RV64M-NEXT:    add a3, a3, a5
-; RV64M-NEXT:    slli a4, a3, 63
-; RV64M-NEXT:    srli a3, a3, 1
-; RV64M-NEXT:    or a3, a3, a4
-; RV64M-NEXT:    sltu a3, a5, a3
+; RV64M-NEXT:    mul a2, a2, a4
+; RV64M-NEXT:    add a2, a2, a5
+; RV64M-NEXT:    slli a4, a2, 63
+; RV64M-NEXT:    srli a2, a2, 1
+; RV64M-NEXT:    or a2, a2, a4
+; RV64M-NEXT:    sltu a2, a5, a2
 ; RV64M-NEXT:    neg a1, a1
-; RV64M-NEXT:    neg a4, a2
-; RV64M-NEXT:    neg a3, a3
+; RV64M-NEXT:    neg a4, a3
+; RV64M-NEXT:    neg a2, a2
 ; RV64M-NEXT:    slli a4, a4, 29
 ; RV64M-NEXT:    srli a4, a4, 61
 ; RV64M-NEXT:    sb a4, 12(a0)
-; RV64M-NEXT:    slliw a2, a2, 2
-; RV64M-NEXT:    srli a4, a6, 31
+; RV64M-NEXT:    slliw a3, a3, 2
+; RV64M-NEXT:    li a4, -1
+; RV64M-NEXT:    srli a4, a4, 31
 ; RV64M-NEXT:    and a1, a1, a4
 ; RV64M-NEXT:    srli a5, a1, 31
-; RV64M-NEXT:    subw a2, a5, a2
-; RV64M-NEXT:    sw a2, 8(a0)
+; RV64M-NEXT:    subw a3, a5, a3
+; RV64M-NEXT:    sw a3, 8(a0)
 ; RV64M-NEXT:    slli a1, a1, 33
-; RV64M-NEXT:    and a2, a3, a4
+; RV64M-NEXT:    and a2, a2, a4
 ; RV64M-NEXT:    or a1, a2, a1
 ; RV64M-NEXT:    sd a1, 0(a0)
 ; RV64M-NEXT:    ret
@@ -718,47 +710,46 @@
 ; RV64MV-NEXT:    lwu a2, 8(a0)
 ; RV64MV-NEXT:    slli a1, a1, 32
 ; RV64MV-NEXT:    or a1, a2, a1
-; RV64MV-NEXT:    li a6, -1
-; RV64MV-NEXT:    ld a3, 0(a0)
-; RV64MV-NEXT:    srli a4, a6, 24
-; RV64MV-NEXT:    and a1, a1, a4
-; RV64MV-NEXT:    slli a4, a1, 31
-; RV64MV-NEXT:    srli a5, a3, 33
-; RV64MV-NEXT:    or a4, a5, a4
-; RV64MV-NEXT:    slli a4, a4, 31
-; RV64MV-NEXT:    srai a4, a4, 31
-; RV64MV-NEXT:    lui a5, %hi(.LCPI3_0)
-; RV64MV-NEXT:    ld a5, %lo(.LCPI3_0)(a5)
-; RV64MV-NEXT:    slli a1, a1, 29
+; RV64MV-NEXT:    ld a2, 0(a0)
+; RV64MV-NEXT:    slli a1, a1, 24
+; RV64MV-NEXT:    srli a1, a1, 24
+; RV64MV-NEXT:    slli a3, a1, 31
+; RV64MV-NEXT:    srli a4, a2, 33
+; RV64MV-NEXT:    or a3, a4, a3
 ; RV64MV-NEXT:    slli a3, a3, 31
 ; RV64MV-NEXT:    srai a3, a3, 31
-; RV64MV-NEXT:    mulh a5, a3, a5
-; RV64MV-NEXT:    srli a2, a5, 63
-; RV64MV-NEXT:    add a2, a5, a2
+; RV64MV-NEXT:    lui a4, %hi(.LCPI3_0)
+; RV64MV-NEXT:    ld a4, %lo(.LCPI3_0)(a4)
+; RV64MV-NEXT:    slli a1, a1, 29
+; RV64MV-NEXT:    slli a2, a2, 31
+; RV64MV-NEXT:    srai a2, a2, 31
+; RV64MV-NEXT:    mulh a4, a2, a4
+; RV64MV-NEXT:    srli a5, a4, 63
+; RV64MV-NEXT:    add a4, a4, a5
 ; RV64MV-NEXT:    li a5, 6
-; RV64MV-NEXT:    mul a2, a2, a5
+; RV64MV-NEXT:    mul a4, a4, a5
 ; RV64MV-NEXT:    lui a5, %hi(.LCPI3_1)
 ; RV64MV-NEXT:    ld a5, %lo(.LCPI3_1)(a5)
 ; RV64MV-NEXT:    srai a1, a1, 31
-; RV64MV-NEXT:    sub a2, a3, a2
+; RV64MV-NEXT:    sub a2, a2, a4
 ; RV64MV-NEXT:    sd a2, 32(sp)
 ; RV64MV-NEXT:    mulh a2, a1, a5
-; RV64MV-NEXT:    srli a3, a2, 63
+; RV64MV-NEXT:    srli a4, a2, 63
 ; RV64MV-NEXT:    srai a2, a2, 1
-; RV64MV-NEXT:    add a2, a2, a3
-; RV64MV-NEXT:    slli a3, a2, 2
+; RV64MV-NEXT:    add a2, a2, a4
+; RV64MV-NEXT:    slli a4, a2, 2
 ; RV64MV-NEXT:    lui a5, %hi(.LCPI3_2)
 ; RV64MV-NEXT:    ld a5, %lo(.LCPI3_2)(a5)
-; RV64MV-NEXT:    add a2, a3, a2
+; RV64MV-NEXT:    add a2, a4, a2
 ; RV64MV-NEXT:    add a1, a1, a2
 ; RV64MV-NEXT:    sd a1, 48(sp)
-; RV64MV-NEXT:    mulh a1, a4, a5
+; RV64MV-NEXT:    mulh a1, a3, a5
 ; RV64MV-NEXT:    srli a2, a1, 63
 ; RV64MV-NEXT:    srai a1, a1, 1
 ; RV64MV-NEXT:    add a1, a1, a2
 ; RV64MV-NEXT:    slli a2, a1, 3
 ; RV64MV-NEXT:    sub a1, a1, a2
-; RV64MV-NEXT:    add a1, a4, a1
+; RV64MV-NEXT:    add a1, a3, a1
 ; RV64MV-NEXT:    sd a1, 40(sp)
 ; RV64MV-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64MV-NEXT:    addi a1, sp, 32
@@ -766,7 +757,8 @@
 ; RV64MV-NEXT:    lui a1, %hi(.LCPI3_3)
 ; RV64MV-NEXT:    addi a1, a1, %lo(.LCPI3_3)
 ; RV64MV-NEXT:    vle64.v v10, (a1)
-; RV64MV-NEXT:    srli a1, a6, 31
+; RV64MV-NEXT:    li a1, -1
+; RV64MV-NEXT:    srli a1, a1, 31
 ; RV64MV-NEXT:    vand.vx v8, v8, a1
 ; RV64MV-NEXT:    vmsne.vv v0, v8, v10
 ; RV64MV-NEXT:    vmv.v.i v8, 0
diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -14,9 +14,8 @@
 ; RV32-NEXT:    lui a1, 1
 ; RV32-NEXT:    addi a1, a1, -819
 ; RV32-NEXT:    call __mulsi3@plt
-; RV32-NEXT:    lui a1, 2
-; RV32-NEXT:    addi a1, a1, -1
-; RV32-NEXT:    and a0, a0, a1
+; RV32-NEXT:    slli a0, a0, 19
+; RV32-NEXT:    srli a0, a0, 19
 ; RV32-NEXT:    sltiu a0, a0, 1639
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 16
@@ -29,9 +28,8 @@
 ; RV64-NEXT:    lui a1, 1
 ; RV64-NEXT:    addiw a1, a1, -819
 ; RV64-NEXT:    call __muldi3@plt
-; RV64-NEXT:    lui a1, 2
-; RV64-NEXT:    addiw a1, a1, -1
-; RV64-NEXT:    and a0, a0, a1
+; RV64-NEXT:    slli a0, a0, 51
+; RV64-NEXT:    srli a0, a0, 51
 ; RV64-NEXT:    sltiu a0, a0, 1639
 ; RV64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    addi sp, sp, 16
@@ -42,9 +40,8 @@
 ; RV32M-NEXT:    lui a1, 1
 ; RV32M-NEXT:    addi a1, a1, -819
 ; RV32M-NEXT:    mul a0, a0, a1
-; RV32M-NEXT:    lui a1, 2
-; RV32M-NEXT:    addi a1, a1, -1
-; RV32M-NEXT:    and a0, a0, a1
+; RV32M-NEXT:    slli a0, a0, 19
+; RV32M-NEXT:    srli a0, a0, 19
 ; RV32M-NEXT:    sltiu a0, a0, 1639
 ; RV32M-NEXT:    ret
 ;
@@ -52,10 +49,9 @@
 ; RV64M:       # %bb.0:
 ; RV64M-NEXT:    lui a1, 1
 ; RV64M-NEXT:    addiw a1, a1, -819
-; RV64M-NEXT:    mul a0, a0, a1
-; RV64M-NEXT:    lui a1, 2
-; RV64M-NEXT:    addiw a1, a1, -1
-; RV64M-NEXT:    and a0, a0, a1
+; RV64M-NEXT:    mulw a0, a0, a1
+; RV64M-NEXT:    slli a0, a0, 51
+; RV64M-NEXT:    srli a0, a0, 51
 ; RV64M-NEXT:    sltiu a0, a0, 1639
 ; RV64M-NEXT:    ret
 ;
@@ -64,9 +60,8 @@
 ; RV32MV-NEXT:    lui a1, 1
 ; RV32MV-NEXT:    addi a1, a1, -819
 ; RV32MV-NEXT:    mul a0, a0, a1
-; RV32MV-NEXT:    lui a1, 2
-; RV32MV-NEXT:    addi a1, a1, -1
-; RV32MV-NEXT:    and a0, a0, a1
+; RV32MV-NEXT:    slli a0, a0, 19
+; RV32MV-NEXT:    srli a0, a0, 19
 ; RV32MV-NEXT:    sltiu a0, a0, 1639
 ; RV32MV-NEXT:    ret
 ;
@@ -74,10 +69,9 @@
 ; RV64MV:       # %bb.0:
 ; RV64MV-NEXT:    lui a1, 1
 ; RV64MV-NEXT:    addiw a1, a1, -819
-; RV64MV-NEXT:    mul a0, a0, a1
-; RV64MV-NEXT:    lui a1, 2
-; RV64MV-NEXT:    addiw a1, a1, -1
-; RV64MV-NEXT:    and a0, a0, a1
+; RV64MV-NEXT:    mulw a0, a0, a1
+; RV64MV-NEXT:    slli a0, a0, 51
+; RV64MV-NEXT:    srli a0, a0, 51
 ; RV64MV-NEXT:    sltiu a0, a0, 1639
 ; RV64MV-NEXT:    ret
   %urem = urem i13 %X, 5
@@ -97,9 +91,8 @@
 ; RV32-NEXT:    slli a0, a0, 5
 ; RV32-NEXT:    srli a0, a0, 6
 ; RV32-NEXT:    or a0, a0, a1
-; RV32-NEXT:    lui a1, 32768
-; RV32-NEXT:    addi a1, a1, -1
-; RV32-NEXT:    and a0, a0, a1
+; RV32-NEXT:    slli a0, a0, 5
+; RV32-NEXT:    srli a0, a0, 5
 ; RV32-NEXT:    lui a1, 2341
 ; RV32-NEXT:    addi a1, a1, -1755
 ; RV32-NEXT:    sltu a0, a0, a1
@@ -118,9 +111,8 @@
 ; RV64-NEXT:    slli a0, a0, 37
 ; RV64-NEXT:    srli a0, a0, 38
 ; RV64-NEXT:    or a0, a0, a1
-; RV64-NEXT:    lui a1, 32768
-; RV64-NEXT:    addiw a1, a1, -1
-; RV64-NEXT:    and a0, a0, a1
+; RV64-NEXT:    slli a0, a0, 37
+; RV64-NEXT:    srli a0, a0, 37
 ; RV64-NEXT:    lui a1, 2341
 ; RV64-NEXT:    addiw a1, a1, -1755
 ; RV64-NEXT:    sltu a0, a0, a1
@@ -137,9 +129,8 @@
 ; RV32M-NEXT:    slli a0, a0, 5
 ; RV32M-NEXT:    srli a0, a0, 6
 ; RV32M-NEXT:    or a0, a0, a1
-; RV32M-NEXT:    lui a1, 32768
-; RV32M-NEXT:    addi a1, a1, -1
-; RV32M-NEXT:    and a0, a0, a1
+; RV32M-NEXT:    slli a0, a0, 5
+; RV32M-NEXT:    srli a0, a0, 5
 ; RV32M-NEXT:    lui a1, 2341
 ; RV32M-NEXT:    addi a1, a1, -1755
 ; RV32M-NEXT:    sltu a0, a0, a1
@@ -154,9 +145,8 @@
 ; RV64M-NEXT:    slli a0, a0, 37
 ; RV64M-NEXT:    srli a0, a0, 38
 ; RV64M-NEXT:    or a0, a0, a1
-; RV64M-NEXT:    lui a1, 32768
-; RV64M-NEXT:    addiw a1, a1, -1
-; RV64M-NEXT:    and a0, a0, a1
+; RV64M-NEXT:    slli a0, a0, 37
+; RV64M-NEXT:    srli a0, a0, 37
 ; RV64M-NEXT:    lui a1, 2341
 ; RV64M-NEXT:    addiw a1, a1, -1755
 ; RV64M-NEXT:    sltu a0, a0, a1
@@ -171,9 +161,8 @@
 ; RV32MV-NEXT:    slli a0, a0, 5
 ; RV32MV-NEXT:    srli a0, a0, 6
 ; RV32MV-NEXT:    or a0, a0, a1
-; RV32MV-NEXT:    lui a1, 32768
-; RV32MV-NEXT:    addi a1, a1, -1
-; RV32MV-NEXT:    and a0, a0, a1
+; RV32MV-NEXT:    slli a0, a0, 5
+; RV32MV-NEXT:    srli a0, a0, 5
 ; RV32MV-NEXT:    lui a1, 2341
 ; RV32MV-NEXT:    addi a1, a1, -1755
 ; RV32MV-NEXT:    sltu a0, a0, a1
@@ -188,9 +177,8 @@
 ; RV64MV-NEXT:    slli a0, a0, 37
 ; RV64MV-NEXT:    srli a0, a0, 38
 ; RV64MV-NEXT:    or a0, a0, a1
-; RV64MV-NEXT:    lui a1, 32768
-; RV64MV-NEXT:    addiw a1, a1, -1
-; RV64MV-NEXT:    and a0, a0, a1
+; RV64MV-NEXT:    slli a0, a0, 37
+; RV64MV-NEXT:    srli a0, a0, 37
 ; RV64MV-NEXT:    lui a1, 2341
 ; RV64MV-NEXT:    addiw a1, a1, -1755
 ; RV64MV-NEXT:    sltu a0, a0, a1