diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
--- a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
@@ -106,3 +106,23 @@
 def FSTLE_S : FP_STORE_3R<0b00111000011101110, "fstle.s", FPR32>;
 
 } // Predicates = [HasBasicF]
+
+//===----------------------------------------------------------------------===//
+// Pseudo-instructions and codegen patterns
+//===----------------------------------------------------------------------===//
+
+/// Generic pattern classes
+
+class PatFprFpr<SDPatternOperator OpNode, LAInst Inst, RegisterClass RegTy>
+    : Pat<(OpNode RegTy:$fj, RegTy:$fk), (Inst $fj, $fk)>;
+
+let Predicates = [HasBasicF] in {
+
+/// Float arithmetic operations
+
+def : PatFprFpr<fadd, FADD_S, FPR32>;
+def : PatFprFpr<fsub, FSUB_S, FPR32>;
+def : PatFprFpr<fmul, FMUL_S, FPR32>;
+def : PatFprFpr<fdiv, FDIV_S, FPR32>;
+
+} // Predicates = [HasBasicF]
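
The `PatFprFpr` class above is a small generic helper: `PatFprFpr<fadd, FADD_S, FPR32>` expands to `Pat<(fadd FPR32:$fj, FPR32:$fk), (FADD_S $fj, $fk)>`, so one class covers all four binary float operations here and is reused for the FPR64 variants in the next file. As a minimal sketch of what this enables (not a test from this patch, which only uses `--mattr=+d`; it assumes HasBasicF alone suffices for single-precision selection):

```llvm
; RUN: llc --mtriple=loongarch32 --mattr=+f < %s | FileCheck %s
define float @fadd_f_only(float %x, float %y) {
; CHECK-LABEL: fadd_f_only:
; CHECK: fadd.s $fa0, $fa0, $fa1
  %add = fadd float %x, %y
  ret float %add
}
```
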
diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
--- a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
@@ -131,3 +131,17 @@
 def MOVFR2GR_D  : FP_MOV<0b0000000100010100101110, "movfr2gr.d", GPR, FPR64>;
 } // Predicates = [HasBasicD, IsLA64]
 
+//===----------------------------------------------------------------------===//
+// Pseudo-instructions and codegen patterns
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasBasicD] in {
+
+/// Float arithmetic operations
+
+def : PatFprFpr<fadd, FADD_D, FPR64>;
+def : PatFprFpr<fsub, FSUB_D, FPR64>;
+def : PatFprFpr<fmul, FMUL_D, FPR64>;
+def : PatFprFpr<fdiv, FDIV_D, FPR64>;
+
+} // Predicates = [HasBasicD]
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -31,6 +31,10 @@
   MVT GRLenVT = Subtarget.getGRLenVT();
   // Set up the register classes.
   addRegisterClass(GRLenVT, &LoongArch::GPRRegClass);
+  if (Subtarget.hasBasicF())
+    addRegisterClass(MVT::f32, &LoongArch::FPR32RegClass);
+  if (Subtarget.hasBasicD())
+    addRegisterClass(MVT::f64, &LoongArch::FPR64RegClass);
 
   // TODO: add necessary setOperationAction calls later.
 
@@ -39,6 +43,8 @@
 
   setStackPointerRegisterToSaveRestore(LoongArch::R3);
 
+  setBooleanContents(ZeroOrOneBooleanContent);
+
   // Function alignments.
   const Align FunctionAlignment(4);
   setMinFunctionAlignment(FunctionAlignment);
@@ -64,16 +70,29 @@
 //                     Calling Convention Implementation
 //===----------------------------------------------------------------------===//
 // FIXME: Now, we only support CallingConv::C with fixed arguments which are
-// passed with integer registers.
+// passed in integer or floating-point registers.
 static const MCPhysReg ArgGPRs[] = {
     LoongArch::R4, LoongArch::R5, LoongArch::R6,  LoongArch::R7,
     LoongArch::R8, LoongArch::R9, LoongArch::R10, LoongArch::R11};
+static const MCPhysReg ArgFPR32s[] = {
+    LoongArch::F0, LoongArch::F1, LoongArch::F2, LoongArch::F3,
+    LoongArch::F4, LoongArch::F5, LoongArch::F6, LoongArch::F7};
+static const MCPhysReg ArgFPR64s[] = {
+    LoongArch::F0_64, LoongArch::F1_64, LoongArch::F2_64, LoongArch::F3_64,
+    LoongArch::F4_64, LoongArch::F5_64, LoongArch::F6_64, LoongArch::F7_64};
 
 // Implements the LoongArch calling convention. Returns true upon failure.
 static bool CC_LoongArch(unsigned ValNo, MVT ValVT,
                          CCValAssign::LocInfo LocInfo, CCState &State) {
   // Allocate to a register if possible.
-  Register Reg = State.AllocateReg(ArgGPRs);
+  Register Reg;
+
+  if (ValVT == MVT::f32)
+    Reg = State.AllocateReg(ArgFPR32s);
+  else if (ValVT == MVT::f64)
+    Reg = State.AllocateReg(ArgFPR64s);
+  else
+    Reg = State.AllocateReg(ArgGPRs);
   if (Reg) {
     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, ValVT, LocInfo));
     return false;
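
Three changes land in the lowering code: the f32/f64 register classes are registered only when the corresponding feature is present, booleans are declared to be 0-or-1 values (which lets the 0/1 result of `sltu` feed an `i1` directly), and `CC_LoongArch` now routes f32/f64 arguments to the $fa0-$fa7 sequences while everything else still goes to the GPR sequence. A sketch of the resulting assignment for a mixed signature (the register expectations are assumptions; in particular, $fa1 for %c assumes that allocating F0 also marks the overlapping F0_64 as used):

```llvm
define float @mixed_args(i32 %a, float %b, double %c) {
; %a should be assigned $a0 (first GPR) and %b $fa0 (first FPR32);
; %c presumably lands in $fa1 (F1_64), since F0_64 aliases the
; already-allocated F0.
  ret float %b
}
```
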
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -509,35 +509,43 @@
     : Pat<(sext_inreg (OpNode GPR:$rj, ImmOpnd:$imm), i32),
           (Inst GPR:$rj, ImmOpnd:$imm)>;
 
-def : PatGprGpr<and, AND>;
-def : PatGprGpr<or, OR>;
-def : PatGprGpr<xor, XOR>;
-
-/// Branches and jumps
-
-let isBarrier = 1, isReturn = 1, isTerminator = 1 in
-def PseudoRET : Pseudo<(outs), (ins), [(loongarch_ret)]>,
-                PseudoInstExpansion<(JIRL R0, R1, 0)>;
-
-/// LA32 patterns
+/// Simple arithmetic operations
 
 let Predicates = [IsLA32] in {
 def : PatGprGpr<add, ADD_W>;
 def : PatGprImm<add, ADDI_W, simm12>;
+def : PatGprGpr<sub, SUB_W>;
 } // Predicates = [IsLA32]
 
-/// LA64 patterns
-
 let Predicates = [IsLA64] in {
-def : Pat<(sext_inreg GPR:$rj, i32), (ADDI_W GPR:$rj, 0)>;
-
 def : PatGprGpr<add, ADD_D>;
 def : PatGprGpr_32<add, ADD_W>;
-
 def : PatGprImm<add, ADDI_D, simm12>;
 def : PatGprImm_32<add, ADDI_W, simm12>;
+def : PatGprGpr<sub, SUB_D>;
+def : PatGprGpr_32<sub, SUB_W>;
 } // Predicates = [IsLA64]
 
+def : PatGprGpr<and, AND>;
+def : PatGprGpr<or, OR>;
+def : PatGprGpr<xor, XOR>;
+
+/// sext and zext
+
+let Predicates = [IsLA64] in {
+def : Pat<(sext_inreg GPR:$rj, i32), (ADDI_W GPR:$rj, 0)>;
+} // Predicates = [IsLA64]
+
+/// Setcc
+
+def : PatGprGpr<setult, SLTU>;
+
+/// Branches and jumps
+
+let isBarrier = 1, isReturn = 1, isTerminator = 1 in
+def PseudoRET : Pseudo<(outs), (ins), [(loongarch_ret)]>,
+                PseudoInstExpansion<(JIRL R0, R1, 0)>;
+
 //===----------------------------------------------------------------------===//
 // Assembler Pseudo Instructions
 //===----------------------------------------------------------------------===//
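
Besides adding the `sub` patterns, this hunk regroups the existing patterns into titled sections (arithmetic, sext/zext, setcc, branches and jumps) in place of the previous LA32/LA64 split. The lone setcc pattern, `setult` to `SLTU`, is what the i64 tests below rely on for carry/borrow computation; in isolation it would select like this hypothetical check (expected asm is an assumption inferred from the `sltu` lines in those i64 tests):

```llvm
; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s
define i1 @icmp_ult(i32 %x, i32 %y) {
; CHECK-LABEL: icmp_ult:
; CHECK: sltu $a0, $a0, $a1
  %cmp = icmp ult i32 %x, %y
  ret i1 %cmp
}
```
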
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll
@@ -1,17 +1,183 @@
-; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=CHECK32
-; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=CHECK64
-
-define i32 @addRR(i32 %x, i32 %y) {
-; CHECK32-LABEL: addRR:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    add.w $a0, $a1, $a0
-; CHECK32-NEXT:    jirl $zero, $ra, 0
-;
-; CHECK64-LABEL: addRR:
-; CHECK64:       # %bb.0: # %entry
-; CHECK64-NEXT:    add.d $a0, $a1, $a0
-; CHECK64-NEXT:    jirl $zero, $ra, 0
-entry:
-  %add = add nsw i32 %y, %x
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+; Exercise the 'add' LLVM IR instruction: https://llvm.org/docs/LangRef.html#add-instruction
+
+define i1 @add_i1(i1 %x, i1 %y) {
+; LA32-LABEL: add_i1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    add.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i1 %x, %y
+  ret i1 %add
+}
+
+define i8 @add_i8(i8 %x, i8 %y) {
+; LA32-LABEL: add_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    add.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i8 %x, %y
+  ret i8 %add
+}
+
+define i16 @add_i16(i16 %x, i16 %y) {
+; LA32-LABEL: add_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    add.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i16 %x, %y
+  ret i16 %add
+}
+
+define i32 @add_i32(i32 %x, i32 %y) {
+; LA32-LABEL: add_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    add.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i32 %x, %y
+  ret i32 %add
+}
+
+; Match the pattern:
+; def : PatGprGpr_32<add, ADD_W>;
+define signext i32 @add_i32_sext(i32 %x, i32 %y) {
+; LA32-LABEL: add_i32_sext:
+; LA32:       # %bb.0:
+; LA32-NEXT:    add.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i32_sext:
+; LA64:       # %bb.0:
+; LA64-NEXT:    add.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i32 %x, %y
   ret i32 %add
 }
+
+define i64 @add_i64(i64 %x, i64 %y) {
+; LA32-LABEL: add_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    add.w $a1, $a1, $a3
+; LA32-NEXT:    add.w $a2, $a0, $a2
+; LA32-NEXT:    sltu $a0, $a2, $a0
+; LA32-NEXT:    add.w $a1, $a1, $a0
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    add.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i64 %x, %y
+  ret i64 %add
+}
+
+define i1 @add_i1_3(i1 %x) {
+; LA32-LABEL: add_i1_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i1_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i1 %x, 3
+  ret i1 %add
+}
+
+define i8 @add_i8_3(i8 %x) {
+; LA32-LABEL: add_i8_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i8_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i8 %x, 3
+  ret i8 %add
+}
+
+define i16 @add_i16_3(i16 %x) {
+; LA32-LABEL: add_i16_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i16_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i16 %x, 3
+  ret i16 %add
+}
+
+define i32 @add_i32_3(i32 %x) {
+; LA32-LABEL: add_i32_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i32_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i32 %x, 3
+  ret i32 %add
+}
+
+; Match the pattern:
+; def : PatGprImm_32<add, ADDI_W, simm12>;
+define signext i32 @add_i32_3_sext(i32 %x) {
+; LA32-LABEL: add_i32_3_sext:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i32_3_sext:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.w $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i32 %x, 3
+  ret i32 %add
+}
+
+define i64 @add_i64_3(i64 %x) {
+; LA32-LABEL: add_i64_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a2, $a0, 3
+; LA32-NEXT:    sltu $a0, $a2, $a0
+; LA32-NEXT:    add.w $a1, $a1, $a0
+; LA32-NEXT:    move $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: add_i64_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = add i64 %x, 3
+  ret i64 %add
+}
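
The LA32 `add_i64` output above is worth unpacking: the low halves are added with `add.w`, the carry is recovered by `sltu $a0, $a2, $a0` (an unsigned wrap check: the 32-bit sum is below an operand exactly when the addition carried), and the carry is then folded into the high-half sum. The same computation written out in IR, as an illustrative sketch (function name invented):

```llvm
define i64 @add_i64_by_halves(i64 %x, i64 %y) {
  %xlo = trunc i64 %x to i32
  %xhi.shifted = lshr i64 %x, 32
  %xhi = trunc i64 %xhi.shifted to i32
  %ylo = trunc i64 %y to i32
  %yhi.shifted = lshr i64 %y, 32
  %yhi = trunc i64 %yhi.shifted to i32
  %lo = add i32 %xlo, %ylo            ; add.w $a2, $a0, $a2
  %carrybit = icmp ult i32 %lo, %xlo  ; sltu $a0, $a2, $a0
  %carry = zext i1 %carrybit to i32
  %hi.nc = add i32 %xhi, %yhi         ; add.w $a1, $a1, $a3
  %hi = add i32 %hi.nc, %carry       ; add.w $a1, $a1, $a0
  %lo.64 = zext i32 %lo to i64
  %hi.64 = zext i32 %hi to i64
  %hi.pos = shl i64 %hi.64, 32
  %res = or i64 %lo.64, %hi.pos
  ret i64 %res
}
```
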
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fadd.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fadd.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fadd.ll
@@ -0,0 +1,32 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+; Exercise the 'fadd' LLVM IR instruction: https://llvm.org/docs/LangRef.html#fadd-instruction
+
+define float @fadd_s(float %x, float %y) {
+; LA32-LABEL: fadd_s:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fadd.s $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fadd_s:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fadd.s $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = fadd float %x, %y
+  ret float %add
+}
+
+define double @fadd_d(double %x, double %y) {
+; LA32-LABEL: fadd_d:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fadd.d $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fadd_d:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fadd.d $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %add = fadd double %x, %y
+  ret double %add
+}
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fdiv.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fdiv.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fdiv.ll
@@ -0,0 +1,32 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+; Exercise the 'fdiv' LLVM IR instruction: https://llvm.org/docs/LangRef.html#fdiv-instruction
+
+define float @fdiv_s(float %x, float %y) {
+; LA32-LABEL: fdiv_s:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fdiv.s $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fdiv_s:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fdiv.s $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %div = fdiv float %x, %y
+  ret float %div
+}
+
+define double @fdiv_d(double %x, double %y) {
+; LA32-LABEL: fdiv_d:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fdiv.d $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fdiv_d:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fdiv.d $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %div = fdiv double %x, %y
+  ret double %div
+}
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fmul.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fmul.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fmul.ll
@@ -0,0 +1,32 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+; Exercise the 'fmul' LLVM IR instruction: https://llvm.org/docs/LangRef.html#fmul-instruction
+
+define float @fmul_s(float %x, float %y) {
+; LA32-LABEL: fmul_s:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fmul.s $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fmul_s:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fmul.s $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %mul = fmul float %x, %y
+  ret float %mul
+}
+
+define double @fmul_d(double %x, double %y) {
+; LA32-LABEL: fmul_d:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fmul.d $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fmul_d:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fmul.d $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %mul = fmul double %x, %y
+  ret double %mul
+}
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fsub.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fsub.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fsub.ll
@@ -0,0 +1,32 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+; Exercise the 'fsub' LLVM IR instruction: https://llvm.org/docs/LangRef.html#fsub-instruction
+
+define float @fsub_s(float %x, float %y) {
+; LA32-LABEL: fsub_s:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fsub.s $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fsub_s:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fsub.s $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = fsub float %x, %y
+  ret float %sub
+}
+
+define double @fsub_d(double %x, double %y) {
+; LA32-LABEL: fsub_d:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fsub.d $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fsub_d:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fsub.d $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = fsub double %x, %y
+  ret double %sub
+}
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/sub.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/sub.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/sub.ll
@@ -0,0 +1,93 @@
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+; Exercise the 'sub' LLVM IR instruction: https://llvm.org/docs/LangRef.html#sub-instruction
+
+define i1 @sub_i1(i1 %x, i1 %y) {
+; LA32-LABEL: sub_i1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sub.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sub_i1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = sub i1 %x, %y
+  ret i1 %sub
+}
+
+define i8 @sub_i8(i8 %x, i8 %y) {
+; LA32-LABEL: sub_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sub.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sub_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = sub i8 %x, %y
+  ret i8 %sub
+}
+
+define i16 @sub_i16(i16 %x, i16 %y) {
+; LA32-LABEL: sub_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sub.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sub_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = sub i16 %x, %y
+  ret i16 %sub
+}
+
+define i32 @sub_i32(i32 %x, i32 %y) {
+; LA32-LABEL: sub_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sub.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sub_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = sub i32 %x, %y
+  ret i32 %sub
+}
+
+; Match the pattern:
+; def : PatGprGpr_32<sub, SUB_W>;
+define signext i32 @sub_i32_sext(i32 %x, i32 %y) {
+; LA32-LABEL: sub_i32_sext:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sub.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sub_i32_sext:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = sub i32 %x, %y
+  ret i32 %sub
+}
+
+define i64 @sub_i64(i64 %x, i64 %y) {
+; LA32-LABEL: sub_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sub.w $a1, $a1, $a3
+; LA32-NEXT:    sltu $a3, $a0, $a2
+; LA32-NEXT:    sub.w $a1, $a1, $a3
+; LA32-NEXT:    sub.w $a0, $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sub_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sub.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %sub = sub i64 %x, %y
+  ret i64 %sub
+}