diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -196,6 +196,10 @@
 
   // Function alignments.
   setMinFunctionAlignment(Align(4));
+  // Set preferred alignments.
+  setPrefFunctionAlignment(STI.getPrefFunctionAlignment());
+  setPrefLoopAlignment(STI.getPrefLoopAlignment());
+  setMaxBytesForAlignment(STI.getMaxBytesForLoopAlignment());
 
   setTargetDAGCombine(ISD::AND);
   setTargetDAGCombine(ISD::OR);
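
Taken together, these hooks shape the emitted code roughly as follows: the preferred function alignment raises function alignment above the 4-byte minimum set just above, the preferred loop alignment is applied to loop header blocks during block placement, and the max-bytes value caps how much padding may be spent to reach that alignment; it appears as the third operand of the .p2align directives added to the tests below.
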
diff --git a/llvm/lib/Target/LoongArch/LoongArchSubtarget.h b/llvm/lib/Target/LoongArch/LoongArchSubtarget.h
--- a/llvm/lib/Target/LoongArch/LoongArchSubtarget.h
+++ b/llvm/lib/Target/LoongArch/LoongArchSubtarget.h
@@ -50,6 +50,9 @@
   LoongArchRegisterInfo RegInfo;
   LoongArchTargetLowering TLInfo;
   SelectionDAGTargetInfo TSInfo;
+  Align PrefFunctionAlignment;
+  Align PrefLoopAlignment;
+  unsigned MaxBytesForLoopAlignment = 0;
 
   /// Initializes using the passed in CPU and feature strings so that we can
   /// use initializer lists for subtarget initialization.
@@ -59,6 +62,9 @@
                                                       StringRef FS,
                                                       StringRef ABIName);
 
+  /// Initialize properties based on the selected processor family.
+  void initializeProperties();
+
 public:
   // Initializes the data members to match that of the specified triple.
   LoongArchSubtarget(const Triple &TT, StringRef CPU, StringRef TuneCPU,
@@ -94,6 +100,11 @@
   MVT getGRLenVT() const { return GRLenVT; }
   unsigned getGRLen() const { return GRLen; }
   LoongArchABI::ABI getTargetABI() const { return TargetABI; }
+  Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
+  Align getPrefLoopAlignment() const { return PrefLoopAlignment; }
+  unsigned getMaxBytesForLoopAlignment() const {
+    return MaxBytesForLoopAlignment;
+  }
 };
 } // end namespace llvm
 
diff --git a/llvm/lib/Target/LoongArch/LoongArchSubtarget.cpp b/llvm/lib/Target/LoongArch/LoongArchSubtarget.cpp
--- a/llvm/lib/Target/LoongArch/LoongArchSubtarget.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchSubtarget.cpp
@@ -35,6 +35,7 @@
     TuneCPU = CPU;
 
   ParseSubtargetFeatures(CPU, TuneCPU, FS);
+  initializeProperties();
   if (Is64Bit) {
     GRLenVT = MVT::i64;
     GRLen = 64;
@@ -54,6 +55,21 @@
   return *this;
 }
 
+void LoongArchSubtarget::initializeProperties() {
+  // Initialize CPU-specific properties. We should add a tablegen feature for
+  // this in the future so that it can be specified together with the other
+  // subtarget features.
+  //
+  // In practice, LA464 is the only microarchitecture we currently support,
+  // so set the appropriate values directly for now.
+
+  // LA464's instruction fetch and decode are 4-wide and all instructions are
+  // 4 bytes, so a 16-byte alignment covers exactly one fetch group.
+  PrefFunctionAlignment = Align(16);
+  PrefLoopAlignment = Align(16);
+  MaxBytesForLoopAlignment = 16;
+}
+
 LoongArchSubtarget::LoongArchSubtarget(const Triple &TT, StringRef CPU,
                                        StringRef TuneCPU, StringRef FS,
                                        StringRef ABIName,
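
The 16-byte values above follow from the stated fetch width: four instructions per fetch group times 4 bytes per instruction is 16 bytes, so aligning loop headers to 16 bytes lets a fetch group start exactly at the header. MaxBytesForLoopAlignment bounds how much padding may be spent for that purpose and becomes the third operand of the .p2align 4, , 16 directives in the updated tests. A minimal standalone sketch of that padding rule (plain C++ rather than LLVM code; the names and values here are illustrative only):

  // Illustrates the ".p2align <exp>, , <max>" rule: request 2^exp-byte
  // alignment, but skip it entirely if more than <max> padding bytes would
  // be needed. With a 16-byte alignment at most 15 padding bytes are ever
  // required, so a 16-byte cap never suppresses the alignment.
  #include <cstdint>
  #include <cstdio>

  static uint64_t paddingFor(uint64_t Offset, uint64_t Alignment,
                             uint64_t MaxBytes) {
    uint64_t Pad = (Alignment - Offset % Alignment) % Alignment;
    return Pad <= MaxBytes ? Pad : 0; // too costly: leave the block unaligned
  }

  int main() {
    // A loop header at byte offset 4660, with the LA464 values from above.
    std::printf("pad = %llu\n",
                (unsigned long long)paddingFor(4660, 16, 16)); // pad = 12
  }
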
diff --git a/llvm/test/CodeGen/LoongArch/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/LoongArch/atomicrmw-uinc-udec-wrap.ll
--- a/llvm/test/CodeGen/LoongArch/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/LoongArch/atomicrmw-uinc-udec-wrap.ll
@@ -13,6 +13,7 @@
 ; LA64-NEXT:    andi $a0, $a0, 24
 ; LA64-NEXT:    nor $a4, $a4, $zero
 ; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB0_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB0_3 Depth 2
@@ -68,6 +69,7 @@
 ; LA64-NEXT:    andi $a0, $a0, 24
 ; LA64-NEXT:    nor $a4, $a4, $zero
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB1_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB1_3 Depth 2
@@ -115,6 +117,7 @@
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.w $a3, $a0, 0
 ; LA64-NEXT:    bstrpick.d $a2, $a1, 31, 0
+; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB2_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB2_3 Depth 2
@@ -156,6 +159,7 @@
 ; LA64-LABEL: atomicrmw_uinc_wrap_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.d $a2, $a0, 0
+; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB3_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB3_3 Depth 2
@@ -203,6 +207,7 @@
 ; LA64-NEXT:    andi $a0, $a0, 24
 ; LA64-NEXT:    nor $a4, $a4, $zero
 ; LA64-NEXT:    andi $a5, $a1, 255
+; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB4_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB4_3 Depth 2
@@ -261,6 +266,7 @@
 ; LA64-NEXT:    andi $a0, $a0, 24
 ; LA64-NEXT:    nor $a4, $a4, $zero
 ; LA64-NEXT:    bstrpick.d $a5, $a1, 15, 0
+; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB5_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB5_3 Depth 2
@@ -311,6 +317,7 @@
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.w $a4, $a0, 0
 ; LA64-NEXT:    bstrpick.d $a3, $a1, 31, 0
+; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB6_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB6_3 Depth 2
@@ -355,6 +362,7 @@
 ; LA64-LABEL: atomicrmw_udec_wrap_i64:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    ld.d $a2, $a0, 0
+; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB7_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB7_3 Depth 2
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
@@ -10,6 +10,7 @@
 ; LA64F-NEXT:    fld.s $fa0, $a0, 0
 ; LA64F-NEXT:    addi.w $a1, $zero, 1
 ; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB0_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Loop Header: Depth=1
 ; LA64F-NEXT:    # Child Loop BB0_3 Depth 2
@@ -45,6 +46,7 @@
 ; LA64D-NEXT:    fld.s $fa0, $a0, 0
 ; LA64D-NEXT:    addi.w $a1, $zero, 1
 ; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB0_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Loop Header: Depth=1
 ; LA64D-NEXT:    # Child Loop BB0_3 Depth 2
@@ -85,6 +87,7 @@
 ; LA64F-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI1_0)
 ; LA64F-NEXT:    addi.d $a1, $a1, %pc_lo12(.LCPI1_0)
 ; LA64F-NEXT:    fld.s $fa1, $a1, 0
+; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB1_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Loop Header: Depth=1
 ; LA64F-NEXT:    # Child Loop BB1_3 Depth 2
@@ -120,6 +123,7 @@
 ; LA64D-NEXT:    pcalau12i $a1, %pc_hi20(.LCPI1_0)
 ; LA64D-NEXT:    addi.d $a1, $a1, %pc_lo12(.LCPI1_0)
 ; LA64D-NEXT:    fld.s $fa1, $a1, 0
+; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB1_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Loop Header: Depth=1
 ; LA64D-NEXT:    # Child Loop BB1_3 Depth 2
@@ -158,6 +162,7 @@
 ; LA64F-NEXT:    fld.s $fa0, $a0, 0
 ; LA64F-NEXT:    addi.w $a1, $zero, 1
 ; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB2_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Loop Header: Depth=1
 ; LA64F-NEXT:    # Child Loop BB2_3 Depth 2
@@ -194,6 +199,7 @@
 ; LA64D-NEXT:    fld.s $fa0, $a0, 0
 ; LA64D-NEXT:    addi.w $a1, $zero, 1
 ; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB2_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Loop Header: Depth=1
 ; LA64D-NEXT:    # Child Loop BB2_3 Depth 2
@@ -234,6 +240,7 @@
 ; LA64F-NEXT:    fld.s $fa0, $a0, 0
 ; LA64F-NEXT:    addi.w $a1, $zero, 1
 ; LA64F-NEXT:    movgr2fr.w $fa1, $a1
+; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB3_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Loop Header: Depth=1
 ; LA64F-NEXT:    # Child Loop BB3_3 Depth 2
@@ -270,6 +277,7 @@
 ; LA64D-NEXT:    fld.s $fa0, $a0, 0
 ; LA64D-NEXT:    addi.w $a1, $zero, 1
 ; LA64D-NEXT:    movgr2fr.w $fa1, $a1
+; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB3_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Loop Header: Depth=1
 ; LA64D-NEXT:    # Child Loop BB3_3 Depth 2
@@ -323,6 +331,7 @@
 ; LA64F-NEXT:    addi.d $s1, $sp, 8
 ; LA64F-NEXT:    addi.d $s2, $sp, 0
 ; LA64F-NEXT:    ori $s3, $zero, 2
+; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB4_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64F-NEXT:    st.d $a0, $sp, 8
@@ -371,6 +380,7 @@
 ; LA64D-NEXT:    addi.d $s1, $sp, 16
 ; LA64D-NEXT:    addi.d $s2, $sp, 8
 ; LA64D-NEXT:    ori $s3, $zero, 2
+; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB4_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
@@ -419,6 +429,7 @@
 ; LA64F-NEXT:    addi.d $s1, $sp, 8
 ; LA64F-NEXT:    addi.d $s2, $sp, 0
 ; LA64F-NEXT:    ori $s3, $zero, 2
+; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB5_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64F-NEXT:    st.d $a0, $sp, 8
@@ -468,6 +479,7 @@
 ; LA64D-NEXT:    addi.d $s1, $sp, 16
 ; LA64D-NEXT:    addi.d $s2, $sp, 8
 ; LA64D-NEXT:    ori $s3, $zero, 2
+; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB5_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
@@ -515,6 +527,7 @@
 ; LA64F-NEXT:    addi.d $s1, $sp, 8
 ; LA64F-NEXT:    addi.d $s2, $sp, 0
 ; LA64F-NEXT:    ori $s3, $zero, 2
+; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB6_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64F-NEXT:    st.d $a0, $sp, 8
@@ -563,6 +576,7 @@
 ; LA64D-NEXT:    addi.d $s1, $sp, 16
 ; LA64D-NEXT:    addi.d $s2, $sp, 8
 ; LA64D-NEXT:    ori $s3, $zero, 2
+; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB6_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
@@ -612,6 +626,7 @@
 ; LA64F-NEXT:    addi.d $s1, $sp, 8
 ; LA64F-NEXT:    addi.d $s2, $sp, 0
 ; LA64F-NEXT:    ori $s3, $zero, 2
+; LA64F-NEXT:    .p2align 4, , 16
 ; LA64F-NEXT:  .LBB7_1: # %atomicrmw.start
 ; LA64F-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64F-NEXT:    st.d $a0, $sp, 8
@@ -660,6 +675,7 @@
 ; LA64D-NEXT:    addi.d $s1, $sp, 16
 ; LA64D-NEXT:    addi.d $s2, $sp, 8
 ; LA64D-NEXT:    ori $s3, $zero, 2
+; LA64D-NEXT:    .p2align 4, , 16
 ; LA64D-NEXT:  .LBB7_1: # %atomicrmw.start
 ; LA64D-NEXT:    # =>This Inner Loop Header: Depth=1
 ; LA64D-NEXT:    fst.d $fa0, $sp, 16
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll
@@ -5,6 +5,7 @@
 define void @foo() noreturn nounwind {
 ; ALL-LABEL: foo:
 ; ALL:       # %bb.0: # %entry
+; ALL-NEXT:    .p2align 4, , 16
 ; ALL-NEXT:  .LBB0_1: # %loop
 ; ALL-NEXT:    # =>This Inner Loop Header: Depth=1
 ; ALL-NEXT:    b .LBB0_1