diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1625,7 +1625,7 @@
 }
 
 defm : LdPat<sextloadi8, LB>;
-defm : LdPat<extloadi8, LB>;
+defm : LdPat<extloadi8, LBU>; // Prefer unsigned due to no c.lb in Zcb.
 defm : LdPat<sextloadi16, LH>;
 defm : LdPat<extloadi16, LH>;
 defm : LdPat<load, LW, i32>, Requires<[IsRV32]>;
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -2024,7 +2024,7 @@
 ; RV32I-NEXT: li a3, 0
 ; RV32I-NEXT: li a4, 0
 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
 ; RV32I-NEXT: bnez a0, .LBB35_4
 ; RV32I-NEXT: .LBB35_2: # %atomicrmw.start
 ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2095,7 +2095,7 @@
 ; RV64I-NEXT: li a3, 0
 ; RV64I-NEXT: li a4, 0
 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
 ; RV64I-NEXT: bnez a0, .LBB35_4
 ; RV64I-NEXT: .LBB35_2: # %atomicrmw.start
 ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2170,7 +2170,7 @@
 ; RV32I-NEXT: li a4, 2
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
 ; RV32I-NEXT: bnez a0, .LBB36_4
 ; RV32I-NEXT: .LBB36_2: # %atomicrmw.start
 ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2241,7 +2241,7 @@
 ; RV64I-NEXT: li a4, 2
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
 ; RV64I-NEXT: bnez a0, .LBB36_4
 ; RV64I-NEXT: .LBB36_2: # %atomicrmw.start
 ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2316,7 +2316,7 @@
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: li a4, 0
 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
 ; RV32I-NEXT: bnez a0, .LBB37_4
 ; RV32I-NEXT: .LBB37_2: # %atomicrmw.start
 ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2387,7 +2387,7 @@
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: li a4, 0
 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
 ; RV64I-NEXT: bnez a0, .LBB37_4
 ; RV64I-NEXT: .LBB37_2: # %atomicrmw.start
 ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2462,7 +2462,7 @@
 ; RV32I-NEXT: li a4, 2
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
 ; RV32I-NEXT: bnez a0, .LBB38_4
 ; RV32I-NEXT: .LBB38_2: # %atomicrmw.start
 ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2533,7 +2533,7 @@
 ; RV64I-NEXT: li a4, 2
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
 ; RV64I-NEXT: bnez a0, .LBB38_4
 ; RV64I-NEXT: .LBB38_2: # %atomicrmw.start
 ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2608,7 +2608,7 @@
 ; RV32I-NEXT: li a4, 5
 ; RV32I-NEXT: mv a0, s0
 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt
-; RV32I-NEXT: lb a3, 15(sp)
+; RV32I-NEXT: lbu a3, 15(sp)
 ; RV32I-NEXT: bnez a0, .LBB39_4
 ; RV32I-NEXT: .LBB39_2: # %atomicrmw.start
 ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
@@ -2679,7 +2679,7 @@
 ; RV64I-NEXT: li a4, 5
 ; RV64I-NEXT: mv a0, s0
 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt
-; RV64I-NEXT: lb a3, 15(sp)
+; RV64I-NEXT: lbu a3, 15(sp)
 ; RV64I-NEXT: bnez a0, .LBB39_4
 ; RV64I-NEXT: .LBB39_2: # %atomicrmw.start
 ; RV64I-NEXT: # =>This Inner
Loop Header: Depth=1 @@ -2754,7 +2754,7 @@ ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB40_4 ; RV32I-NEXT: .LBB40_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -2825,7 +2825,7 @@ ; RV64I-NEXT: li a3, 0 ; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB40_4 ; RV64I-NEXT: .LBB40_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -2900,7 +2900,7 @@ ; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB41_4 ; RV32I-NEXT: .LBB41_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -2971,7 +2971,7 @@ ; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB41_4 ; RV64I-NEXT: .LBB41_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -3046,7 +3046,7 @@ ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB42_4 ; RV32I-NEXT: .LBB42_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -3117,7 +3117,7 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB42_4 ; RV64I-NEXT: .LBB42_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -3192,7 +3192,7 @@ ; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB43_4 ; RV32I-NEXT: .LBB43_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -3263,7 +3263,7 @@ ; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB43_4 ; RV64I-NEXT: .LBB43_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -3338,7 +3338,7 @@ ; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB44_4 ; RV32I-NEXT: .LBB44_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -3409,7 +3409,7 @@ ; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB44_4 ; RV64I-NEXT: .LBB44_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -3483,7 +3483,7 @@ ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB45_4 ; RV32I-NEXT: .LBB45_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -3547,7 +3547,7 @@ ; RV64I-NEXT: li a3, 0 ; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: 
bnez a0, .LBB45_4 ; RV64I-NEXT: .LBB45_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -3615,7 +3615,7 @@ ; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB46_4 ; RV32I-NEXT: .LBB46_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -3679,7 +3679,7 @@ ; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB46_4 ; RV64I-NEXT: .LBB46_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -3747,7 +3747,7 @@ ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB47_4 ; RV32I-NEXT: .LBB47_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -3811,7 +3811,7 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB47_4 ; RV64I-NEXT: .LBB47_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -3879,7 +3879,7 @@ ; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB48_4 ; RV32I-NEXT: .LBB48_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -3943,7 +3943,7 @@ ; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB48_4 ; RV64I-NEXT: .LBB48_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -4011,7 +4011,7 @@ ; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB49_4 ; RV32I-NEXT: .LBB49_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -4075,7 +4075,7 @@ ; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB49_4 ; RV64I-NEXT: .LBB49_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -4143,7 +4143,7 @@ ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB50_4 ; RV32I-NEXT: .LBB50_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -4207,7 +4207,7 @@ ; RV64I-NEXT: li a3, 0 ; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB50_4 ; RV64I-NEXT: .LBB50_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -4275,7 +4275,7 @@ ; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB51_4 ; RV32I-NEXT: .LBB51_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -4339,7 +4339,7 @@ ; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call 
__atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB51_4 ; RV64I-NEXT: .LBB51_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -4407,7 +4407,7 @@ ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB52_4 ; RV32I-NEXT: .LBB52_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -4471,7 +4471,7 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB52_4 ; RV64I-NEXT: .LBB52_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -4539,7 +4539,7 @@ ; RV32I-NEXT: li a4, 2 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB53_4 ; RV32I-NEXT: .LBB53_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -4603,7 +4603,7 @@ ; RV64I-NEXT: li a4, 2 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB53_4 ; RV64I-NEXT: .LBB53_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -4671,7 +4671,7 @@ ; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB54_4 ; RV32I-NEXT: .LBB54_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -4735,7 +4735,7 @@ ; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB54_4 ; RV64I-NEXT: .LBB54_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll --- a/llvm/test/CodeGen/RISCV/atomic-signext.ll +++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll @@ -596,7 +596,7 @@ ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB10_4 ; RV32I-NEXT: .LBB10_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -670,7 +670,7 @@ ; RV64I-NEXT: li a3, 0 ; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB10_4 ; RV64I-NEXT: .LBB10_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -748,7 +748,7 @@ ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB11_4 ; RV32I-NEXT: .LBB11_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -822,7 +822,7 @@ ; RV64I-NEXT: li a3, 0 ; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB11_4 ; RV64I-NEXT: .LBB11_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -899,7 +899,7 @@ ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call 
__atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB12_4 ; RV32I-NEXT: .LBB12_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -966,7 +966,7 @@ ; RV64I-NEXT: li a3, 0 ; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB12_4 ; RV64I-NEXT: .LBB12_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -1037,7 +1037,7 @@ ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: li a4, 0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB13_4 ; RV32I-NEXT: .LBB13_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -1104,7 +1104,7 @@ ; RV64I-NEXT: li a3, 0 ; RV64I-NEXT: li a4, 0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB13_4 ; RV64I-NEXT: .LBB13_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 diff --git a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll --- a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll +++ b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll @@ -41,7 +41,7 @@ ; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 3(sp) +; RV32I-NEXT: lbu a3, 3(sp) ; RV32I-NEXT: beqz a0, .LBB0_1 ; RV32I-NEXT: # %bb.2: # %atomicrmw.end ; RV32I-NEXT: mv a0, a3 @@ -117,7 +117,7 @@ ; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 7(sp) +; RV64I-NEXT: lbu a3, 7(sp) ; RV64I-NEXT: beqz a0, .LBB0_1 ; RV64I-NEXT: # %bb.2: # %atomicrmw.end ; RV64I-NEXT: mv a0, a3 @@ -670,7 +670,7 @@ ; RV32I-NEXT: li a4, 5 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __atomic_compare_exchange_1@plt -; RV32I-NEXT: lb a3, 15(sp) +; RV32I-NEXT: lbu a3, 15(sp) ; RV32I-NEXT: bnez a0, .LBB4_4 ; RV32I-NEXT: .LBB4_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 @@ -766,7 +766,7 @@ ; RV64I-NEXT: li a4, 5 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __atomic_compare_exchange_1@plt -; RV64I-NEXT: lb a3, 15(sp) +; RV64I-NEXT: lbu a3, 15(sp) ; RV64I-NEXT: bnez a0, .LBB4_4 ; RV64I-NEXT: .LBB4_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 diff --git a/llvm/test/CodeGen/RISCV/forced-atomics.ll b/llvm/test/CodeGen/RISCV/forced-atomics.ll --- a/llvm/test/CodeGen/RISCV/forced-atomics.ll +++ b/llvm/test/CodeGen/RISCV/forced-atomics.ll @@ -137,7 +137,7 @@ ; RV32-NO-ATOMIC-NEXT: li a3, 5 ; RV32-NO-ATOMIC-NEXT: li a4, 5 ; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_1@plt -; RV32-NO-ATOMIC-NEXT: lb a0, 11(sp) +; RV32-NO-ATOMIC-NEXT: lbu a0, 11(sp) ; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16 ; RV32-NO-ATOMIC-NEXT: ret @@ -163,7 +163,7 @@ ; RV64-NO-ATOMIC-NEXT: li a3, 5 ; RV64-NO-ATOMIC-NEXT: li a4, 5 ; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_1@plt -; RV64-NO-ATOMIC-NEXT: lb a0, 7(sp) +; RV64-NO-ATOMIC-NEXT: lbu a0, 7(sp) ; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16 ; RV64-NO-ATOMIC-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll --- a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll 
+++ b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll @@ -382,7 +382,7 @@ ; RV32-LABEL: rmw_addi_addi: ; RV32: # %bb.0: # %entry ; RV32-NEXT: lui a0, %hi(bar+3211) -; RV32-NEXT: lb a1, %lo(bar+3211)(a0) +; RV32-NEXT: lbu a1, %lo(bar+3211)(a0) ; RV32-NEXT: addi a1, a1, 10 ; RV32-NEXT: sb a1, %lo(bar+3211)(a0) ; RV32-NEXT: ret @@ -390,7 +390,7 @@ ; RV64-LABEL: rmw_addi_addi: ; RV64: # %bb.0: # %entry ; RV64-NEXT: lui a0, %hi(bar+3211) -; RV64-NEXT: lb a1, %lo(bar+3211)(a0) +; RV64-NEXT: lbu a1, %lo(bar+3211)(a0) ; RV64-NEXT: addiw a1, a1, 10 ; RV64-NEXT: sb a1, %lo(bar+3211)(a0) ; RV64-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll b/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll --- a/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll +++ b/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll @@ -15,8 +15,8 @@ ; RV32I-NEXT: lui a0, 24 ; RV32I-NEXT: addi a0, a0, 1704 ; RV32I-NEXT: add a0, sp, a0 -; RV32I-NEXT: lb a1, 4(a0) -; RV32I-NEXT: lb a0, 0(a0) +; RV32I-NEXT: lbu a1, 4(a0) +; RV32I-NEXT: lbu a0, 0(a0) ; RV32I-NEXT: lui a0, 24 ; RV32I-NEXT: addi a0, a0, 1712 ; RV32I-NEXT: add sp, sp, a0 @@ -31,8 +31,8 @@ ; RV64I-NEXT: lui a0, 24 ; RV64I-NEXT: addiw a0, a0, 1704 ; RV64I-NEXT: add a0, sp, a0 -; RV64I-NEXT: lb a1, 4(a0) -; RV64I-NEXT: lb a0, 0(a0) +; RV64I-NEXT: lbu a1, 4(a0) +; RV64I-NEXT: lbu a0, 0(a0) ; RV64I-NEXT: lui a0, 24 ; RV64I-NEXT: addiw a0, a0, 1712 ; RV64I-NEXT: add sp, sp, a0 @@ -57,10 +57,10 @@ ; RV32I-NEXT: .cfi_def_cfa_offset 100608 ; RV32I-NEXT: lui a0, 25 ; RV32I-NEXT: add a0, sp, a0 -; RV32I-NEXT: lb a0, -292(a0) +; RV32I-NEXT: lbu a0, -292(a0) ; RV32I-NEXT: lui a0, 24 ; RV32I-NEXT: add a0, sp, a0 -; RV32I-NEXT: lb a0, 1704(a0) +; RV32I-NEXT: lbu a0, 1704(a0) ; RV32I-NEXT: lui a0, 25 ; RV32I-NEXT: addi a0, a0, -1792 ; RV32I-NEXT: add sp, sp, a0 @@ -74,10 +74,10 @@ ; RV64I-NEXT: .cfi_def_cfa_offset 100608 ; RV64I-NEXT: lui a0, 25 ; RV64I-NEXT: add a0, sp, a0 -; RV64I-NEXT: lb a0, -292(a0) +; RV64I-NEXT: lbu a0, -292(a0) ; RV64I-NEXT: lui a0, 24 ; RV64I-NEXT: add a0, sp, a0 -; RV64I-NEXT: lb a0, 1704(a0) +; RV64I-NEXT: lbu a0, 1704(a0) ; RV64I-NEXT: lui a0, 25 ; RV64I-NEXT: addiw a0, a0, -1792 ; RV64I-NEXT: add sp, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/mem.ll b/llvm/test/CodeGen/RISCV/mem.ll --- a/llvm/test/CodeGen/RISCV/mem.ll +++ b/llvm/test/CodeGen/RISCV/mem.ll @@ -8,7 +8,7 @@ ; RV32I-LABEL: lb: ; RV32I: # %bb.0: ; RV32I-NEXT: lb a1, 1(a0) -; RV32I-NEXT: lb a0, 0(a0) +; RV32I-NEXT: lbu a0, 0(a0) ; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: ret %1 = getelementptr i8, ptr %a, i32 1 @@ -123,7 +123,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lbu a1, 1(a0) ; RV32I-NEXT: lbu a2, 2(a0) -; RV32I-NEXT: lb a0, 0(a0) +; RV32I-NEXT: lbu a0, 0(a0) ; RV32I-NEXT: sub a0, a2, a1 ; RV32I-NEXT: ret ; sextload i1 @@ -145,7 +145,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lbu a1, 1(a0) ; RV32I-NEXT: lbu a2, 2(a0) -; RV32I-NEXT: lb a0, 0(a0) +; RV32I-NEXT: lbu a0, 0(a0) ; RV32I-NEXT: sub a0, a2, a1 ; RV32I-NEXT: ret ; sextload i1 diff --git a/llvm/test/CodeGen/RISCV/mem64.ll b/llvm/test/CodeGen/RISCV/mem64.ll --- a/llvm/test/CodeGen/RISCV/mem64.ll +++ b/llvm/test/CodeGen/RISCV/mem64.ll @@ -8,7 +8,7 @@ ; RV64I-LABEL: lb: ; RV64I: # %bb.0: ; RV64I-NEXT: lb a1, 1(a0) -; RV64I-NEXT: lb a0, 0(a0) +; RV64I-NEXT: lbu a0, 0(a0) ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret %1 = getelementptr i8, ptr %a, i32 1 @@ -168,7 +168,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: lbu a1, 1(a0) ; RV64I-NEXT: lbu a2, 2(a0) -; RV64I-NEXT: lb a0, 0(a0) +; RV64I-NEXT: lbu a0, 0(a0) ; 
RV64I-NEXT: sub a0, a2, a1 ; RV64I-NEXT: ret ; sextload i1 @@ -190,7 +190,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: lbu a1, 1(a0) ; RV64I-NEXT: lbu a2, 2(a0) -; RV64I-NEXT: lb a0, 0(a0) +; RV64I-NEXT: lbu a0, 0(a0) ; RV64I-NEXT: sub a0, a2, a1 ; RV64I-NEXT: ret ; sextload i1 diff --git a/llvm/test/CodeGen/RISCV/memcpy-inline.ll b/llvm/test/CodeGen/RISCV/memcpy-inline.ll --- a/llvm/test/CodeGen/RISCV/memcpy-inline.ll +++ b/llvm/test/CodeGen/RISCV/memcpy-inline.ll @@ -28,7 +28,7 @@ ; RV32-NEXT: lui a2, %hi(dst) ; RV32-NEXT: sw a1, %lo(dst)(a2) ; RV32-NEXT: addi a0, a0, %lo(src) -; RV32-NEXT: lb a1, 10(a0) +; RV32-NEXT: lbu a1, 10(a0) ; RV32-NEXT: lh a3, 8(a0) ; RV32-NEXT: lw a0, 4(a0) ; RV32-NEXT: addi a2, a2, %lo(dst) @@ -44,7 +44,7 @@ ; RV64-NEXT: ld a1, %lo(src)(a0) ; RV64-NEXT: lui a2, %hi(dst) ; RV64-NEXT: addi a0, a0, %lo(src) -; RV64-NEXT: lb a3, 10(a0) +; RV64-NEXT: lbu a3, 10(a0) ; RV64-NEXT: lh a0, 8(a0) ; RV64-NEXT: sd a1, %lo(dst)(a2) ; RV64-NEXT: addi a1, a2, %lo(dst) diff --git a/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll b/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll --- a/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll @@ -177,11 +177,11 @@ ; RV64I-NEXT: li a2, 1 ; RV64I-NEXT: subw a2, a2, a1 ; RV64I-NEXT: add a2, a0, a2 -; RV64I-NEXT: lb a2, 0(a2) +; RV64I-NEXT: lbu a2, 0(a2) ; RV64I-NEXT: li a3, 2 ; RV64I-NEXT: subw a3, a3, a1 ; RV64I-NEXT: add a0, a0, a3 -; RV64I-NEXT: lb a0, 0(a0) +; RV64I-NEXT: lbu a0, 0(a0) ; RV64I-NEXT: add a0, a2, a0 ; RV64I-NEXT: ret %3 = mul i64 %1, -4294967296 diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll @@ -162,7 +162,7 @@ ; RV32-NEXT: vmv1r.v v0, v8 ; RV32-NEXT: vmerge.vim v8, v16, 1, v0 ; RV32-NEXT: vs8r.v v8, (a2) -; RV32-NEXT: lb a0, 0(a1) +; RV32-NEXT: lbu a0, 0(a1) ; RV32-NEXT: addi sp, s0, -80 ; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload @@ -202,7 +202,7 @@ ; RV64-NEXT: vmv1r.v v0, v8 ; RV64-NEXT: vmerge.vim v8, v16, 1, v0 ; RV64-NEXT: vs8r.v v8, (a2) -; RV64-NEXT: lb a0, 0(a1) +; RV64-NEXT: lbu a0, 0(a1) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll @@ -845,8 +845,8 @@ ; CHECK-NEXT: add a1, a1, a4 ; CHECK-NEXT: .LBB13_6: # %bb35 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: lb a3, 0(a1) -; CHECK-NEXT: lb a4, 0(a0) +; CHECK-NEXT: lbu a3, 0(a1) +; CHECK-NEXT: lbu a4, 0(a0) ; CHECK-NEXT: add a3, a4, a3 ; CHECK-NEXT: sb a3, 0(a0) ; CHECK-NEXT: addiw a2, a2, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll @@ -343,7 +343,7 @@ ; RV32-NEXT: vmerge.vim v8, v16, 1, v0 ; RV32-NEXT: addi a0, sp, 128 ; RV32-NEXT: vse8.v v8, (a0) -; RV32-NEXT: lb a0, 0(a1) +; RV32-NEXT: lbu a0, 0(a1) ; RV32-NEXT: addi sp, s0, -384 ; RV32-NEXT: lw ra, 380(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 376(sp) # 4-byte Folded 
Reload @@ -374,7 +374,7 @@ ; RV64-NEXT: vmerge.vim v8, v16, 1, v0 ; RV64-NEXT: addi a0, sp, 128 ; RV64-NEXT: vse8.v v8, (a0) -; RV64-NEXT: lb a0, 0(a1) +; RV64-NEXT: lbu a0, 0(a1) ; RV64-NEXT: addi sp, s0, -384 ; RV64-NEXT: ld ra, 376(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 368(sp) # 8-byte Folded Reload @@ -405,7 +405,7 @@ ; RV32ZBS-NEXT: vmerge.vim v8, v16, 1, v0 ; RV32ZBS-NEXT: addi a0, sp, 128 ; RV32ZBS-NEXT: vse8.v v8, (a0) -; RV32ZBS-NEXT: lb a0, 0(a1) +; RV32ZBS-NEXT: lbu a0, 0(a1) ; RV32ZBS-NEXT: addi sp, s0, -384 ; RV32ZBS-NEXT: lw ra, 380(sp) # 4-byte Folded Reload ; RV32ZBS-NEXT: lw s0, 376(sp) # 4-byte Folded Reload @@ -436,7 +436,7 @@ ; RV64ZBS-NEXT: vmerge.vim v8, v16, 1, v0 ; RV64ZBS-NEXT: addi a0, sp, 128 ; RV64ZBS-NEXT: vse8.v v8, (a0) -; RV64ZBS-NEXT: lb a0, 0(a1) +; RV64ZBS-NEXT: lbu a0, 0(a1) ; RV64ZBS-NEXT: addi sp, s0, -384 ; RV64ZBS-NEXT: ld ra, 376(sp) # 8-byte Folded Reload ; RV64ZBS-NEXT: ld s0, 368(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -82,13 +82,13 @@ ; RV64ZVE32F-NEXT: .LBB1_2: # %else2 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB1_3: # %cond.load -; RV64ZVE32F-NEXT: lb a0, 0(a0) +; RV64ZVE32F-NEXT: lbu a0, 0(a0) ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: andi a2, a2, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB1_2 ; RV64ZVE32F-NEXT: .LBB1_4: # %cond.load1 -; RV64ZVE32F-NEXT: lb a0, 0(a1) +; RV64ZVE32F-NEXT: lbu a0, 0(a1) ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 @@ -129,14 +129,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB2_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: lb a0, 0(a0) +; RV64ZVE32F-NEXT: lbu a0, 0(a0) ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB2_2: # %else ; RV64ZVE32F-NEXT: andi a2, a2, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB2_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: lb a0, 0(a1) +; RV64ZVE32F-NEXT: lbu a0, 0(a1) ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 @@ -182,14 +182,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB3_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: lb a0, 0(a0) +; RV64ZVE32F-NEXT: lbu a0, 0(a0) ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB3_2: # %else ; RV64ZVE32F-NEXT: andi a2, a2, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB3_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: lb a0, 0(a1) +; RV64ZVE32F-NEXT: lbu a0, 0(a1) ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 @@ -235,14 +235,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB4_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: lb a0, 0(a0) +; RV64ZVE32F-NEXT: lbu a0, 0(a0) ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB4_2: # %else ; RV64ZVE32F-NEXT: andi a2, a2, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB4_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: lb a0, 0(a1) +; 
RV64ZVE32F-NEXT: lbu a0, 0(a1) ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 @@ -288,14 +288,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB5_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: lb a0, 0(a0) +; RV64ZVE32F-NEXT: lbu a0, 0(a0) ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB5_2: # %else ; RV64ZVE32F-NEXT: andi a2, a2, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB5_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: lb a0, 0(a1) +; RV64ZVE32F-NEXT: lbu a0, 0(a1) ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 @@ -349,14 +349,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB6_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: lb a0, 0(a0) +; RV64ZVE32F-NEXT: lbu a0, 0(a0) ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB6_2: # %else ; RV64ZVE32F-NEXT: andi a2, a2, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB6_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: lb a0, 0(a1) +; RV64ZVE32F-NEXT: lbu a0, 0(a1) ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 @@ -411,14 +411,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB7_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: lb a0, 0(a0) +; RV64ZVE32F-NEXT: lbu a0, 0(a0) ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB7_2: # %else ; RV64ZVE32F-NEXT: andi a2, a2, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB7_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: lb a0, 0(a1) +; RV64ZVE32F-NEXT: lbu a0, 0(a1) ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 @@ -471,14 +471,14 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB8_5: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB8_2 ; RV64ZVE32F-NEXT: .LBB8_6: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma @@ -487,7 +487,7 @@ ; RV64ZVE32F-NEXT: beqz a2, .LBB8_3 ; RV64ZVE32F-NEXT: .LBB8_7: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf4, tu, ma @@ -496,7 +496,7 @@ ; RV64ZVE32F-NEXT: beqz a1, .LBB8_4 ; RV64ZVE32F-NEXT: .LBB8_8: # %cond.load7 ; RV64ZVE32F-NEXT: ld a0, 24(a0) -; RV64ZVE32F-NEXT: lb a0, 0(a0) +; RV64ZVE32F-NEXT: lbu a0, 0(a0) ; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3 @@ -539,14 +539,14 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB9_5: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 
4, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB9_2 ; RV64ZVE32F-NEXT: .LBB9_6: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma @@ -555,7 +555,7 @@ ; RV64ZVE32F-NEXT: beqz a2, .LBB9_3 ; RV64ZVE32F-NEXT: .LBB9_7: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf4, tu, ma @@ -564,7 +564,7 @@ ; RV64ZVE32F-NEXT: beqz a1, .LBB9_4 ; RV64ZVE32F-NEXT: .LBB9_8: # %cond.load7 ; RV64ZVE32F-NEXT: ld a0, 24(a0) -; RV64ZVE32F-NEXT: lb a0, 0(a0) +; RV64ZVE32F-NEXT: lbu a0, 0(a0) ; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3 @@ -641,14 +641,14 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB11_9: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB11_2 ; RV64ZVE32F-NEXT: .LBB11_10: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf2, tu, ma @@ -657,7 +657,7 @@ ; RV64ZVE32F-NEXT: beqz a2, .LBB11_3 ; RV64ZVE32F-NEXT: .LBB11_11: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf2, tu, ma @@ -666,7 +666,7 @@ ; RV64ZVE32F-NEXT: beqz a2, .LBB11_4 ; RV64ZVE32F-NEXT: .LBB11_12: # %cond.load7 ; RV64ZVE32F-NEXT: ld a2, 24(a0) -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, tu, ma @@ -675,7 +675,7 @@ ; RV64ZVE32F-NEXT: beqz a2, .LBB11_5 ; RV64ZVE32F-NEXT: .LBB11_13: # %cond.load10 ; RV64ZVE32F-NEXT: ld a2, 32(a0) -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, mf2, tu, ma @@ -684,7 +684,7 @@ ; RV64ZVE32F-NEXT: beqz a2, .LBB11_6 ; RV64ZVE32F-NEXT: .LBB11_14: # %cond.load13 ; RV64ZVE32F-NEXT: ld a2, 40(a0) -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, mf2, tu, ma @@ -693,7 +693,7 @@ ; RV64ZVE32F-NEXT: beqz a2, .LBB11_7 ; RV64ZVE32F-NEXT: .LBB11_15: # %cond.load16 ; RV64ZVE32F-NEXT: ld a2, 48(a0) -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, mf2, tu, ma @@ -702,7 +702,7 @@ ; RV64ZVE32F-NEXT: beqz a1, .LBB11_8 ; RV64ZVE32F-NEXT: .LBB11_16: # %cond.load19 ; RV64ZVE32F-NEXT: ld a0, 56(a0) -; 
RV64ZVE32F-NEXT: lb a0, 0(a0) +; RV64ZVE32F-NEXT: lbu a0, 0(a0) ; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7 @@ -739,7 +739,7 @@ ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: .LBB12_2: # %else @@ -750,7 +750,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1 @@ -762,7 +762,7 @@ ; RV64ZVE32F-NEXT: # %bb.5: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 @@ -782,7 +782,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 5 @@ -802,7 +802,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 3 @@ -812,7 +812,7 @@ ; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, mf2, tu, ma @@ -823,7 +823,7 @@ ; RV64ZVE32F-NEXT: .LBB12_15: # %cond.load16 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 @@ -834,7 +834,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lb a0, 0(a0) +; RV64ZVE32F-NEXT: lbu a0, 0(a0) ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 @@ -12329,7 +12329,7 @@ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: .LBB97_2: # %else @@ -12340,7 +12340,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1 @@ -12352,7 +12352,7 @@ ; RV64ZVE32F-NEXT: # 
%bb.5: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 2 @@ -12372,7 +12372,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 5 @@ -12397,7 +12397,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 10, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 9 @@ -12409,7 +12409,7 @@ ; RV64ZVE32F-NEXT: # %bb.16: # %cond.load28 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 11, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 10 @@ -12429,7 +12429,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 14, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 13 @@ -12441,7 +12441,7 @@ ; RV64ZVE32F-NEXT: # %bb.22: # %cond.load40 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 15, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 14 @@ -12454,7 +12454,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lb a0, 0(a0) +; RV64ZVE32F-NEXT: lbu a0, 0(a0) ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 15 @@ -12466,7 +12466,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 3 @@ -12476,7 +12476,7 @@ ; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, m1, tu, ma @@ -12487,7 +12487,7 @@ ; RV64ZVE32F-NEXT: .LBB97_28: # %cond.load16 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 6 @@ -12498,7 +12498,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; 
RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 7 @@ -12508,7 +12508,7 @@ ; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 9, e8, m1, tu, ma @@ -12521,7 +12521,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 12, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 11 @@ -12531,7 +12531,7 @@ ; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: vsetivli zero, 13, e8, m1, tu, ma @@ -12589,7 +12589,7 @@ ; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 @@ -12601,7 +12601,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -12615,7 +12615,7 @@ ; RV64ZVE32F-NEXT: # %bb.5: # %cond.load4 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 @@ -12637,7 +12637,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v12, v13, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 @@ -12664,7 +12664,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 @@ -12678,7 +12678,7 @@ ; RV64ZVE32F-NEXT: # %bb.16: # %cond.load28 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 @@ -12694,7 +12694,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 @@ -12708,7 +12708,7 @@ ; RV64ZVE32F-NEXT: # %bb.20: # %cond.load34 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) 
+; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 @@ -12722,7 +12722,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v9, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 @@ -12747,7 +12747,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -12761,7 +12761,7 @@ ; RV64ZVE32F-NEXT: # %bb.29: # %cond.load52 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 @@ -12783,7 +12783,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v12, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -12810,7 +12810,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -12824,7 +12824,7 @@ ; RV64ZVE32F-NEXT: # %bb.40: # %cond.load76 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -12846,7 +12846,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -12860,7 +12860,7 @@ ; RV64ZVE32F-NEXT: # %bb.46: # %cond.load88 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -12875,7 +12875,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: lb a0, 0(a0) +; RV64ZVE32F-NEXT: lbu a0, 0(a0) ; RV64ZVE32F-NEXT: li a1, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 @@ -12889,7 +12889,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 @@ -12901,7 +12901,7 @@ ; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; 
RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 @@ -12913,7 +12913,7 @@ ; RV64ZVE32F-NEXT: .LBB98_52: # %cond.load16 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 @@ -12926,7 +12926,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 @@ -12938,7 +12938,7 @@ ; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 @@ -12950,7 +12950,7 @@ ; RV64ZVE32F-NEXT: .LBB98_55: # %cond.load40 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -12963,7 +12963,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -12975,7 +12975,7 @@ ; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -12989,7 +12989,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -13001,7 +13001,7 @@ ; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -13013,7 +13013,7 @@ ; RV64ZVE32F-NEXT: .LBB98_60: # %cond.load64 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -13026,7 +13026,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -13038,7 +13038,7 @@ ; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; 
RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -13052,7 +13052,7 @@ ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 @@ -13064,7 +13064,7 @@ ; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: lb a2, 0(a2) +; RV64ZVE32F-NEXT: lbu a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 ; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll @@ -62,7 +62,7 @@ ; RV32-NEXT: .LBB4_3: # %cond.load ; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV32-NEXT: vmv.x.s a1, v8 -; RV32-NEXT: lb a2, 1(a1) +; RV32-NEXT: lbu a2, 1(a1) ; RV32-NEXT: lbu a1, 0(a1) ; RV32-NEXT: slli a2, a2, 8 ; RV32-NEXT: or a1, a2, a1 @@ -74,7 +74,7 @@ ; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-NEXT: vslidedown.vi v8, v8, 1 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: lb a1, 1(a0) +; RV32-NEXT: lbu a1, 1(a0) ; RV32-NEXT: lbu a0, 0(a0) ; RV32-NEXT: slli a1, a1, 8 ; RV32-NEXT: or a0, a1, a0 @@ -99,7 +99,7 @@ ; RV64-NEXT: .LBB4_3: # %cond.load ; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a1, v8 -; RV64-NEXT: lb a2, 1(a1) +; RV64-NEXT: lbu a2, 1(a1) ; RV64-NEXT: lbu a1, 0(a1) ; RV64-NEXT: slli a2, a2, 8 ; RV64-NEXT: or a1, a2, a1 @@ -111,7 +111,7 @@ ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vslidedown.vi v8, v8, 1 ; RV64-NEXT: vmv.x.s a0, v8 -; RV64-NEXT: lb a1, 1(a0) +; RV64-NEXT: lbu a1, 1(a0) ; RV64-NEXT: lbu a0, 0(a0) ; RV64-NEXT: slli a1, a1, 8 ; RV64-NEXT: or a0, a1, a0 diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll @@ -310,7 +310,7 @@ ; RV32-NEXT: sw s5, 4(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s6, 0(sp) # 4-byte Folded Spill ; RV32-NEXT: mv s0, a0 -; RV32-NEXT: lb a0, 12(a0) +; RV32-NEXT: lbu a0, 12(a0) ; RV32-NEXT: lw a1, 8(s0) ; RV32-NEXT: slli a2, a0, 30 ; RV32-NEXT: lw a3, 4(s0) @@ -389,7 +389,7 @@ ; RV64-NEXT: sd s2, 16(sp) # 8-byte Folded Spill ; RV64-NEXT: sd s3, 8(sp) # 8-byte Folded Spill ; RV64-NEXT: mv s0, a0 -; RV64-NEXT: lb a0, 12(a0) +; RV64-NEXT: lbu a0, 12(a0) ; RV64-NEXT: lwu a1, 8(s0) ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: ld a2, 0(s0) @@ -460,7 +460,7 @@ ; RV32M-NEXT: sw s5, 4(sp) # 4-byte Folded Spill ; RV32M-NEXT: sw s6, 0(sp) # 4-byte Folded Spill ; RV32M-NEXT: mv s0, a0 -; RV32M-NEXT: lb a0, 12(a0) +; RV32M-NEXT: lbu a0, 12(a0) ; RV32M-NEXT: lw a1, 8(s0) ; RV32M-NEXT: slli a2, a0, 30 ; RV32M-NEXT: lw a3, 4(s0) @@ -535,7 +535,7 @@ ; RV64M-NEXT: ld a1, 0(a0) ; RV64M-NEXT: lwu a2, 8(a0) ; RV64M-NEXT: srli a3, a1, 2 -; RV64M-NEXT: lb a4, 12(a0) +; RV64M-NEXT: lbu a4, 12(a0) ; RV64M-NEXT: slli a5, a2, 62 ; RV64M-NEXT: or a3, a5, a3 ; RV64M-NEXT: srai a3, a3, 31 @@ -610,7 +610,7 @@ ; RV32MV-NEXT: mv s2, a0 ; RV32MV-NEXT: lw a0, 8(a0) ; RV32MV-NEXT: lw a1, 4(s2) -; RV32MV-NEXT: lb a2, 12(s2) +; RV32MV-NEXT: 
lbu a2, 12(s2) ; RV32MV-NEXT: slli a3, a0, 31 ; RV32MV-NEXT: srli a4, a1, 1 ; RV32MV-NEXT: or s3, a4, a3 @@ -712,7 +712,7 @@ ; RV64MV-NEXT: sd s0, 48(sp) # 8-byte Folded Spill ; RV64MV-NEXT: addi s0, sp, 64 ; RV64MV-NEXT: andi sp, sp, -32 -; RV64MV-NEXT: lb a1, 12(a0) +; RV64MV-NEXT: lbu a1, 12(a0) ; RV64MV-NEXT: lwu a2, 8(a0) ; RV64MV-NEXT: slli a1, a1, 32 ; RV64MV-NEXT: ld a3, 0(a0) diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll --- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll +++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll @@ -13,7 +13,7 @@ define i8 @load_i8(ptr %p) { ; ALL-LABEL: load_i8: ; ALL: # %bb.0: -; ALL-NEXT: lb a0, 0(a0) +; ALL-NEXT: lbu a0, 0(a0) ; ALL-NEXT: ret %res = load i8, ptr %p, align 1 ret i8 %res @@ -22,7 +22,7 @@ define i16 @load_i16(ptr %p) { ; NOMISALIGN-LABEL: load_i16: ; NOMISALIGN: # %bb.0: -; NOMISALIGN-NEXT: lb a1, 1(a0) +; NOMISALIGN-NEXT: lbu a1, 1(a0) ; NOMISALIGN-NEXT: lbu a0, 0(a0) ; NOMISALIGN-NEXT: slli a1, a1, 8 ; NOMISALIGN-NEXT: or a0, a1, a0 @@ -41,7 +41,7 @@ ; NOMISALIGN: # %bb.0: ; NOMISALIGN-NEXT: lbu a1, 1(a0) ; NOMISALIGN-NEXT: lbu a2, 0(a0) -; NOMISALIGN-NEXT: lb a0, 2(a0) +; NOMISALIGN-NEXT: lbu a0, 2(a0) ; NOMISALIGN-NEXT: slli a1, a1, 8 ; NOMISALIGN-NEXT: or a1, a1, a2 ; NOMISALIGN-NEXT: slli a0, a0, 16 @@ -50,7 +50,7 @@ ; ; MISALIGN-LABEL: load_i24: ; MISALIGN: # %bb.0: -; MISALIGN-NEXT: lb a1, 2(a0) +; MISALIGN-NEXT: lbu a1, 2(a0) ; MISALIGN-NEXT: lhu a0, 0(a0) ; MISALIGN-NEXT: slli a1, a1, 16 ; MISALIGN-NEXT: or a0, a0, a1 @@ -60,33 +60,19 @@ } define i32 @load_i32(ptr %p) { -; RV32I-LABEL: load_i32: -; RV32I: # %bb.0: -; RV32I-NEXT: lbu a1, 1(a0) -; RV32I-NEXT: lbu a2, 0(a0) -; RV32I-NEXT: lbu a3, 2(a0) -; RV32I-NEXT: lbu a0, 3(a0) -; RV32I-NEXT: slli a1, a1, 8 -; RV32I-NEXT: or a1, a1, a2 -; RV32I-NEXT: slli a3, a3, 16 -; RV32I-NEXT: slli a0, a0, 24 -; RV32I-NEXT: or a0, a0, a3 -; RV32I-NEXT: or a0, a0, a1 -; RV32I-NEXT: ret -; -; RV64I-LABEL: load_i32: -; RV64I: # %bb.0: -; RV64I-NEXT: lbu a1, 1(a0) -; RV64I-NEXT: lbu a2, 0(a0) -; RV64I-NEXT: lbu a3, 2(a0) -; RV64I-NEXT: lb a0, 3(a0) -; RV64I-NEXT: slli a1, a1, 8 -; RV64I-NEXT: or a1, a1, a2 -; RV64I-NEXT: slli a3, a3, 16 -; RV64I-NEXT: slli a0, a0, 24 -; RV64I-NEXT: or a0, a0, a3 -; RV64I-NEXT: or a0, a0, a1 -; RV64I-NEXT: ret +; NOMISALIGN-LABEL: load_i32: +; NOMISALIGN: # %bb.0: +; NOMISALIGN-NEXT: lbu a1, 1(a0) +; NOMISALIGN-NEXT: lbu a2, 0(a0) +; NOMISALIGN-NEXT: lbu a3, 2(a0) +; NOMISALIGN-NEXT: lbu a0, 3(a0) +; NOMISALIGN-NEXT: slli a1, a1, 8 +; NOMISALIGN-NEXT: or a1, a1, a2 +; NOMISALIGN-NEXT: slli a3, a3, 16 +; NOMISALIGN-NEXT: slli a0, a0, 24 +; NOMISALIGN-NEXT: or a0, a0, a3 +; NOMISALIGN-NEXT: or a0, a0, a1 +; NOMISALIGN-NEXT: ret ; ; MISALIGN-LABEL: load_i32: ; MISALIGN: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll @@ -330,7 +330,7 @@ ; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32-NEXT: sw s3, 12(sp) # 4-byte Folded Spill ; RV32-NEXT: mv s0, a0 -; RV32-NEXT: lb a0, 4(a0) +; RV32-NEXT: lbu a0, 4(a0) ; RV32-NEXT: lw a1, 0(s0) ; RV32-NEXT: slli a0, a0, 10 ; RV32-NEXT: srli s1, a1, 22 @@ -437,7 +437,7 @@ ; ; RV32M-LABEL: test_urem_vec: ; RV32M: # %bb.0: -; RV32M-NEXT: lb a1, 4(a0) +; RV32M-NEXT: lbu a1, 4(a0) ; RV32M-NEXT: lw a2, 0(a0) ; RV32M-NEXT: slli a1, a1, 10 ; RV32M-NEXT: srli a3, a2, 22 
@@ -528,7 +528,7 @@ ; RV32MV-NEXT: slli a2, a1, 10 ; RV32MV-NEXT: srli a2, a2, 21 ; RV32MV-NEXT: sh a2, 10(sp) -; RV32MV-NEXT: lb a2, 4(a0) +; RV32MV-NEXT: lbu a2, 4(a0) ; RV32MV-NEXT: slli a2, a2, 10 ; RV32MV-NEXT: srli a1, a1, 22 ; RV32MV-NEXT: or a1, a1, a2