diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -438,14 +438,21 @@
     }
   };
 
-  // For now we support the compressible instructions which can encode all
-  // registers and have a single register source.
-  // TODO: Add more compressed instructions.
+  // These are all of the compressible binary instructions. If an instruction
+  // needs GPRC register class operands, \p NeedGPRC will be set to true.
   auto isCompressible = [](const MachineInstr &MI, bool &NeedGPRC) {
     NeedGPRC = false;
     switch (MI.getOpcode()) {
     default:
       return false;
+    case RISCV::AND:
+    case RISCV::OR:
+    case RISCV::XOR:
+    case RISCV::SUB:
+    case RISCV::ADDW:
+    case RISCV::SUBW:
+      NeedGPRC = true;
+      return true;
     case RISCV::ANDI:
       NeedGPRC = true;
       return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
@@ -462,18 +469,35 @@
     }
   };
 
+  // Returns true if this operand is compressible. For non-registers it always
+  // returns true. The immediate range was already checked in isCompressible.
+  // For registers, it checks if the register is a GPRC register. Reg-reg
+  // instructions that require GPRC need all register operands to be GPRC.
+  auto isCompressibleOpnd = [&](const MachineOperand &MO) {
+    if (!MO.isReg())
+      return true;
+    Register Reg = MO.getReg();
+    Register PhysReg =
+        Register::isPhysicalRegister(Reg) ? Reg : Register(VRM->getPhys(Reg));
+    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
+  };
+
   for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
     const MachineInstr &MI = *MO.getParent();
+    unsigned OpIdx = MI.getOperandNo(&MO);
     bool NeedGPRC;
     if (isCompressible(MI, NeedGPRC)) {
-      unsigned OpIdx = MI.getOperandNo(&MO);
       if (OpIdx == 0 && MI.getOperand(1).isReg()) {
-        tryAddHint(MO, MI.getOperand(1), NeedGPRC);
-        if (MI.isCommutable() && MI.getOperand(2).isReg())
+        if (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))
+          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
+        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
+            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
-      } else if (OpIdx == 1) {
+      } else if (OpIdx == 1 &&
+                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))) {
         tryAddHint(MO, MI.getOperand(0), NeedGPRC);
-      } else if (MI.isCommutable() && OpIdx == 2) {
+      } else if (MI.isCommutable() && OpIdx == 2 &&
+                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
         tryAddHint(MO, MI.getOperand(0), NeedGPRC);
       }
     }
diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
@@ -18,8 +18,8 @@
 ; RV64IA-NEXT: sc.w.aqrl a4, a2, (a0)
 ; RV64IA-NEXT: bnez a4, .LBB0_1
 ; RV64IA-NEXT: .LBB0_3: # %entry
-; RV64IA-NEXT: xor a0, a3, a1
-; RV64IA-NEXT: seqz a0, a0
+; RV64IA-NEXT: xor a1, a3, a1
+; RV64IA-NEXT: seqz a0, a1
 ; RV64IA-NEXT: ret
     i32 signext %val) nounwind {
 entry:
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -6893,23 +6893,23 @@
 ; RV32IA-NEXT: srai a1, a1, 16
 ; RV32IA-NEXT: sll a1, a1, a0
 ; RV32IA-NEXT: li a5, 16
-; RV32IA-NEXT: sub a3, a5, a3
+; RV32IA-NEXT: sub a5, a5, a3
 ; RV32IA-NEXT: .LBB90_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w a5, (a2)
-; RV32IA-NEXT: and a7, a5, a4
-; RV32IA-NEXT: mv a6, a5
-;
RV32IA-NEXT: sll a7, a7, a3 -; RV32IA-NEXT: sra a7, a7, a3 +; RV32IA-NEXT: lr.w a3, (a2) +; RV32IA-NEXT: and a7, a3, a4 +; RV32IA-NEXT: mv a6, a3 +; RV32IA-NEXT: sll a7, a7, a5 +; RV32IA-NEXT: sra a7, a7, a5 ; RV32IA-NEXT: bge a7, a1, .LBB90_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB90_1 Depth=1 -; RV32IA-NEXT: xor a6, a5, a1 +; RV32IA-NEXT: xor a6, a3, a1 ; RV32IA-NEXT: and a6, a6, a4 -; RV32IA-NEXT: xor a6, a5, a6 +; RV32IA-NEXT: xor a6, a3, a6 ; RV32IA-NEXT: .LBB90_3: # in Loop: Header=BB90_1 Depth=1 ; RV32IA-NEXT: sc.w a6, a6, (a2) ; RV32IA-NEXT: bnez a6, .LBB90_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a3, a0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_max_i16_monotonic: @@ -6966,23 +6966,23 @@ ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: li a5, 48 -; RV64IA-NEXT: sub a3, a5, a3 +; RV64IA-NEXT: sub a5, a5, a3 ; RV64IA-NEXT: .LBB90_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a5, (a2) -; RV64IA-NEXT: and a7, a5, a4 -; RV64IA-NEXT: mv a6, a5 -; RV64IA-NEXT: sll a7, a7, a3 -; RV64IA-NEXT: sra a7, a7, a3 +; RV64IA-NEXT: lr.w a3, (a2) +; RV64IA-NEXT: and a7, a3, a4 +; RV64IA-NEXT: mv a6, a3 +; RV64IA-NEXT: sll a7, a7, a5 +; RV64IA-NEXT: sra a7, a7, a5 ; RV64IA-NEXT: bge a7, a1, .LBB90_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB90_1 Depth=1 -; RV64IA-NEXT: xor a6, a5, a1 +; RV64IA-NEXT: xor a6, a3, a1 ; RV64IA-NEXT: and a6, a6, a4 -; RV64IA-NEXT: xor a6, a5, a6 +; RV64IA-NEXT: xor a6, a3, a6 ; RV64IA-NEXT: .LBB90_3: # in Loop: Header=BB90_1 Depth=1 ; RV64IA-NEXT: sc.w a6, a6, (a2) ; RV64IA-NEXT: bnez a6, .LBB90_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a3, a0 ; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b monotonic ret i16 %1 @@ -7043,23 +7043,23 @@ ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: li a5, 16 -; RV32IA-NEXT: sub a3, a5, a3 +; RV32IA-NEXT: sub a5, a5, a3 ; RV32IA-NEXT: .LBB91_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a5, (a2) -; RV32IA-NEXT: and a7, a5, a4 -; RV32IA-NEXT: mv a6, a5 -; RV32IA-NEXT: sll a7, a7, a3 -; RV32IA-NEXT: sra a7, a7, a3 +; RV32IA-NEXT: lr.w.aq a3, (a2) +; RV32IA-NEXT: and a7, a3, a4 +; RV32IA-NEXT: mv a6, a3 +; RV32IA-NEXT: sll a7, a7, a5 +; RV32IA-NEXT: sra a7, a7, a5 ; RV32IA-NEXT: bge a7, a1, .LBB91_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB91_1 Depth=1 -; RV32IA-NEXT: xor a6, a5, a1 +; RV32IA-NEXT: xor a6, a3, a1 ; RV32IA-NEXT: and a6, a6, a4 -; RV32IA-NEXT: xor a6, a5, a6 +; RV32IA-NEXT: xor a6, a3, a6 ; RV32IA-NEXT: .LBB91_3: # in Loop: Header=BB91_1 Depth=1 ; RV32IA-NEXT: sc.w a6, a6, (a2) ; RV32IA-NEXT: bnez a6, .LBB91_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a3, a0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_max_i16_acquire: @@ -7116,23 +7116,23 @@ ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: li a5, 48 -; RV64IA-NEXT: sub a3, a5, a3 +; RV64IA-NEXT: sub a5, a5, a3 ; RV64IA-NEXT: .LBB91_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a5, (a2) -; RV64IA-NEXT: and a7, a5, a4 -; RV64IA-NEXT: mv a6, a5 -; RV64IA-NEXT: sll a7, a7, a3 -; RV64IA-NEXT: sra a7, a7, a3 +; RV64IA-NEXT: lr.w.aq a3, (a2) +; RV64IA-NEXT: and a7, a3, a4 +; RV64IA-NEXT: mv a6, a3 +; RV64IA-NEXT: sll a7, a7, a5 +; RV64IA-NEXT: sra a7, a7, a5 ; RV64IA-NEXT: bge a7, a1, .LBB91_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB91_1 Depth=1 -; RV64IA-NEXT: xor a6, a5, a1 +; RV64IA-NEXT: xor a6, a3, a1 ; 
RV64IA-NEXT: and a6, a6, a4 -; RV64IA-NEXT: xor a6, a5, a6 +; RV64IA-NEXT: xor a6, a3, a6 ; RV64IA-NEXT: .LBB91_3: # in Loop: Header=BB91_1 Depth=1 ; RV64IA-NEXT: sc.w a6, a6, (a2) ; RV64IA-NEXT: bnez a6, .LBB91_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a3, a0 ; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b acquire ret i16 %1 @@ -7193,23 +7193,23 @@ ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: li a5, 16 -; RV32IA-NEXT: sub a3, a5, a3 +; RV32IA-NEXT: sub a5, a5, a3 ; RV32IA-NEXT: .LBB92_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a5, (a2) -; RV32IA-NEXT: and a7, a5, a4 -; RV32IA-NEXT: mv a6, a5 -; RV32IA-NEXT: sll a7, a7, a3 -; RV32IA-NEXT: sra a7, a7, a3 +; RV32IA-NEXT: lr.w a3, (a2) +; RV32IA-NEXT: and a7, a3, a4 +; RV32IA-NEXT: mv a6, a3 +; RV32IA-NEXT: sll a7, a7, a5 +; RV32IA-NEXT: sra a7, a7, a5 ; RV32IA-NEXT: bge a7, a1, .LBB92_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB92_1 Depth=1 -; RV32IA-NEXT: xor a6, a5, a1 +; RV32IA-NEXT: xor a6, a3, a1 ; RV32IA-NEXT: and a6, a6, a4 -; RV32IA-NEXT: xor a6, a5, a6 +; RV32IA-NEXT: xor a6, a3, a6 ; RV32IA-NEXT: .LBB92_3: # in Loop: Header=BB92_1 Depth=1 ; RV32IA-NEXT: sc.w.rl a6, a6, (a2) ; RV32IA-NEXT: bnez a6, .LBB92_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a3, a0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_max_i16_release: @@ -7266,23 +7266,23 @@ ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: li a5, 48 -; RV64IA-NEXT: sub a3, a5, a3 +; RV64IA-NEXT: sub a5, a5, a3 ; RV64IA-NEXT: .LBB92_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a5, (a2) -; RV64IA-NEXT: and a7, a5, a4 -; RV64IA-NEXT: mv a6, a5 -; RV64IA-NEXT: sll a7, a7, a3 -; RV64IA-NEXT: sra a7, a7, a3 +; RV64IA-NEXT: lr.w a3, (a2) +; RV64IA-NEXT: and a7, a3, a4 +; RV64IA-NEXT: mv a6, a3 +; RV64IA-NEXT: sll a7, a7, a5 +; RV64IA-NEXT: sra a7, a7, a5 ; RV64IA-NEXT: bge a7, a1, .LBB92_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB92_1 Depth=1 -; RV64IA-NEXT: xor a6, a5, a1 +; RV64IA-NEXT: xor a6, a3, a1 ; RV64IA-NEXT: and a6, a6, a4 -; RV64IA-NEXT: xor a6, a5, a6 +; RV64IA-NEXT: xor a6, a3, a6 ; RV64IA-NEXT: .LBB92_3: # in Loop: Header=BB92_1 Depth=1 ; RV64IA-NEXT: sc.w.rl a6, a6, (a2) ; RV64IA-NEXT: bnez a6, .LBB92_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a3, a0 ; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b release ret i16 %1 @@ -7343,23 +7343,23 @@ ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: li a5, 16 -; RV32IA-NEXT: sub a3, a5, a3 +; RV32IA-NEXT: sub a5, a5, a3 ; RV32IA-NEXT: .LBB93_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a5, (a2) -; RV32IA-NEXT: and a7, a5, a4 -; RV32IA-NEXT: mv a6, a5 -; RV32IA-NEXT: sll a7, a7, a3 -; RV32IA-NEXT: sra a7, a7, a3 +; RV32IA-NEXT: lr.w.aq a3, (a2) +; RV32IA-NEXT: and a7, a3, a4 +; RV32IA-NEXT: mv a6, a3 +; RV32IA-NEXT: sll a7, a7, a5 +; RV32IA-NEXT: sra a7, a7, a5 ; RV32IA-NEXT: bge a7, a1, .LBB93_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB93_1 Depth=1 -; RV32IA-NEXT: xor a6, a5, a1 +; RV32IA-NEXT: xor a6, a3, a1 ; RV32IA-NEXT: and a6, a6, a4 -; RV32IA-NEXT: xor a6, a5, a6 +; RV32IA-NEXT: xor a6, a3, a6 ; RV32IA-NEXT: .LBB93_3: # in Loop: Header=BB93_1 Depth=1 ; RV32IA-NEXT: sc.w.rl a6, a6, (a2) ; RV32IA-NEXT: bnez a6, .LBB93_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a3, a0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: 
atomicrmw_max_i16_acq_rel: @@ -7416,23 +7416,23 @@ ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: li a5, 48 -; RV64IA-NEXT: sub a3, a5, a3 +; RV64IA-NEXT: sub a5, a5, a3 ; RV64IA-NEXT: .LBB93_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a5, (a2) -; RV64IA-NEXT: and a7, a5, a4 -; RV64IA-NEXT: mv a6, a5 -; RV64IA-NEXT: sll a7, a7, a3 -; RV64IA-NEXT: sra a7, a7, a3 +; RV64IA-NEXT: lr.w.aq a3, (a2) +; RV64IA-NEXT: and a7, a3, a4 +; RV64IA-NEXT: mv a6, a3 +; RV64IA-NEXT: sll a7, a7, a5 +; RV64IA-NEXT: sra a7, a7, a5 ; RV64IA-NEXT: bge a7, a1, .LBB93_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB93_1 Depth=1 -; RV64IA-NEXT: xor a6, a5, a1 +; RV64IA-NEXT: xor a6, a3, a1 ; RV64IA-NEXT: and a6, a6, a4 -; RV64IA-NEXT: xor a6, a5, a6 +; RV64IA-NEXT: xor a6, a3, a6 ; RV64IA-NEXT: .LBB93_3: # in Loop: Header=BB93_1 Depth=1 ; RV64IA-NEXT: sc.w.rl a6, a6, (a2) ; RV64IA-NEXT: bnez a6, .LBB93_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a3, a0 ; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b acq_rel ret i16 %1 @@ -7493,23 +7493,23 @@ ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: li a5, 16 -; RV32IA-NEXT: sub a3, a5, a3 +; RV32IA-NEXT: sub a5, a5, a3 ; RV32IA-NEXT: .LBB94_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a5, (a2) -; RV32IA-NEXT: and a7, a5, a4 -; RV32IA-NEXT: mv a6, a5 -; RV32IA-NEXT: sll a7, a7, a3 -; RV32IA-NEXT: sra a7, a7, a3 +; RV32IA-NEXT: lr.w.aqrl a3, (a2) +; RV32IA-NEXT: and a7, a3, a4 +; RV32IA-NEXT: mv a6, a3 +; RV32IA-NEXT: sll a7, a7, a5 +; RV32IA-NEXT: sra a7, a7, a5 ; RV32IA-NEXT: bge a7, a1, .LBB94_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB94_1 Depth=1 -; RV32IA-NEXT: xor a6, a5, a1 +; RV32IA-NEXT: xor a6, a3, a1 ; RV32IA-NEXT: and a6, a6, a4 -; RV32IA-NEXT: xor a6, a5, a6 +; RV32IA-NEXT: xor a6, a3, a6 ; RV32IA-NEXT: .LBB94_3: # in Loop: Header=BB94_1 Depth=1 ; RV32IA-NEXT: sc.w.aqrl a6, a6, (a2) ; RV32IA-NEXT: bnez a6, .LBB94_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a3, a0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_max_i16_seq_cst: @@ -7566,23 +7566,23 @@ ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: li a5, 48 -; RV64IA-NEXT: sub a3, a5, a3 +; RV64IA-NEXT: sub a5, a5, a3 ; RV64IA-NEXT: .LBB94_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a5, (a2) -; RV64IA-NEXT: and a7, a5, a4 -; RV64IA-NEXT: mv a6, a5 -; RV64IA-NEXT: sll a7, a7, a3 -; RV64IA-NEXT: sra a7, a7, a3 +; RV64IA-NEXT: lr.w.aqrl a3, (a2) +; RV64IA-NEXT: and a7, a3, a4 +; RV64IA-NEXT: mv a6, a3 +; RV64IA-NEXT: sll a7, a7, a5 +; RV64IA-NEXT: sra a7, a7, a5 ; RV64IA-NEXT: bge a7, a1, .LBB94_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB94_1 Depth=1 -; RV64IA-NEXT: xor a6, a5, a1 +; RV64IA-NEXT: xor a6, a3, a1 ; RV64IA-NEXT: and a6, a6, a4 -; RV64IA-NEXT: xor a6, a5, a6 +; RV64IA-NEXT: xor a6, a3, a6 ; RV64IA-NEXT: .LBB94_3: # in Loop: Header=BB94_1 Depth=1 ; RV64IA-NEXT: sc.w.aqrl a6, a6, (a2) ; RV64IA-NEXT: bnez a6, .LBB94_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a3, a0 ; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b seq_cst ret i16 %1 @@ -7643,23 +7643,23 @@ ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: li a5, 16 -; RV32IA-NEXT: sub a3, a5, a3 +; RV32IA-NEXT: sub a5, a5, a3 ; RV32IA-NEXT: .LBB95_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a5, (a2) -; RV32IA-NEXT: and a7, a5, a4 -; RV32IA-NEXT: 
mv a6, a5 -; RV32IA-NEXT: sll a7, a7, a3 -; RV32IA-NEXT: sra a7, a7, a3 +; RV32IA-NEXT: lr.w a3, (a2) +; RV32IA-NEXT: and a7, a3, a4 +; RV32IA-NEXT: mv a6, a3 +; RV32IA-NEXT: sll a7, a7, a5 +; RV32IA-NEXT: sra a7, a7, a5 ; RV32IA-NEXT: bge a1, a7, .LBB95_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB95_1 Depth=1 -; RV32IA-NEXT: xor a6, a5, a1 +; RV32IA-NEXT: xor a6, a3, a1 ; RV32IA-NEXT: and a6, a6, a4 -; RV32IA-NEXT: xor a6, a5, a6 +; RV32IA-NEXT: xor a6, a3, a6 ; RV32IA-NEXT: .LBB95_3: # in Loop: Header=BB95_1 Depth=1 ; RV32IA-NEXT: sc.w a6, a6, (a2) ; RV32IA-NEXT: bnez a6, .LBB95_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a3, a0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_min_i16_monotonic: @@ -7716,23 +7716,23 @@ ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: li a5, 48 -; RV64IA-NEXT: sub a3, a5, a3 +; RV64IA-NEXT: sub a5, a5, a3 ; RV64IA-NEXT: .LBB95_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a5, (a2) -; RV64IA-NEXT: and a7, a5, a4 -; RV64IA-NEXT: mv a6, a5 -; RV64IA-NEXT: sll a7, a7, a3 -; RV64IA-NEXT: sra a7, a7, a3 +; RV64IA-NEXT: lr.w a3, (a2) +; RV64IA-NEXT: and a7, a3, a4 +; RV64IA-NEXT: mv a6, a3 +; RV64IA-NEXT: sll a7, a7, a5 +; RV64IA-NEXT: sra a7, a7, a5 ; RV64IA-NEXT: bge a1, a7, .LBB95_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB95_1 Depth=1 -; RV64IA-NEXT: xor a6, a5, a1 +; RV64IA-NEXT: xor a6, a3, a1 ; RV64IA-NEXT: and a6, a6, a4 -; RV64IA-NEXT: xor a6, a5, a6 +; RV64IA-NEXT: xor a6, a3, a6 ; RV64IA-NEXT: .LBB95_3: # in Loop: Header=BB95_1 Depth=1 ; RV64IA-NEXT: sc.w a6, a6, (a2) ; RV64IA-NEXT: bnez a6, .LBB95_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a3, a0 ; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b monotonic ret i16 %1 @@ -7793,23 +7793,23 @@ ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: li a5, 16 -; RV32IA-NEXT: sub a3, a5, a3 +; RV32IA-NEXT: sub a5, a5, a3 ; RV32IA-NEXT: .LBB96_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a5, (a2) -; RV32IA-NEXT: and a7, a5, a4 -; RV32IA-NEXT: mv a6, a5 -; RV32IA-NEXT: sll a7, a7, a3 -; RV32IA-NEXT: sra a7, a7, a3 +; RV32IA-NEXT: lr.w.aq a3, (a2) +; RV32IA-NEXT: and a7, a3, a4 +; RV32IA-NEXT: mv a6, a3 +; RV32IA-NEXT: sll a7, a7, a5 +; RV32IA-NEXT: sra a7, a7, a5 ; RV32IA-NEXT: bge a1, a7, .LBB96_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB96_1 Depth=1 -; RV32IA-NEXT: xor a6, a5, a1 +; RV32IA-NEXT: xor a6, a3, a1 ; RV32IA-NEXT: and a6, a6, a4 -; RV32IA-NEXT: xor a6, a5, a6 +; RV32IA-NEXT: xor a6, a3, a6 ; RV32IA-NEXT: .LBB96_3: # in Loop: Header=BB96_1 Depth=1 ; RV32IA-NEXT: sc.w a6, a6, (a2) ; RV32IA-NEXT: bnez a6, .LBB96_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a3, a0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_min_i16_acquire: @@ -7866,23 +7866,23 @@ ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: li a5, 48 -; RV64IA-NEXT: sub a3, a5, a3 +; RV64IA-NEXT: sub a5, a5, a3 ; RV64IA-NEXT: .LBB96_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a5, (a2) -; RV64IA-NEXT: and a7, a5, a4 -; RV64IA-NEXT: mv a6, a5 -; RV64IA-NEXT: sll a7, a7, a3 -; RV64IA-NEXT: sra a7, a7, a3 +; RV64IA-NEXT: lr.w.aq a3, (a2) +; RV64IA-NEXT: and a7, a3, a4 +; RV64IA-NEXT: mv a6, a3 +; RV64IA-NEXT: sll a7, a7, a5 +; RV64IA-NEXT: sra a7, a7, a5 ; RV64IA-NEXT: bge a1, a7, .LBB96_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB96_1 Depth=1 -; RV64IA-NEXT: xor a6, a5, a1 +; RV64IA-NEXT: xor a6, 
a3, a1 ; RV64IA-NEXT: and a6, a6, a4 -; RV64IA-NEXT: xor a6, a5, a6 +; RV64IA-NEXT: xor a6, a3, a6 ; RV64IA-NEXT: .LBB96_3: # in Loop: Header=BB96_1 Depth=1 ; RV64IA-NEXT: sc.w a6, a6, (a2) ; RV64IA-NEXT: bnez a6, .LBB96_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a3, a0 ; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b acquire ret i16 %1 @@ -7943,23 +7943,23 @@ ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: li a5, 16 -; RV32IA-NEXT: sub a3, a5, a3 +; RV32IA-NEXT: sub a5, a5, a3 ; RV32IA-NEXT: .LBB97_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a5, (a2) -; RV32IA-NEXT: and a7, a5, a4 -; RV32IA-NEXT: mv a6, a5 -; RV32IA-NEXT: sll a7, a7, a3 -; RV32IA-NEXT: sra a7, a7, a3 +; RV32IA-NEXT: lr.w a3, (a2) +; RV32IA-NEXT: and a7, a3, a4 +; RV32IA-NEXT: mv a6, a3 +; RV32IA-NEXT: sll a7, a7, a5 +; RV32IA-NEXT: sra a7, a7, a5 ; RV32IA-NEXT: bge a1, a7, .LBB97_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB97_1 Depth=1 -; RV32IA-NEXT: xor a6, a5, a1 +; RV32IA-NEXT: xor a6, a3, a1 ; RV32IA-NEXT: and a6, a6, a4 -; RV32IA-NEXT: xor a6, a5, a6 +; RV32IA-NEXT: xor a6, a3, a6 ; RV32IA-NEXT: .LBB97_3: # in Loop: Header=BB97_1 Depth=1 ; RV32IA-NEXT: sc.w.rl a6, a6, (a2) ; RV32IA-NEXT: bnez a6, .LBB97_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a3, a0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_min_i16_release: @@ -8016,23 +8016,23 @@ ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: li a5, 48 -; RV64IA-NEXT: sub a3, a5, a3 +; RV64IA-NEXT: sub a5, a5, a3 ; RV64IA-NEXT: .LBB97_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a5, (a2) -; RV64IA-NEXT: and a7, a5, a4 -; RV64IA-NEXT: mv a6, a5 -; RV64IA-NEXT: sll a7, a7, a3 -; RV64IA-NEXT: sra a7, a7, a3 +; RV64IA-NEXT: lr.w a3, (a2) +; RV64IA-NEXT: and a7, a3, a4 +; RV64IA-NEXT: mv a6, a3 +; RV64IA-NEXT: sll a7, a7, a5 +; RV64IA-NEXT: sra a7, a7, a5 ; RV64IA-NEXT: bge a1, a7, .LBB97_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB97_1 Depth=1 -; RV64IA-NEXT: xor a6, a5, a1 +; RV64IA-NEXT: xor a6, a3, a1 ; RV64IA-NEXT: and a6, a6, a4 -; RV64IA-NEXT: xor a6, a5, a6 +; RV64IA-NEXT: xor a6, a3, a6 ; RV64IA-NEXT: .LBB97_3: # in Loop: Header=BB97_1 Depth=1 ; RV64IA-NEXT: sc.w.rl a6, a6, (a2) ; RV64IA-NEXT: bnez a6, .LBB97_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a3, a0 ; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b release ret i16 %1 @@ -8093,23 +8093,23 @@ ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: li a5, 16 -; RV32IA-NEXT: sub a3, a5, a3 +; RV32IA-NEXT: sub a5, a5, a3 ; RV32IA-NEXT: .LBB98_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a5, (a2) -; RV32IA-NEXT: and a7, a5, a4 -; RV32IA-NEXT: mv a6, a5 -; RV32IA-NEXT: sll a7, a7, a3 -; RV32IA-NEXT: sra a7, a7, a3 +; RV32IA-NEXT: lr.w.aq a3, (a2) +; RV32IA-NEXT: and a7, a3, a4 +; RV32IA-NEXT: mv a6, a3 +; RV32IA-NEXT: sll a7, a7, a5 +; RV32IA-NEXT: sra a7, a7, a5 ; RV32IA-NEXT: bge a1, a7, .LBB98_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB98_1 Depth=1 -; RV32IA-NEXT: xor a6, a5, a1 +; RV32IA-NEXT: xor a6, a3, a1 ; RV32IA-NEXT: and a6, a6, a4 -; RV32IA-NEXT: xor a6, a5, a6 +; RV32IA-NEXT: xor a6, a3, a6 ; RV32IA-NEXT: .LBB98_3: # in Loop: Header=BB98_1 Depth=1 ; RV32IA-NEXT: sc.w.rl a6, a6, (a2) ; RV32IA-NEXT: bnez a6, .LBB98_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a3, a0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: 
atomicrmw_min_i16_acq_rel: @@ -8166,23 +8166,23 @@ ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: li a5, 48 -; RV64IA-NEXT: sub a3, a5, a3 +; RV64IA-NEXT: sub a5, a5, a3 ; RV64IA-NEXT: .LBB98_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a5, (a2) -; RV64IA-NEXT: and a7, a5, a4 -; RV64IA-NEXT: mv a6, a5 -; RV64IA-NEXT: sll a7, a7, a3 -; RV64IA-NEXT: sra a7, a7, a3 +; RV64IA-NEXT: lr.w.aq a3, (a2) +; RV64IA-NEXT: and a7, a3, a4 +; RV64IA-NEXT: mv a6, a3 +; RV64IA-NEXT: sll a7, a7, a5 +; RV64IA-NEXT: sra a7, a7, a5 ; RV64IA-NEXT: bge a1, a7, .LBB98_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB98_1 Depth=1 -; RV64IA-NEXT: xor a6, a5, a1 +; RV64IA-NEXT: xor a6, a3, a1 ; RV64IA-NEXT: and a6, a6, a4 -; RV64IA-NEXT: xor a6, a5, a6 +; RV64IA-NEXT: xor a6, a3, a6 ; RV64IA-NEXT: .LBB98_3: # in Loop: Header=BB98_1 Depth=1 ; RV64IA-NEXT: sc.w.rl a6, a6, (a2) ; RV64IA-NEXT: bnez a6, .LBB98_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a3, a0 ; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b acq_rel ret i16 %1 @@ -8243,23 +8243,23 @@ ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: li a5, 16 -; RV32IA-NEXT: sub a3, a5, a3 +; RV32IA-NEXT: sub a5, a5, a3 ; RV32IA-NEXT: .LBB99_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a5, (a2) -; RV32IA-NEXT: and a7, a5, a4 -; RV32IA-NEXT: mv a6, a5 -; RV32IA-NEXT: sll a7, a7, a3 -; RV32IA-NEXT: sra a7, a7, a3 +; RV32IA-NEXT: lr.w.aqrl a3, (a2) +; RV32IA-NEXT: and a7, a3, a4 +; RV32IA-NEXT: mv a6, a3 +; RV32IA-NEXT: sll a7, a7, a5 +; RV32IA-NEXT: sra a7, a7, a5 ; RV32IA-NEXT: bge a1, a7, .LBB99_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB99_1 Depth=1 -; RV32IA-NEXT: xor a6, a5, a1 +; RV32IA-NEXT: xor a6, a3, a1 ; RV32IA-NEXT: and a6, a6, a4 -; RV32IA-NEXT: xor a6, a5, a6 +; RV32IA-NEXT: xor a6, a3, a6 ; RV32IA-NEXT: .LBB99_3: # in Loop: Header=BB99_1 Depth=1 ; RV32IA-NEXT: sc.w.aqrl a6, a6, (a2) ; RV32IA-NEXT: bnez a6, .LBB99_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a3, a0 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_min_i16_seq_cst: @@ -8316,23 +8316,23 @@ ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: li a5, 48 -; RV64IA-NEXT: sub a3, a5, a3 +; RV64IA-NEXT: sub a5, a5, a3 ; RV64IA-NEXT: .LBB99_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a5, (a2) -; RV64IA-NEXT: and a7, a5, a4 -; RV64IA-NEXT: mv a6, a5 -; RV64IA-NEXT: sll a7, a7, a3 -; RV64IA-NEXT: sra a7, a7, a3 +; RV64IA-NEXT: lr.w.aqrl a3, (a2) +; RV64IA-NEXT: and a7, a3, a4 +; RV64IA-NEXT: mv a6, a3 +; RV64IA-NEXT: sll a7, a7, a5 +; RV64IA-NEXT: sra a7, a7, a5 ; RV64IA-NEXT: bge a1, a7, .LBB99_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB99_1 Depth=1 -; RV64IA-NEXT: xor a6, a5, a1 +; RV64IA-NEXT: xor a6, a3, a1 ; RV64IA-NEXT: and a6, a6, a4 -; RV64IA-NEXT: xor a6, a5, a6 +; RV64IA-NEXT: xor a6, a3, a6 ; RV64IA-NEXT: .LBB99_3: # in Loop: Header=BB99_1 Depth=1 ; RV64IA-NEXT: sc.w.aqrl a6, a6, (a2) ; RV64IA-NEXT: bnez a6, .LBB99_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a3, a0 ; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b seq_cst ret i16 %1 diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll --- a/llvm/test/CodeGen/RISCV/atomic-signext.ll +++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll @@ -1678,23 +1678,23 @@ ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: li a5, 16 -; 
RV32IA-NEXT: sub a3, a5, a3 +; RV32IA-NEXT: sub a5, a5, a3 ; RV32IA-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a5, (a2) -; RV32IA-NEXT: and a7, a5, a4 -; RV32IA-NEXT: mv a6, a5 -; RV32IA-NEXT: sll a7, a7, a3 -; RV32IA-NEXT: sra a7, a7, a3 +; RV32IA-NEXT: lr.w a3, (a2) +; RV32IA-NEXT: and a7, a3, a4 +; RV32IA-NEXT: mv a6, a3 +; RV32IA-NEXT: sll a7, a7, a5 +; RV32IA-NEXT: sra a7, a7, a5 ; RV32IA-NEXT: bge a7, a1, .LBB21_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB21_1 Depth=1 -; RV32IA-NEXT: xor a6, a5, a1 +; RV32IA-NEXT: xor a6, a3, a1 ; RV32IA-NEXT: and a6, a6, a4 -; RV32IA-NEXT: xor a6, a5, a6 +; RV32IA-NEXT: xor a6, a3, a6 ; RV32IA-NEXT: .LBB21_3: # in Loop: Header=BB21_1 Depth=1 ; RV32IA-NEXT: sc.w a6, a6, (a2) ; RV32IA-NEXT: bnez a6, .LBB21_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a3, a0 ; RV32IA-NEXT: slli a0, a0, 16 ; RV32IA-NEXT: srai a0, a0, 16 ; RV32IA-NEXT: ret @@ -1754,23 +1754,23 @@ ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: li a5, 48 -; RV64IA-NEXT: sub a3, a5, a3 +; RV64IA-NEXT: sub a5, a5, a3 ; RV64IA-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a5, (a2) -; RV64IA-NEXT: and a7, a5, a4 -; RV64IA-NEXT: mv a6, a5 -; RV64IA-NEXT: sll a7, a7, a3 -; RV64IA-NEXT: sra a7, a7, a3 +; RV64IA-NEXT: lr.w a3, (a2) +; RV64IA-NEXT: and a7, a3, a4 +; RV64IA-NEXT: mv a6, a3 +; RV64IA-NEXT: sll a7, a7, a5 +; RV64IA-NEXT: sra a7, a7, a5 ; RV64IA-NEXT: bge a7, a1, .LBB21_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB21_1 Depth=1 -; RV64IA-NEXT: xor a6, a5, a1 +; RV64IA-NEXT: xor a6, a3, a1 ; RV64IA-NEXT: and a6, a6, a4 -; RV64IA-NEXT: xor a6, a5, a6 +; RV64IA-NEXT: xor a6, a3, a6 ; RV64IA-NEXT: .LBB21_3: # in Loop: Header=BB21_1 Depth=1 ; RV64IA-NEXT: sc.w a6, a6, (a2) ; RV64IA-NEXT: bnez a6, .LBB21_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a3, a0 ; RV64IA-NEXT: slli a0, a0, 48 ; RV64IA-NEXT: srai a0, a0, 48 ; RV64IA-NEXT: ret @@ -1834,23 +1834,23 @@ ; RV32IA-NEXT: srai a1, a1, 16 ; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: li a5, 16 -; RV32IA-NEXT: sub a3, a5, a3 +; RV32IA-NEXT: sub a5, a5, a3 ; RV32IA-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a5, (a2) -; RV32IA-NEXT: and a7, a5, a4 -; RV32IA-NEXT: mv a6, a5 -; RV32IA-NEXT: sll a7, a7, a3 -; RV32IA-NEXT: sra a7, a7, a3 +; RV32IA-NEXT: lr.w a3, (a2) +; RV32IA-NEXT: and a7, a3, a4 +; RV32IA-NEXT: mv a6, a3 +; RV32IA-NEXT: sll a7, a7, a5 +; RV32IA-NEXT: sra a7, a7, a5 ; RV32IA-NEXT: bge a1, a7, .LBB22_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB22_1 Depth=1 -; RV32IA-NEXT: xor a6, a5, a1 +; RV32IA-NEXT: xor a6, a3, a1 ; RV32IA-NEXT: and a6, a6, a4 -; RV32IA-NEXT: xor a6, a5, a6 +; RV32IA-NEXT: xor a6, a3, a6 ; RV32IA-NEXT: .LBB22_3: # in Loop: Header=BB22_1 Depth=1 ; RV32IA-NEXT: sc.w a6, a6, (a2) ; RV32IA-NEXT: bnez a6, .LBB22_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a3, a0 ; RV32IA-NEXT: slli a0, a0, 16 ; RV32IA-NEXT: srai a0, a0, 16 ; RV32IA-NEXT: ret @@ -1910,23 +1910,23 @@ ; RV64IA-NEXT: srai a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: li a5, 48 -; RV64IA-NEXT: sub a3, a5, a3 +; RV64IA-NEXT: sub a5, a5, a3 ; RV64IA-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a5, (a2) -; RV64IA-NEXT: and a7, a5, a4 -; RV64IA-NEXT: mv a6, a5 -; RV64IA-NEXT: sll a7, a7, a3 -; RV64IA-NEXT: sra a7, a7, a3 +; RV64IA-NEXT: lr.w a3, (a2) +; RV64IA-NEXT: and a7, a3, a4 +; 
RV64IA-NEXT: mv a6, a3 +; RV64IA-NEXT: sll a7, a7, a5 +; RV64IA-NEXT: sra a7, a7, a5 ; RV64IA-NEXT: bge a1, a7, .LBB22_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB22_1 Depth=1 -; RV64IA-NEXT: xor a6, a5, a1 +; RV64IA-NEXT: xor a6, a3, a1 ; RV64IA-NEXT: and a6, a6, a4 -; RV64IA-NEXT: xor a6, a5, a6 +; RV64IA-NEXT: xor a6, a3, a6 ; RV64IA-NEXT: .LBB22_3: # in Loop: Header=BB22_1 Depth=1 ; RV64IA-NEXT: sc.w a6, a6, (a2) ; RV64IA-NEXT: bnez a6, .LBB22_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a3, a0 ; RV64IA-NEXT: slli a0, a0, 48 ; RV64IA-NEXT: srai a0, a0, 48 ; RV64IA-NEXT: ret @@ -3864,9 +3864,9 @@ ; RV32IA-NEXT: sc.w a5, a5, (a3) ; RV32IA-NEXT: bnez a5, .LBB48_1 ; RV32IA-NEXT: .LBB48_3: -; RV32IA-NEXT: and a0, a2, a4 -; RV32IA-NEXT: xor a0, a1, a0 -; RV32IA-NEXT: seqz a0, a0 +; RV32IA-NEXT: and a2, a2, a4 +; RV32IA-NEXT: xor a1, a1, a2 +; RV32IA-NEXT: seqz a0, a1 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i8_monotonic_monotonic_val1: @@ -3903,9 +3903,9 @@ ; RV64IA-NEXT: sc.w a5, a5, (a3) ; RV64IA-NEXT: bnez a5, .LBB48_1 ; RV64IA-NEXT: .LBB48_3: -; RV64IA-NEXT: and a0, a2, a4 -; RV64IA-NEXT: xor a0, a1, a0 -; RV64IA-NEXT: seqz a0, a0 +; RV64IA-NEXT: and a2, a2, a4 +; RV64IA-NEXT: xor a1, a1, a2 +; RV64IA-NEXT: seqz a0, a1 ; RV64IA-NEXT: ret %1 = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic %2 = extractvalue { i8, i1 } %1, 1 @@ -4035,9 +4035,9 @@ ; RV32IA-NEXT: sc.w a4, a4, (a3) ; RV32IA-NEXT: bnez a4, .LBB50_1 ; RV32IA-NEXT: .LBB50_3: -; RV32IA-NEXT: and a0, a2, a5 -; RV32IA-NEXT: xor a0, a1, a0 -; RV32IA-NEXT: seqz a0, a0 +; RV32IA-NEXT: and a2, a2, a5 +; RV32IA-NEXT: xor a1, a1, a2 +; RV32IA-NEXT: seqz a0, a1 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i16_monotonic_monotonic_val1: @@ -4075,9 +4075,9 @@ ; RV64IA-NEXT: sc.w a4, a4, (a3) ; RV64IA-NEXT: bnez a4, .LBB50_1 ; RV64IA-NEXT: .LBB50_3: -; RV64IA-NEXT: and a0, a2, a5 -; RV64IA-NEXT: xor a0, a1, a0 -; RV64IA-NEXT: seqz a0, a0 +; RV64IA-NEXT: and a2, a2, a5 +; RV64IA-NEXT: xor a1, a1, a2 +; RV64IA-NEXT: seqz a0, a1 ; RV64IA-NEXT: ret %1 = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic %2 = extractvalue { i16, i1 } %1, 1 @@ -4164,8 +4164,8 @@ ; RV32IA-NEXT: sc.w a4, a2, (a0) ; RV32IA-NEXT: bnez a4, .LBB52_1 ; RV32IA-NEXT: .LBB52_3: -; RV32IA-NEXT: xor a0, a3, a1 -; RV32IA-NEXT: seqz a0, a0 +; RV32IA-NEXT: xor a1, a3, a1 +; RV32IA-NEXT: seqz a0, a1 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: cmpxchg_i32_monotonic_monotonic_val1: @@ -4190,8 +4190,8 @@ ; RV64IA-NEXT: sc.w a4, a2, (a0) ; RV64IA-NEXT: bnez a4, .LBB52_1 ; RV64IA-NEXT: .LBB52_3: -; RV64IA-NEXT: xor a0, a3, a1 -; RV64IA-NEXT: seqz a0, a0 +; RV64IA-NEXT: xor a1, a3, a1 +; RV64IA-NEXT: seqz a0, a1 ; RV64IA-NEXT: ret %1 = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic %2 = extractvalue { i32, i1 } %1, 1 diff --git a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll --- a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll +++ b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll @@ -139,11 +139,11 @@ ; RV64I-NEXT: slli a5, a5, 24 ; RV64I-NEXT: or a3, a5, a3 ; RV64I-NEXT: or a1, a3, a1 -; RV64I-NEXT: and a3, a0, a4 -; RV64I-NEXT: slli a3, a3, 24 -; RV64I-NEXT: srliw a4, a0, 24 -; RV64I-NEXT: slli a4, a4, 32 -; RV64I-NEXT: or a3, a3, a4 +; RV64I-NEXT: and a4, a0, a4 +; RV64I-NEXT: slli a4, a4, 24 +; RV64I-NEXT: srliw a3, a0, 24 +; RV64I-NEXT: slli a3, a3, 32 +; RV64I-NEXT: or a3, a4, a3 ; RV64I-NEXT: and a2, a0, a2 ; RV64I-NEXT: slli a2, a2, 40 ; RV64I-NEXT: slli a0, a0, 56 @@ -611,11 +611,11 
@@ ; RV64I-NEXT: slli a5, a5, 24 ; RV64I-NEXT: or a3, a5, a3 ; RV64I-NEXT: or a1, a3, a1 -; RV64I-NEXT: and a3, a0, a4 -; RV64I-NEXT: slli a3, a3, 24 -; RV64I-NEXT: srliw a4, a0, 24 -; RV64I-NEXT: slli a4, a4, 32 -; RV64I-NEXT: or a3, a3, a4 +; RV64I-NEXT: and a4, a0, a4 +; RV64I-NEXT: slli a4, a4, 24 +; RV64I-NEXT: srliw a3, a0, 24 +; RV64I-NEXT: slli a3, a3, 32 +; RV64I-NEXT: or a3, a4, a3 ; RV64I-NEXT: and a2, a0, a2 ; RV64I-NEXT: slli a2, a2, 40 ; RV64I-NEXT: slli a0, a0, 56 diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll @@ -202,8 +202,8 @@ ; RV32I-FPELIM-NEXT: xor a4, a7, a4 ; RV32I-FPELIM-NEXT: or a4, a4, a5 ; RV32I-FPELIM-NEXT: xor a0, a0, a1 -; RV32I-FPELIM-NEXT: xor a1, a3, a2 -; RV32I-FPELIM-NEXT: or a0, a1, a0 +; RV32I-FPELIM-NEXT: xor a2, a3, a2 +; RV32I-FPELIM-NEXT: or a0, a2, a0 ; RV32I-FPELIM-NEXT: or a0, a0, a4 ; RV32I-FPELIM-NEXT: seqz a0, a0 ; RV32I-FPELIM-NEXT: ret @@ -226,8 +226,8 @@ ; RV32I-WITHFP-NEXT: xor a4, a7, a4 ; RV32I-WITHFP-NEXT: or a4, a4, a5 ; RV32I-WITHFP-NEXT: xor a0, a0, a1 -; RV32I-WITHFP-NEXT: xor a1, a3, a2 -; RV32I-WITHFP-NEXT: or a0, a1, a0 +; RV32I-WITHFP-NEXT: xor a2, a3, a2 +; RV32I-WITHFP-NEXT: or a0, a2, a0 ; RV32I-WITHFP-NEXT: or a0, a0, a4 ; RV32I-WITHFP-NEXT: seqz a0, a0 ; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll @@ -118,8 +118,8 @@ ; RV64I-NEXT: xor a4, a7, a4 ; RV64I-NEXT: or a4, a4, a5 ; RV64I-NEXT: xor a0, a0, a1 -; RV64I-NEXT: xor a1, a3, a2 -; RV64I-NEXT: or a0, a1, a0 +; RV64I-NEXT: xor a2, a3, a2 +; RV64I-NEXT: or a0, a2, a0 ; RV64I-NEXT: or a0, a0, a4 ; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll --- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll @@ -2155,13 +2155,13 @@ ; RV32I-NEXT: lui a2, 349525 ; RV32I-NEXT: addi s2, a2, 1365 ; RV32I-NEXT: and a0, a0, s2 -; RV32I-NEXT: sub a0, a1, a0 -; RV32I-NEXT: lui a1, 209715 -; RV32I-NEXT: addi s3, a1, 819 -; RV32I-NEXT: and a1, a0, s3 -; RV32I-NEXT: srli a0, a0, 2 -; RV32I-NEXT: and a0, a0, s3 -; RV32I-NEXT: add a0, a1, a0 +; RV32I-NEXT: sub a1, a1, a0 +; RV32I-NEXT: lui a0, 209715 +; RV32I-NEXT: addi s3, a0, 819 +; RV32I-NEXT: and a0, a1, s3 +; RV32I-NEXT: srli a1, a1, 2 +; RV32I-NEXT: and a1, a1, s3 +; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: srli a1, a0, 4 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: lui a1, 61681 @@ -2174,11 +2174,11 @@ ; RV32I-NEXT: srli s5, a0, 24 ; RV32I-NEXT: srli a0, s0, 1 ; RV32I-NEXT: and a0, a0, s2 -; RV32I-NEXT: sub a0, s0, a0 -; RV32I-NEXT: and a1, a0, s3 -; RV32I-NEXT: srli a0, a0, 2 -; RV32I-NEXT: and a0, a0, s3 -; RV32I-NEXT: add a0, a1, a0 +; RV32I-NEXT: sub s0, s0, a0 +; RV32I-NEXT: and a0, s0, s3 +; RV32I-NEXT: srli s0, s0, 2 +; RV32I-NEXT: and a1, s0, s3 +; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: srli a1, a0, 4 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: and a0, a0, s4 diff --git a/llvm/test/CodeGen/RISCV/div-by-constant.ll b/llvm/test/CodeGen/RISCV/div-by-constant.ll --- 
a/llvm/test/CodeGen/RISCV/div-by-constant.ll +++ b/llvm/test/CodeGen/RISCV/div-by-constant.ll @@ -82,8 +82,8 @@ ; RV32-NEXT: mulhu a6, a5, a4 ; RV32-NEXT: add a3, a6, a3 ; RV32-NEXT: sltu a0, a0, a2 -; RV32-NEXT: sub a0, a1, a0 -; RV32-NEXT: mul a1, a0, a4 +; RV32-NEXT: sub a1, a1, a0 +; RV32-NEXT: mul a1, a1, a4 ; RV32-NEXT: add a1, a3, a1 ; RV32-NEXT: mul a0, a5, a4 ; RV32-NEXT: ret @@ -339,10 +339,10 @@ ; RV32-NEXT: lui a1, 449390 ; RV32-NEXT: addi a1, a1, -1171 ; RV32-NEXT: mulh a1, a0, a1 -; RV32-NEXT: sub a0, a1, a0 -; RV32-NEXT: srli a1, a0, 31 -; RV32-NEXT: srai a0, a0, 2 -; RV32-NEXT: add a0, a0, a1 +; RV32-NEXT: sub a1, a1, a0 +; RV32-NEXT: srli a0, a1, 31 +; RV32-NEXT: srai a1, a1, 2 +; RV32-NEXT: add a0, a1, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: sdiv_constant_sub_srai: @@ -352,10 +352,10 @@ ; RV64-NEXT: addiw a2, a2, -1171 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: subw a0, a1, a0 -; RV64-NEXT: srliw a1, a0, 31 -; RV64-NEXT: sraiw a0, a0, 2 -; RV64-NEXT: add a0, a0, a1 +; RV64-NEXT: subw a1, a1, a0 +; RV64-NEXT: srliw a0, a1, 31 +; RV64-NEXT: sraiw a1, a1, 2 +; RV64-NEXT: add a0, a1, a0 ; RV64-NEXT: ret %1 = sdiv i32 %a, -7 ret i32 %1 @@ -453,10 +453,10 @@ ; RV64-NEXT: lui a1, %hi(.LCPI15_0) ; RV64-NEXT: ld a1, %lo(.LCPI15_0)(a1) ; RV64-NEXT: mulh a1, a0, a1 -; RV64-NEXT: sub a0, a1, a0 -; RV64-NEXT: srli a1, a0, 63 -; RV64-NEXT: srai a0, a0, 1 -; RV64-NEXT: add a0, a0, a1 +; RV64-NEXT: sub a1, a1, a0 +; RV64-NEXT: srli a0, a1, 63 +; RV64-NEXT: srai a1, a1, 1 +; RV64-NEXT: add a0, a1, a0 ; RV64-NEXT: ret %1 = sdiv i64 %a, -3 ret i64 %1 @@ -628,11 +628,11 @@ ; RV32IM-NEXT: li a2, 109 ; RV32IM-NEXT: mul a1, a1, a2 ; RV32IM-NEXT: srli a1, a1, 8 -; RV32IM-NEXT: sub a0, a1, a0 -; RV32IM-NEXT: slli a0, a0, 24 -; RV32IM-NEXT: srli a1, a0, 31 -; RV32IM-NEXT: srai a0, a0, 26 -; RV32IM-NEXT: add a0, a0, a1 +; RV32IM-NEXT: sub a1, a1, a0 +; RV32IM-NEXT: slli a1, a1, 24 +; RV32IM-NEXT: srli a0, a1, 31 +; RV32IM-NEXT: srai a1, a1, 26 +; RV32IM-NEXT: add a0, a1, a0 ; RV32IM-NEXT: ret ; ; RV32IMZB-LABEL: sdiv8_constant_sub_srai: @@ -641,11 +641,11 @@ ; RV32IMZB-NEXT: li a2, 109 ; RV32IMZB-NEXT: mul a1, a1, a2 ; RV32IMZB-NEXT: srli a1, a1, 8 -; RV32IMZB-NEXT: sub a0, a1, a0 -; RV32IMZB-NEXT: slli a0, a0, 24 -; RV32IMZB-NEXT: srli a1, a0, 31 -; RV32IMZB-NEXT: srai a0, a0, 26 -; RV32IMZB-NEXT: add a0, a0, a1 +; RV32IMZB-NEXT: sub a1, a1, a0 +; RV32IMZB-NEXT: slli a1, a1, 24 +; RV32IMZB-NEXT: srli a0, a1, 31 +; RV32IMZB-NEXT: srai a1, a1, 26 +; RV32IMZB-NEXT: add a0, a1, a0 ; RV32IMZB-NEXT: ret ; ; RV64IM-LABEL: sdiv8_constant_sub_srai: @@ -655,11 +655,11 @@ ; RV64IM-NEXT: li a2, 109 ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a1, a1, 8 -; RV64IM-NEXT: subw a0, a1, a0 -; RV64IM-NEXT: slli a0, a0, 56 -; RV64IM-NEXT: srli a1, a0, 63 -; RV64IM-NEXT: srai a0, a0, 58 -; RV64IM-NEXT: add a0, a0, a1 +; RV64IM-NEXT: subw a1, a1, a0 +; RV64IM-NEXT: slli a1, a1, 56 +; RV64IM-NEXT: srli a0, a1, 63 +; RV64IM-NEXT: srai a1, a1, 58 +; RV64IM-NEXT: add a0, a1, a0 ; RV64IM-NEXT: ret ; ; RV64IMZB-LABEL: sdiv8_constant_sub_srai: @@ -668,11 +668,11 @@ ; RV64IMZB-NEXT: li a2, 109 ; RV64IMZB-NEXT: mul a1, a1, a2 ; RV64IMZB-NEXT: srli a1, a1, 8 -; RV64IMZB-NEXT: subw a0, a1, a0 -; RV64IMZB-NEXT: slli a0, a0, 56 -; RV64IMZB-NEXT: srli a1, a0, 63 -; RV64IMZB-NEXT: srai a0, a0, 58 -; RV64IMZB-NEXT: add a0, a0, a1 +; RV64IMZB-NEXT: subw a1, a1, a0 +; RV64IMZB-NEXT: slli a1, a1, 56 +; RV64IMZB-NEXT: srli a0, a1, 63 +; RV64IMZB-NEXT: srai a1, a1, 58 +; RV64IMZB-NEXT: add a0, a1, a0 ; 
RV64IMZB-NEXT: ret %1 = sdiv i8 %a, -7 ret i8 %1 @@ -849,11 +849,11 @@ ; RV32IM-NEXT: addi a2, a2, 1911 ; RV32IM-NEXT: mul a1, a1, a2 ; RV32IM-NEXT: srli a1, a1, 16 -; RV32IM-NEXT: sub a0, a1, a0 -; RV32IM-NEXT: slli a0, a0, 16 -; RV32IM-NEXT: srli a1, a0, 31 -; RV32IM-NEXT: srai a0, a0, 19 -; RV32IM-NEXT: add a0, a0, a1 +; RV32IM-NEXT: sub a1, a1, a0 +; RV32IM-NEXT: slli a1, a1, 16 +; RV32IM-NEXT: srli a0, a1, 31 +; RV32IM-NEXT: srai a1, a1, 19 +; RV32IM-NEXT: add a0, a1, a0 ; RV32IM-NEXT: ret ; ; RV32IMZB-LABEL: sdiv16_constant_sub_srai: @@ -863,11 +863,11 @@ ; RV32IMZB-NEXT: addi a2, a2, 1911 ; RV32IMZB-NEXT: mul a1, a1, a2 ; RV32IMZB-NEXT: srli a1, a1, 16 -; RV32IMZB-NEXT: sub a0, a1, a0 -; RV32IMZB-NEXT: slli a0, a0, 16 -; RV32IMZB-NEXT: srli a1, a0, 31 -; RV32IMZB-NEXT: srai a0, a0, 19 -; RV32IMZB-NEXT: add a0, a0, a1 +; RV32IMZB-NEXT: sub a1, a1, a0 +; RV32IMZB-NEXT: slli a1, a1, 16 +; RV32IMZB-NEXT: srli a0, a1, 31 +; RV32IMZB-NEXT: srai a1, a1, 19 +; RV32IMZB-NEXT: add a0, a1, a0 ; RV32IMZB-NEXT: ret ; ; RV64IM-LABEL: sdiv16_constant_sub_srai: @@ -878,11 +878,11 @@ ; RV64IM-NEXT: addiw a2, a2, 1911 ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a1, a1, 16 -; RV64IM-NEXT: subw a0, a1, a0 -; RV64IM-NEXT: slli a0, a0, 48 -; RV64IM-NEXT: srli a1, a0, 63 -; RV64IM-NEXT: srai a0, a0, 51 -; RV64IM-NEXT: add a0, a0, a1 +; RV64IM-NEXT: subw a1, a1, a0 +; RV64IM-NEXT: slli a1, a1, 48 +; RV64IM-NEXT: srli a0, a1, 63 +; RV64IM-NEXT: srai a1, a1, 51 +; RV64IM-NEXT: add a0, a1, a0 ; RV64IM-NEXT: ret ; ; RV64IMZB-LABEL: sdiv16_constant_sub_srai: @@ -892,11 +892,11 @@ ; RV64IMZB-NEXT: addiw a2, a2, 1911 ; RV64IMZB-NEXT: mul a1, a1, a2 ; RV64IMZB-NEXT: srli a1, a1, 16 -; RV64IMZB-NEXT: subw a0, a1, a0 -; RV64IMZB-NEXT: slli a0, a0, 48 -; RV64IMZB-NEXT: srli a1, a0, 63 -; RV64IMZB-NEXT: srai a0, a0, 51 -; RV64IMZB-NEXT: add a0, a0, a1 +; RV64IMZB-NEXT: subw a1, a1, a0 +; RV64IMZB-NEXT: slli a1, a1, 48 +; RV64IMZB-NEXT: srli a0, a1, 63 +; RV64IMZB-NEXT: srai a1, a1, 51 +; RV64IMZB-NEXT: add a0, a1, a0 ; RV64IMZB-NEXT: ret %1 = sdiv i16 %a, -15 ret i16 %1 diff --git a/llvm/test/CodeGen/RISCV/div-pow2.ll b/llvm/test/CodeGen/RISCV/div-pow2.ll --- a/llvm/test/CodeGen/RISCV/div-pow2.ll +++ b/llvm/test/CodeGen/RISCV/div-pow2.ll @@ -209,9 +209,9 @@ ; RV32I-NEXT: sltu a0, a2, a0 ; RV32I-NEXT: add a1, a1, a0 ; RV32I-NEXT: slli a0, a1, 31 -; RV32I-NEXT: or a2, a3, a0 -; RV32I-NEXT: neg a0, a2 -; RV32I-NEXT: snez a2, a2 +; RV32I-NEXT: or a3, a3, a0 +; RV32I-NEXT: neg a0, a3 +; RV32I-NEXT: snez a2, a3 ; RV32I-NEXT: srai a1, a1, 1 ; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: neg a1, a1 @@ -265,9 +265,9 @@ ; RV32I-NEXT: sltu a0, a2, a0 ; RV32I-NEXT: add a1, a1, a0 ; RV32I-NEXT: slli a0, a1, 21 -; RV32I-NEXT: or a2, a3, a0 -; RV32I-NEXT: neg a0, a2 -; RV32I-NEXT: snez a2, a2 +; RV32I-NEXT: or a3, a3, a0 +; RV32I-NEXT: neg a0, a3 +; RV32I-NEXT: snez a2, a3 ; RV32I-NEXT: srai a1, a1, 11 ; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: neg a1, a1 @@ -322,9 +322,9 @@ ; RV32I-NEXT: sltu a0, a2, a0 ; RV32I-NEXT: add a1, a1, a0 ; RV32I-NEXT: slli a0, a1, 20 -; RV32I-NEXT: or a2, a3, a0 -; RV32I-NEXT: neg a0, a2 -; RV32I-NEXT: snez a2, a2 +; RV32I-NEXT: or a3, a3, a0 +; RV32I-NEXT: neg a0, a3 +; RV32I-NEXT: snez a2, a3 ; RV32I-NEXT: srai a1, a1, 12 ; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: neg a1, a1 @@ -379,9 +379,9 @@ ; RV32I-NEXT: sltu a0, a2, a0 ; RV32I-NEXT: add a1, a1, a0 ; RV32I-NEXT: slli a0, a1, 16 -; RV32I-NEXT: or a2, a3, a0 -; RV32I-NEXT: neg a0, a2 -; RV32I-NEXT: snez a2, a2 +; RV32I-NEXT: or a3, a3, a0 +; 
RV32I-NEXT: neg a0, a3 +; RV32I-NEXT: snez a2, a3 ; RV32I-NEXT: srai a1, a1, 16 ; RV32I-NEXT: add a1, a1, a2 ; RV32I-NEXT: neg a1, a1 diff --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll --- a/llvm/test/CodeGen/RISCV/div.ll +++ b/llvm/test/CodeGen/RISCV/div.ll @@ -197,8 +197,8 @@ ; RV32IM-NEXT: mulhu a6, a5, a4 ; RV32IM-NEXT: add a3, a6, a3 ; RV32IM-NEXT: sltu a0, a0, a2 -; RV32IM-NEXT: sub a0, a1, a0 -; RV32IM-NEXT: mul a1, a0, a4 +; RV32IM-NEXT: sub a1, a1, a0 +; RV32IM-NEXT: mul a1, a1, a4 ; RV32IM-NEXT: add a1, a3, a1 ; RV32IM-NEXT: mul a0, a5, a4 ; RV32IM-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/double-arith-strict.ll b/llvm/test/CodeGen/RISCV/double-arith-strict.ll --- a/llvm/test/CodeGen/RISCV/double-arith-strict.ll +++ b/llvm/test/CodeGen/RISCV/double-arith-strict.ll @@ -293,8 +293,8 @@ ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __adddf3@plt ; RV32I-NEXT: mv a4, a0 -; RV32I-NEXT: lui a0, 524288 -; RV32I-NEXT: xor a5, a1, a0 +; RV32I-NEXT: lui a5, 524288 +; RV32I-NEXT: xor a5, a1, a5 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: mv a2, s1 @@ -378,9 +378,9 @@ ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __adddf3@plt ; RV32I-NEXT: mv a4, a0 -; RV32I-NEXT: lui a0, 524288 -; RV32I-NEXT: xor a2, s5, a0 -; RV32I-NEXT: xor a5, a1, a0 +; RV32I-NEXT: lui a5, 524288 +; RV32I-NEXT: xor a2, s5, a5 +; RV32I-NEXT: xor a5, a1, a5 ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: mv a1, a2 ; RV32I-NEXT: mv a2, s3 @@ -476,9 +476,9 @@ ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __adddf3@plt ; RV32I-NEXT: mv a4, a0 -; RV32I-NEXT: lui a0, 524288 -; RV32I-NEXT: xor a3, s5, a0 -; RV32I-NEXT: xor a5, a1, a0 +; RV32I-NEXT: lui a5, 524288 +; RV32I-NEXT: xor a3, s5, a5 +; RV32I-NEXT: xor a5, a1, a5 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: mv a2, s4 @@ -634,8 +634,8 @@ ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __adddf3@plt ; RV32I-NEXT: mv a2, a0 -; RV32I-NEXT: lui a0, 524288 -; RV32I-NEXT: xor a3, a1, a0 +; RV32I-NEXT: lui a3, 524288 +; RV32I-NEXT: xor a3, a1, a3 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: mv a4, s1 diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll --- a/llvm/test/CodeGen/RISCV/double-arith.ll +++ b/llvm/test/CodeGen/RISCV/double-arith.ll @@ -196,8 +196,8 @@ ; RV32I-NEXT: mv a2, a0 ; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: call __adddf3@plt -; RV32I-NEXT: lui a2, 524288 -; RV32I-NEXT: xor a3, a1, a2 +; RV32I-NEXT: lui a3, 524288 +; RV32I-NEXT: xor a3, a1, a3 ; RV32I-NEXT: mv a2, a0 ; RV32I-NEXT: call __eqdf2@plt ; RV32I-NEXT: seqz a0, a0 @@ -423,8 +423,8 @@ ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __adddf3@plt ; RV32I-NEXT: mv a4, a0 -; RV32I-NEXT: lui a0, 524288 -; RV32I-NEXT: xor a5, a1, a0 +; RV32I-NEXT: lui a5, 524288 +; RV32I-NEXT: xor a5, a1, a5 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: mv a2, s1 @@ -508,9 +508,9 @@ ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __adddf3@plt ; RV32I-NEXT: mv a4, a0 -; RV32I-NEXT: lui a0, 524288 -; RV32I-NEXT: xor a2, s5, a0 -; RV32I-NEXT: xor a5, a1, a0 +; RV32I-NEXT: lui a5, 524288 +; RV32I-NEXT: xor a2, s5, a5 +; RV32I-NEXT: xor a5, a1, a5 ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: mv a1, a2 ; RV32I-NEXT: mv a2, s3 @@ -606,9 +606,9 @@ ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __adddf3@plt ; RV32I-NEXT: mv a4, a0 -; RV32I-NEXT: lui a0, 524288 -; RV32I-NEXT: xor a3, s5, a0 -; RV32I-NEXT: xor a5, a1, a0 +; RV32I-NEXT: lui a5, 524288 +; RV32I-NEXT: xor a3, s5, a5 +; RV32I-NEXT: xor a5, a1, a5 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: mv a1, 
s2 ; RV32I-NEXT: mv a2, s4 @@ -832,8 +832,8 @@ ; RV32I-NEXT: li a3, 0 ; RV32I-NEXT: call __adddf3@plt ; RV32I-NEXT: mv a2, a0 -; RV32I-NEXT: lui a0, 524288 -; RV32I-NEXT: xor a3, a1, a0 +; RV32I-NEXT: lui a3, 524288 +; RV32I-NEXT: xor a3, a1, a3 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: mv a4, s1 diff --git a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll --- a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll +++ b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll @@ -293,8 +293,8 @@ ; CHECKIFD-NEXT: frflags a0 ; CHECKIFD-NEXT: flt.d a2, fa1, fa0 ; CHECKIFD-NEXT: fsflags a0 -; CHECKIFD-NEXT: or a0, a2, a1 -; CHECKIFD-NEXT: xori a0, a0, 1 +; CHECKIFD-NEXT: or a1, a2, a1 +; CHECKIFD-NEXT: xori a0, a1, 1 ; CHECKIFD-NEXT: feq.d zero, fa1, fa0 ; CHECKIFD-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/float-arith-strict.ll b/llvm/test/CodeGen/RISCV/float-arith-strict.ll --- a/llvm/test/CodeGen/RISCV/float-arith-strict.ll +++ b/llvm/test/CodeGen/RISCV/float-arith-strict.ll @@ -279,8 +279,8 @@ ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __addsf3@plt -; RV32I-NEXT: lui a1, 524288 -; RV32I-NEXT: xor a2, a0, a1 +; RV32I-NEXT: lui a2, 524288 +; RV32I-NEXT: xor a2, a0, a2 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call fmaf@plt @@ -301,8 +301,8 @@ ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __addsf3@plt -; RV64I-NEXT: lui a1, 524288 -; RV64I-NEXT: xor a2, a0, a1 +; RV64I-NEXT: lui a2, 524288 +; RV64I-NEXT: xor a2, a0, a2 ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call fmaf@plt diff --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll --- a/llvm/test/CodeGen/RISCV/float-arith.ll +++ b/llvm/test/CodeGen/RISCV/float-arith.ll @@ -416,8 +416,8 @@ ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: call __addsf3@plt -; RV32I-NEXT: lui a1, 524288 -; RV32I-NEXT: xor a2, a0, a1 +; RV32I-NEXT: lui a2, 524288 +; RV32I-NEXT: xor a2, a0, a2 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s0 ; RV32I-NEXT: call fmaf@plt @@ -438,8 +438,8 @@ ; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: li a1, 0 ; RV64I-NEXT: call __addsf3@plt -; RV64I-NEXT: lui a1, 524288 -; RV64I-NEXT: xor a2, a0, a1 +; RV64I-NEXT: lui a2, 524288 +; RV64I-NEXT: xor a2, a0, a2 ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call fmaf@plt diff --git a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll --- a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll @@ -195,8 +195,8 @@ ; RV32F-NEXT: mv s1, a0 ; RV32F-NEXT: call __muldf3@plt ; RV32F-NEXT: mv a2, a0 -; RV32F-NEXT: lui a0, 524288 -; RV32F-NEXT: xor a3, a1, a0 +; RV32F-NEXT: lui a3, 524288 +; RV32F-NEXT: xor a3, a1, a3 ; RV32F-NEXT: mv a0, s1 ; RV32F-NEXT: mv a1, s0 ; RV32F-NEXT: call __muldf3@plt @@ -321,8 +321,8 @@ ; RV32F-NEXT: mv s1, a0 ; RV32F-NEXT: call __muldf3@plt ; RV32F-NEXT: mv a2, a0 -; RV32F-NEXT: lui a0, 524288 -; RV32F-NEXT: or a3, a1, a0 +; RV32F-NEXT: lui a3, 524288 +; RV32F-NEXT: or a3, a1, a3 ; RV32F-NEXT: mv a0, s1 ; RV32F-NEXT: mv a1, s0 ; RV32F-NEXT: call __muldf3@plt diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll --- a/llvm/test/CodeGen/RISCV/float-convert.ll +++ b/llvm/test/CodeGen/RISCV/float-convert.ll @@ -780,8 +780,8 @@ ; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: 
sgtz a0, a0 -; RV32I-NEXT: neg a0, a0 -; RV32I-NEXT: or a1, a0, s1 +; RV32I-NEXT: neg a1, a0 +; RV32I-NEXT: or a1, a1, s1 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll --- a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll +++ b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll @@ -285,8 +285,8 @@ ; CHECKIF-NEXT: frflags a0 ; CHECKIF-NEXT: flt.s a2, fa1, fa0 ; CHECKIF-NEXT: fsflags a0 -; CHECKIF-NEXT: or a0, a2, a1 -; CHECKIF-NEXT: xori a0, a0, 1 +; CHECKIF-NEXT: or a1, a2, a1 +; CHECKIF-NEXT: xori a0, a1, 1 ; CHECKIF-NEXT: feq.s zero, fa1, fa0 ; CHECKIF-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/forced-atomics.ll b/llvm/test/CodeGen/RISCV/forced-atomics.ll --- a/llvm/test/CodeGen/RISCV/forced-atomics.ll +++ b/llvm/test/CodeGen/RISCV/forced-atomics.ll @@ -2504,8 +2504,8 @@ ; RV32-NEXT: j .LBB49_2 ; RV32-NEXT: .LBB49_1: # %atomicrmw.start ; RV32-NEXT: # in Loop: Header=BB49_2 Depth=1 -; RV32-NEXT: neg a0, a0 -; RV32-NEXT: and a3, a0, a1 +; RV32-NEXT: neg a3, a0 +; RV32-NEXT: and a3, a3, a1 ; RV32-NEXT: sw a4, 0(sp) ; RV32-NEXT: sw a1, 4(sp) ; RV32-NEXT: mv a1, sp @@ -2599,8 +2599,8 @@ ; RV32-NEXT: j .LBB50_2 ; RV32-NEXT: .LBB50_1: # %atomicrmw.start ; RV32-NEXT: # in Loop: Header=BB50_2 Depth=1 -; RV32-NEXT: neg a0, a0 -; RV32-NEXT: and a3, a0, a1 +; RV32-NEXT: neg a3, a0 +; RV32-NEXT: and a3, a3, a1 ; RV32-NEXT: sw a4, 0(sp) ; RV32-NEXT: sw a1, 4(sp) ; RV32-NEXT: mv a1, sp @@ -2696,8 +2696,8 @@ ; RV32-NEXT: j .LBB51_2 ; RV32-NEXT: .LBB51_1: # %atomicrmw.start ; RV32-NEXT: # in Loop: Header=BB51_2 Depth=1 -; RV32-NEXT: neg a0, a0 -; RV32-NEXT: and a3, a0, a1 +; RV32-NEXT: neg a3, a0 +; RV32-NEXT: and a3, a3, a1 ; RV32-NEXT: sw a4, 0(sp) ; RV32-NEXT: sw a1, 4(sp) ; RV32-NEXT: mv a1, sp @@ -2791,8 +2791,8 @@ ; RV32-NEXT: j .LBB52_2 ; RV32-NEXT: .LBB52_1: # %atomicrmw.start ; RV32-NEXT: # in Loop: Header=BB52_2 Depth=1 -; RV32-NEXT: neg a0, a0 -; RV32-NEXT: and a3, a0, a1 +; RV32-NEXT: neg a3, a0 +; RV32-NEXT: and a3, a3, a1 ; RV32-NEXT: sw a4, 0(sp) ; RV32-NEXT: sw a1, 4(sp) ; RV32-NEXT: mv a1, sp diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll --- a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll +++ b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll @@ -2369,20 +2369,20 @@ ; CHECK-NOV-NEXT: beqz a1, .LBB20_7 ; CHECK-NOV-NEXT: # %bb.5: # %entry ; CHECK-NOV-NEXT: sgtz a1, a1 -; CHECK-NOV-NEXT: and a0, a4, s0 +; CHECK-NOV-NEXT: and a4, a4, s0 ; CHECK-NOV-NEXT: bnez a2, .LBB20_8 ; CHECK-NOV-NEXT: .LBB20_6: -; CHECK-NOV-NEXT: snez a2, a0 +; CHECK-NOV-NEXT: snez a0, a4 ; CHECK-NOV-NEXT: j .LBB20_9 ; CHECK-NOV-NEXT: .LBB20_7: ; CHECK-NOV-NEXT: snez a1, a3 -; CHECK-NOV-NEXT: and a0, a4, s0 +; CHECK-NOV-NEXT: and a4, a4, s0 ; CHECK-NOV-NEXT: beqz a2, .LBB20_6 ; CHECK-NOV-NEXT: .LBB20_8: # %entry -; CHECK-NOV-NEXT: sgtz a2, a2 +; CHECK-NOV-NEXT: sgtz a0, a2 ; CHECK-NOV-NEXT: .LBB20_9: # %entry -; CHECK-NOV-NEXT: neg a2, a2 -; CHECK-NOV-NEXT: and a0, a2, a0 +; CHECK-NOV-NEXT: neg a0, a0 +; CHECK-NOV-NEXT: and a0, a0, a4 ; CHECK-NOV-NEXT: neg a1, a1 ; CHECK-NOV-NEXT: and a1, a1, a3 ; CHECK-NOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload @@ -2435,20 +2435,20 @@ ; CHECK-V-NEXT: beqz a2, .LBB20_7 ; CHECK-V-NEXT: # %bb.5: # %entry ; CHECK-V-NEXT: sgtz a1, a2 -; CHECK-V-NEXT: and a2, a3, s1 +; CHECK-V-NEXT: and a3, a3, s1 ; CHECK-V-NEXT: bnez s0, .LBB20_8 ; CHECK-V-NEXT: .LBB20_6: -; 
CHECK-V-NEXT: snez a3, a2 +; CHECK-V-NEXT: snez a2, a3 ; CHECK-V-NEXT: j .LBB20_9 ; CHECK-V-NEXT: .LBB20_7: ; CHECK-V-NEXT: snez a1, a0 -; CHECK-V-NEXT: and a2, a3, s1 +; CHECK-V-NEXT: and a3, a3, s1 ; CHECK-V-NEXT: beqz s0, .LBB20_6 ; CHECK-V-NEXT: .LBB20_8: # %entry -; CHECK-V-NEXT: sgtz a3, s0 +; CHECK-V-NEXT: sgtz a2, s0 ; CHECK-V-NEXT: .LBB20_9: # %entry -; CHECK-V-NEXT: neg a3, a3 -; CHECK-V-NEXT: and a2, a3, a2 +; CHECK-V-NEXT: neg a2, a2 +; CHECK-V-NEXT: and a2, a2, a3 ; CHECK-V-NEXT: neg a1, a1 ; CHECK-V-NEXT: and a0, a1, a0 ; CHECK-V-NEXT: sd a0, 24(sp) @@ -2789,20 +2789,20 @@ ; CHECK-NOV-NEXT: beqz a1, .LBB23_7 ; CHECK-NOV-NEXT: # %bb.5: # %entry ; CHECK-NOV-NEXT: sgtz a1, a1 -; CHECK-NOV-NEXT: and a0, a4, s0 +; CHECK-NOV-NEXT: and a4, a4, s0 ; CHECK-NOV-NEXT: bnez a2, .LBB23_8 ; CHECK-NOV-NEXT: .LBB23_6: -; CHECK-NOV-NEXT: snez a2, a0 +; CHECK-NOV-NEXT: snez a0, a4 ; CHECK-NOV-NEXT: j .LBB23_9 ; CHECK-NOV-NEXT: .LBB23_7: ; CHECK-NOV-NEXT: snez a1, a3 -; CHECK-NOV-NEXT: and a0, a4, s0 +; CHECK-NOV-NEXT: and a4, a4, s0 ; CHECK-NOV-NEXT: beqz a2, .LBB23_6 ; CHECK-NOV-NEXT: .LBB23_8: # %entry -; CHECK-NOV-NEXT: sgtz a2, a2 +; CHECK-NOV-NEXT: sgtz a0, a2 ; CHECK-NOV-NEXT: .LBB23_9: # %entry -; CHECK-NOV-NEXT: neg a2, a2 -; CHECK-NOV-NEXT: and a0, a2, a0 +; CHECK-NOV-NEXT: neg a0, a0 +; CHECK-NOV-NEXT: and a0, a0, a4 ; CHECK-NOV-NEXT: neg a1, a1 ; CHECK-NOV-NEXT: and a1, a1, a3 ; CHECK-NOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload @@ -2855,20 +2855,20 @@ ; CHECK-V-NEXT: beqz a2, .LBB23_7 ; CHECK-V-NEXT: # %bb.5: # %entry ; CHECK-V-NEXT: sgtz a1, a2 -; CHECK-V-NEXT: and a2, a3, s1 +; CHECK-V-NEXT: and a3, a3, s1 ; CHECK-V-NEXT: bnez s0, .LBB23_8 ; CHECK-V-NEXT: .LBB23_6: -; CHECK-V-NEXT: snez a3, a2 +; CHECK-V-NEXT: snez a2, a3 ; CHECK-V-NEXT: j .LBB23_9 ; CHECK-V-NEXT: .LBB23_7: ; CHECK-V-NEXT: snez a1, a0 -; CHECK-V-NEXT: and a2, a3, s1 +; CHECK-V-NEXT: and a3, a3, s1 ; CHECK-V-NEXT: beqz s0, .LBB23_6 ; CHECK-V-NEXT: .LBB23_8: # %entry -; CHECK-V-NEXT: sgtz a3, s0 +; CHECK-V-NEXT: sgtz a2, s0 ; CHECK-V-NEXT: .LBB23_9: # %entry -; CHECK-V-NEXT: neg a3, a3 -; CHECK-V-NEXT: and a2, a3, a2 +; CHECK-V-NEXT: neg a2, a2 +; CHECK-V-NEXT: and a2, a2, a3 ; CHECK-V-NEXT: neg a1, a1 ; CHECK-V-NEXT: and a0, a1, a0 ; CHECK-V-NEXT: sd a0, 24(sp) @@ -3198,20 +3198,20 @@ ; CHECK-NOV-NEXT: beqz a1, .LBB26_7 ; CHECK-NOV-NEXT: # %bb.5: # %entry ; CHECK-NOV-NEXT: sgtz a1, a1 -; CHECK-NOV-NEXT: and a0, a4, s0 +; CHECK-NOV-NEXT: and a4, a4, s0 ; CHECK-NOV-NEXT: bnez a2, .LBB26_8 ; CHECK-NOV-NEXT: .LBB26_6: -; CHECK-NOV-NEXT: snez a2, a0 +; CHECK-NOV-NEXT: snez a0, a4 ; CHECK-NOV-NEXT: j .LBB26_9 ; CHECK-NOV-NEXT: .LBB26_7: ; CHECK-NOV-NEXT: snez a1, a3 -; CHECK-NOV-NEXT: and a0, a4, s0 +; CHECK-NOV-NEXT: and a4, a4, s0 ; CHECK-NOV-NEXT: beqz a2, .LBB26_6 ; CHECK-NOV-NEXT: .LBB26_8: # %entry -; CHECK-NOV-NEXT: sgtz a2, a2 +; CHECK-NOV-NEXT: sgtz a0, a2 ; CHECK-NOV-NEXT: .LBB26_9: # %entry -; CHECK-NOV-NEXT: neg a2, a2 -; CHECK-NOV-NEXT: and a0, a2, a0 +; CHECK-NOV-NEXT: neg a0, a0 +; CHECK-NOV-NEXT: and a0, a0, a4 ; CHECK-NOV-NEXT: neg a1, a1 ; CHECK-NOV-NEXT: and a1, a1, a3 ; CHECK-NOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload @@ -5363,8 +5363,8 @@ ; CHECK-NOV-NEXT: .LBB45_7: # %entry ; CHECK-NOV-NEXT: slti a6, a1, 0 ; CHECK-NOV-NEXT: slti a3, s1, 0 -; CHECK-NOV-NEXT: neg a3, a3 -; CHECK-NOV-NEXT: and a4, a3, s1 +; CHECK-NOV-NEXT: neg a4, a3 +; CHECK-NOV-NEXT: and a4, a4, s1 ; CHECK-NOV-NEXT: slli a3, a0, 63 ; CHECK-NOV-NEXT: mv a5, s0 ; CHECK-NOV-NEXT: bltz a4, .LBB45_20 @@ -5474,8 +5474,8 @@ ; 
CHECK-V-NEXT: mv a0, a4 ; CHECK-V-NEXT: .LBB45_7: # %entry ; CHECK-V-NEXT: slti a3, s1, 0 -; CHECK-V-NEXT: neg a3, a3 -; CHECK-V-NEXT: and a4, a3, s1 +; CHECK-V-NEXT: neg a4, a3 +; CHECK-V-NEXT: and a4, a4, s1 ; CHECK-V-NEXT: slti a6, a1, 0 ; CHECK-V-NEXT: slli a3, a2, 63 ; CHECK-V-NEXT: mv a5, s0 @@ -5850,8 +5850,8 @@ ; CHECK-NOV-NEXT: .LBB48_7: # %entry ; CHECK-NOV-NEXT: slti a6, a1, 0 ; CHECK-NOV-NEXT: slti a3, s1, 0 -; CHECK-NOV-NEXT: neg a3, a3 -; CHECK-NOV-NEXT: and a4, a3, s1 +; CHECK-NOV-NEXT: neg a4, a3 +; CHECK-NOV-NEXT: and a4, a4, s1 ; CHECK-NOV-NEXT: slli a3, a0, 63 ; CHECK-NOV-NEXT: mv a5, s0 ; CHECK-NOV-NEXT: bltz a4, .LBB48_20 @@ -5961,8 +5961,8 @@ ; CHECK-V-NEXT: mv a0, a4 ; CHECK-V-NEXT: .LBB48_7: # %entry ; CHECK-V-NEXT: slti a3, s1, 0 -; CHECK-V-NEXT: neg a3, a3 -; CHECK-V-NEXT: and a4, a3, s1 +; CHECK-V-NEXT: neg a4, a3 +; CHECK-V-NEXT: and a4, a4, s1 ; CHECK-V-NEXT: slti a6, a1, 0 ; CHECK-V-NEXT: slli a3, a2, 63 ; CHECK-V-NEXT: mv a5, s0 @@ -6339,8 +6339,8 @@ ; CHECK-NOV-NEXT: .LBB51_7: # %entry ; CHECK-NOV-NEXT: slti a6, a1, 0 ; CHECK-NOV-NEXT: slti a3, s1, 0 -; CHECK-NOV-NEXT: neg a3, a3 -; CHECK-NOV-NEXT: and a4, a3, s1 +; CHECK-NOV-NEXT: neg a4, a3 +; CHECK-NOV-NEXT: and a4, a4, s1 ; CHECK-NOV-NEXT: slli a3, a0, 63 ; CHECK-NOV-NEXT: mv a5, s0 ; CHECK-NOV-NEXT: bltz a4, .LBB51_20 @@ -6445,8 +6445,8 @@ ; CHECK-V-NEXT: .LBB51_7: # %entry ; CHECK-V-NEXT: slti a6, a1, 0 ; CHECK-V-NEXT: slti a3, s1, 0 -; CHECK-V-NEXT: neg a3, a3 -; CHECK-V-NEXT: and a4, a3, s1 +; CHECK-V-NEXT: neg a4, a3 +; CHECK-V-NEXT: and a4, a4, s1 ; CHECK-V-NEXT: slli a3, a2, 63 ; CHECK-V-NEXT: mv a5, s0 ; CHECK-V-NEXT: bltz a4, .LBB51_20 diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -1217,8 +1217,8 @@ ; RV32I-NEXT: call __gesf2@plt ; RV32I-NEXT: slti a0, a0, 0 ; RV32I-NEXT: addi a0, a0, -1 -; RV32I-NEXT: and a0, a0, s2 -; RV32I-NEXT: or a1, s1, a0 +; RV32I-NEXT: and a1, a0, s2 +; RV32I-NEXT: or a1, s1, a1 ; RV32I-NEXT: mv a0, s3 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll --- a/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll +++ b/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll @@ -111,8 +111,8 @@ ; CHECK-NEXT: frflags a0 ; CHECK-NEXT: flt.h a2, fa1, fa0 ; CHECK-NEXT: fsflags a0 -; CHECK-NEXT: or a0, a2, a1 -; CHECK-NEXT: xori a0, a0, 1 +; CHECK-NEXT: or a1, a2, a1 +; CHECK-NEXT: xori a0, a1, 1 ; CHECK-NEXT: feq.h zero, fa1, fa0 ; CHECK-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll --- a/llvm/test/CodeGen/RISCV/mul.ll +++ b/llvm/test/CodeGen/RISCV/mul.ll @@ -1149,17 +1149,17 @@ ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sltu t1, a5, a3 ; RV32I-NEXT: .LBB30_2: -; RV32I-NEXT: sub a1, a2, a1 -; RV32I-NEXT: sltu a2, a1, t1 -; RV32I-NEXT: sub a2, t0, a2 -; RV32I-NEXT: sub a1, a1, t1 -; RV32I-NEXT: sub a3, a5, a3 -; RV32I-NEXT: sub a3, a3, a7 +; RV32I-NEXT: sub a2, a2, a1 +; RV32I-NEXT: sltu a1, a2, t1 +; RV32I-NEXT: sub a1, t0, a1 +; RV32I-NEXT: sub a2, a2, t1 +; RV32I-NEXT: sub a5, a5, a3 +; RV32I-NEXT: sub a3, a5, a7 ; RV32I-NEXT: sub a4, a6, a4 ; RV32I-NEXT: sw a4, 0(a0) ; RV32I-NEXT: sw a3, 4(a0) -; RV32I-NEXT: sw a1, 8(a0) -; RV32I-NEXT: sw a2, 
12(a0) +; RV32I-NEXT: sw a2, 8(a0) +; RV32I-NEXT: sw a1, 12(a0) ; RV32I-NEXT: ret ; ; RV32IM-LABEL: muli128_m3840: @@ -1203,16 +1203,16 @@ ; RV32IM-NEXT: sub a3, t1, a3 ; RV32IM-NEXT: add a2, a3, a2 ; RV32IM-NEXT: sub a3, t3, a4 -; RV32IM-NEXT: sub a1, a3, a1 -; RV32IM-NEXT: add a1, a1, a2 -; RV32IM-NEXT: add a1, a1, t0 -; RV32IM-NEXT: add a1, a7, a1 -; RV32IM-NEXT: add a1, a1, s0 -; RV32IM-NEXT: mul a2, a4, a5 -; RV32IM-NEXT: sw a2, 0(a0) +; RV32IM-NEXT: sub a3, a3, a1 +; RV32IM-NEXT: add a2, a3, a2 +; RV32IM-NEXT: add a2, a2, t0 +; RV32IM-NEXT: add a2, a7, a2 +; RV32IM-NEXT: add a2, a2, s0 +; RV32IM-NEXT: mul a1, a4, a5 +; RV32IM-NEXT: sw a1, 0(a0) ; RV32IM-NEXT: sw a6, 4(a0) ; RV32IM-NEXT: sw t6, 8(a0) -; RV32IM-NEXT: sw a1, 12(a0) +; RV32IM-NEXT: sw a2, 12(a0) ; RV32IM-NEXT: lw s0, 12(sp) # 4-byte Folded Reload ; RV32IM-NEXT: lw s1, 8(sp) # 4-byte Folded Reload ; RV32IM-NEXT: addi sp, sp, 16 @@ -1280,8 +1280,8 @@ ; RV32I-NEXT: sub a7, t2, t0 ; RV32I-NEXT: sub a3, a3, a6 ; RV32I-NEXT: sub a3, a3, a4 -; RV32I-NEXT: sub a1, a2, a1 -; RV32I-NEXT: sw a1, 0(a0) +; RV32I-NEXT: sub a2, a2, a1 +; RV32I-NEXT: sw a2, 0(a0) ; RV32I-NEXT: sw a3, 4(a0) ; RV32I-NEXT: sw a7, 8(a0) ; RV32I-NEXT: sw a5, 12(a0) @@ -1327,19 +1327,19 @@ ; RV32IM-NEXT: slli t1, a2, 6 ; RV32IM-NEXT: sub a2, a2, t1 ; RV32IM-NEXT: mulhu a5, a1, a5 -; RV32IM-NEXT: sub a1, a5, a1 +; RV32IM-NEXT: sub a5, a5, a1 +; RV32IM-NEXT: add a2, a5, a2 +; RV32IM-NEXT: sub a1, t3, a3 +; RV32IM-NEXT: sub a1, a1, a4 ; RV32IM-NEXT: add a1, a1, a2 -; RV32IM-NEXT: sub a2, t3, a3 -; RV32IM-NEXT: sub a2, a2, a4 -; RV32IM-NEXT: add a1, a2, a1 ; RV32IM-NEXT: neg a2, t5 ; RV32IM-NEXT: sltu a2, a2, t0 ; RV32IM-NEXT: add a1, a1, a2 ; RV32IM-NEXT: add a1, a7, a1 ; RV32IM-NEXT: add a1, a1, s0 ; RV32IM-NEXT: slli a2, a3, 6 -; RV32IM-NEXT: sub a2, a3, a2 -; RV32IM-NEXT: sw a2, 0(a0) +; RV32IM-NEXT: sub a3, a3, a2 +; RV32IM-NEXT: sw a3, 0(a0) ; RV32IM-NEXT: sw a6, 4(a0) ; RV32IM-NEXT: sw t6, 8(a0) ; RV32IM-NEXT: sw a1, 12(a0) @@ -1594,29 +1594,29 @@ ; RV32I-LABEL: muladd_demand_2: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 1 -; RV32I-NEXT: sub a0, a1, a0 -; RV32I-NEXT: ori a0, a0, -16 +; RV32I-NEXT: sub a1, a1, a0 +; RV32I-NEXT: ori a0, a1, -16 ; RV32I-NEXT: ret ; ; RV32IM-LABEL: muladd_demand_2: ; RV32IM: # %bb.0: ; RV32IM-NEXT: slli a0, a0, 1 -; RV32IM-NEXT: sub a0, a1, a0 -; RV32IM-NEXT: ori a0, a0, -16 +; RV32IM-NEXT: sub a1, a1, a0 +; RV32IM-NEXT: ori a0, a1, -16 ; RV32IM-NEXT: ret ; ; RV64I-LABEL: muladd_demand_2: ; RV64I: # %bb.0: ; RV64I-NEXT: slliw a0, a0, 1 -; RV64I-NEXT: subw a0, a1, a0 -; RV64I-NEXT: ori a0, a0, -16 +; RV64I-NEXT: subw a1, a1, a0 +; RV64I-NEXT: ori a0, a1, -16 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muladd_demand_2: ; RV64IM: # %bb.0: ; RV64IM-NEXT: slliw a0, a0, 1 -; RV64IM-NEXT: subw a0, a1, a0 -; RV64IM-NEXT: ori a0, a0, -16 +; RV64IM-NEXT: subw a1, a1, a0 +; RV64IM-NEXT: ori a0, a1, -16 ; RV64IM-NEXT: ret %m = mul i8 %x, 14 %a = add i8 %y, %m diff --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll --- a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll @@ -306,9 +306,9 @@ ; CHECK-NEXT: and a2, a4, a2 ; CHECK-NEXT: sll a0, a1, a0 ; CHECK-NEXT: addi a3, a3, -1 -; CHECK-NEXT: and a1, a3, a0 +; CHECK-NEXT: and a3, a3, a0 ; CHECK-NEXT: not a0, a2 -; CHECK-NEXT: not a1, a1 +; CHECK-NEXT: not a1, a3 ; CHECK-NEXT: ret %1 = shl i64 1, %x %2 = xor i64 %1, -1 diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll --- 
a/llvm/test/CodeGen/RISCV/rv32zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll @@ -327,13 +327,13 @@ ; RV32I-NEXT: lui a2, 349525 ; RV32I-NEXT: addi s2, a2, 1365 ; RV32I-NEXT: and a0, a0, s2 -; RV32I-NEXT: sub a0, a1, a0 -; RV32I-NEXT: lui a1, 209715 -; RV32I-NEXT: addi s3, a1, 819 -; RV32I-NEXT: and a1, a0, s3 -; RV32I-NEXT: srli a0, a0, 2 -; RV32I-NEXT: and a0, a0, s3 -; RV32I-NEXT: add a0, a1, a0 +; RV32I-NEXT: sub a1, a1, a0 +; RV32I-NEXT: lui a0, 209715 +; RV32I-NEXT: addi s3, a0, 819 +; RV32I-NEXT: and a0, a1, s3 +; RV32I-NEXT: srli a1, a1, 2 +; RV32I-NEXT: and a1, a1, s3 +; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: srli a1, a0, 4 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: lui a1, 61681 @@ -346,11 +346,11 @@ ; RV32I-NEXT: srli s5, a0, 24 ; RV32I-NEXT: srli a0, s0, 1 ; RV32I-NEXT: and a0, a0, s2 -; RV32I-NEXT: sub a0, s0, a0 -; RV32I-NEXT: and a1, a0, s3 -; RV32I-NEXT: srli a0, a0, 2 -; RV32I-NEXT: and a0, a0, s3 -; RV32I-NEXT: add a0, a1, a0 +; RV32I-NEXT: sub s0, s0, a0 +; RV32I-NEXT: and a0, s0, s3 +; RV32I-NEXT: srli s0, s0, 2 +; RV32I-NEXT: and a1, s0, s3 +; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: srli a1, a0, 4 ; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: and a0, a0, s4 diff --git a/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll b/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll --- a/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-shift-sext.ll @@ -134,9 +134,9 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: lui a2, 30141 ; RV64I-NEXT: addiw a2, a2, -747 -; RV64I-NEXT: subw a1, a2, a1 -; RV64I-NEXT: slli a1, a1, 2 -; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: subw a2, a2, a1 +; RV64I-NEXT: slli a2, a2, 2 +; RV64I-NEXT: add a0, a0, a2 ; RV64I-NEXT: lw a0, 0(a0) ; RV64I-NEXT: ret %3 = mul i64 %1, -4294967296 @@ -179,8 +179,8 @@ ; RV64I-NEXT: add a2, a0, a2 ; RV64I-NEXT: lb a2, 0(a2) ; RV64I-NEXT: li a3, 2 -; RV64I-NEXT: subw a1, a3, a1 -; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: subw a3, a3, a1 +; RV64I-NEXT: add a0, a0, a3 ; RV64I-NEXT: lb a0, 0(a0) ; RV64I-NEXT: add a0, a2, a0 ; RV64I-NEXT: ret @@ -201,11 +201,11 @@ ; RV64I-LABEL: test14: ; RV64I: # %bb.0: ; RV64I-NEXT: li a3, 1 -; RV64I-NEXT: subw a2, a3, a2 -; RV64I-NEXT: add a0, a0, a2 +; RV64I-NEXT: subw a3, a3, a2 +; RV64I-NEXT: add a0, a0, a3 ; RV64I-NEXT: lbu a0, 0(a0) -; RV64I-NEXT: slli a2, a2, 2 -; RV64I-NEXT: add a1, a1, a2 +; RV64I-NEXT: slli a3, a3, 2 +; RV64I-NEXT: add a1, a1, a3 ; RV64I-NEXT: lw a1, 0(a1) ; RV64I-NEXT: addw a0, a0, a1 ; RV64I-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll --- a/llvm/test/CodeGen/RISCV/rv64zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll @@ -1018,11 +1018,11 @@ ; RV64I-NEXT: slli a5, a5, 24 ; RV64I-NEXT: or a3, a5, a3 ; RV64I-NEXT: or a1, a3, a1 -; RV64I-NEXT: and a3, a0, a4 -; RV64I-NEXT: slli a3, a3, 24 -; RV64I-NEXT: srliw a4, a0, 24 -; RV64I-NEXT: slli a4, a4, 32 -; RV64I-NEXT: or a3, a3, a4 +; RV64I-NEXT: and a4, a0, a4 +; RV64I-NEXT: slli a4, a4, 24 +; RV64I-NEXT: srliw a3, a0, 24 +; RV64I-NEXT: slli a3, a3, 32 +; RV64I-NEXT: or a3, a4, a3 ; RV64I-NEXT: and a2, a0, a2 ; RV64I-NEXT: slli a2, a2, 40 ; RV64I-NEXT: slli a0, a0, 56 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll @@ -847,8 +847,8 @@ ; CHECK-NEXT: li a3, 1024 ; CHECK-NEXT: beq a2, a3, .LBB13_7 ; CHECK-NEXT: # %bb.1: -; 
CHECK-NEXT: li a3, 1023 -; CHECK-NEXT: subw a4, a3, a2 +; CHECK-NEXT: li a4, 1023 +; CHECK-NEXT: subw a4, a4, a2 ; CHECK-NEXT: li a5, 31 ; CHECK-NEXT: mv a3, a2 ; CHECK-NEXT: bltu a4, a5, .LBB13_5 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll @@ -650,8 +650,8 @@ ; CHECK-NEXT: vle8.v v24, (a0) ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: and a0, a4, a2 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: and a2, a4, a2 +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vmseq.vv v1, v16, v8, v0.t ; CHECK-NEXT: bltu a3, a1, .LBB51_2 ; CHECK-NEXT: # %bb.1: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll @@ -1328,8 +1328,8 @@ ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vl8re64.v v0, (a0) -; CHECK-NEXT: and a0, a5, a3 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: and a3, a5, a3 +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll @@ -1328,8 +1328,8 @@ ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vl8re64.v v0, (a0) -; CHECK-NEXT: and a0, a5, a3 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: and a3, a5, a3 +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/select-binop-identity.ll b/llvm/test/CodeGen/RISCV/select-binop-identity.ll --- a/llvm/test/CodeGen/RISCV/select-binop-identity.ll +++ b/llvm/test/CodeGen/RISCV/select-binop-identity.ll @@ -226,8 +226,8 @@ ; RV32I-NEXT: beqz a0, .LBB9_2 ; RV32I-NEXT: # %bb.1: ; RV32I-NEXT: sltu a0, a3, a1 -; RV32I-NEXT: sub a2, a4, a2 -; RV32I-NEXT: sub a4, a2, a0 +; RV32I-NEXT: sub a4, a4, a2 +; RV32I-NEXT: sub a4, a4, a0 ; RV32I-NEXT: sub a3, a3, a1 ; RV32I-NEXT: .LBB9_2: ; RV32I-NEXT: mv a0, a3 diff --git a/llvm/test/CodeGen/RISCV/setcc-logic.ll b/llvm/test/CodeGen/RISCV/setcc-logic.ll --- a/llvm/test/CodeGen/RISCV/setcc-logic.ll +++ b/llvm/test/CodeGen/RISCV/setcc-logic.ll @@ -8,16 +8,16 @@ ; RV32I-LABEL: and_icmp_eq: ; RV32I: # %bb.0: ; RV32I-NEXT: xor a0, a0, a1 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: or a0, a0, a2 ; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: and_icmp_eq: ; RV64I: # %bb.0: ; RV64I-NEXT: xor a0, a0, a1 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: seqz a0, a0 ; RV64I-NEXT: ret %cmp1 = icmp eq i32 %a, %b @@ -30,16 +30,16 @@ ; RV32I-LABEL: or_icmp_ne: ; RV32I: # %bb.0: ; RV32I-NEXT: xor a0, a0, a1 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: or a0, a0, a2 ; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: or_icmp_ne: ; RV64I: # %bb.0: ; RV64I-NEXT: 
xor a0, a0, a1 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: snez a0, a0 ; RV64I-NEXT: ret %cmp1 = icmp ne i32 %a, %b @@ -299,8 +299,8 @@ ; RV32I-LABEL: and_sge_eq: ; RV32I: # %bb.0: ; RV32I-NEXT: slt a0, a0, a1 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: snez a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: snez a1, a2 ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB13_2 ; RV32I-NEXT: # %bb.1: @@ -311,8 +311,8 @@ ; RV64I-LABEL: and_sge_eq: ; RV64I: # %bb.0: ; RV64I-NEXT: slt a0, a0, a1 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: snez a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: snez a1, a2 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB13_2 ; RV64I-NEXT: # %bb.1: @@ -336,8 +336,8 @@ ; RV32I-LABEL: and_sle_eq: ; RV32I: # %bb.0: ; RV32I-NEXT: slt a0, a1, a0 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: snez a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: snez a1, a2 ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB14_2 ; RV32I-NEXT: # %bb.1: @@ -348,8 +348,8 @@ ; RV64I-LABEL: and_sle_eq: ; RV64I: # %bb.0: ; RV64I-NEXT: slt a0, a1, a0 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: snez a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: snez a1, a2 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB14_2 ; RV64I-NEXT: # %bb.1: @@ -373,8 +373,8 @@ ; RV32I-LABEL: and_uge_eq: ; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a0, a1 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: snez a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: snez a1, a2 ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB15_2 ; RV32I-NEXT: # %bb.1: @@ -385,8 +385,8 @@ ; RV64I-LABEL: and_uge_eq: ; RV64I: # %bb.0: ; RV64I-NEXT: sltu a0, a0, a1 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: snez a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: snez a1, a2 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB15_2 ; RV64I-NEXT: # %bb.1: @@ -410,8 +410,8 @@ ; RV32I-LABEL: and_ule_eq: ; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a1, a0 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: snez a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: snez a1, a2 ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB16_2 ; RV32I-NEXT: # %bb.1: @@ -422,8 +422,8 @@ ; RV64I-LABEL: and_ule_eq: ; RV64I: # %bb.0: ; RV64I-NEXT: sltu a0, a1, a0 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: snez a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: snez a1, a2 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB16_2 ; RV64I-NEXT: # %bb.1: @@ -447,8 +447,8 @@ ; RV32I-LABEL: and_sge_ne: ; RV32I: # %bb.0: ; RV32I-NEXT: slt a0, a0, a1 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: seqz a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: seqz a1, a2 ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB17_2 ; RV32I-NEXT: # %bb.1: @@ -459,8 +459,8 @@ ; RV64I-LABEL: and_sge_ne: ; RV64I: # %bb.0: ; RV64I-NEXT: slt a0, a0, a1 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: seqz a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: seqz a1, a2 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB17_2 ; RV64I-NEXT: # %bb.1: @@ -484,8 +484,8 @@ ; RV32I-LABEL: and_sle_ne: ; RV32I: # %bb.0: ; RV32I-NEXT: slt a0, a1, a0 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: seqz a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: seqz a1, a2 ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB18_2 ; RV32I-NEXT: # %bb.1: @@ -496,8 +496,8 @@ ; RV64I-LABEL: and_sle_ne: ; RV64I: # %bb.0: ; RV64I-NEXT: slt a0, a1, a0 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: seqz 
a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: seqz a1, a2 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB18_2 ; RV64I-NEXT: # %bb.1: @@ -521,8 +521,8 @@ ; RV32I-LABEL: and_uge_ne: ; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a0, a1 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: seqz a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: seqz a1, a2 ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB19_2 ; RV32I-NEXT: # %bb.1: @@ -533,8 +533,8 @@ ; RV64I-LABEL: and_uge_ne: ; RV64I: # %bb.0: ; RV64I-NEXT: sltu a0, a0, a1 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: seqz a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: seqz a1, a2 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB19_2 ; RV64I-NEXT: # %bb.1: @@ -558,8 +558,8 @@ ; RV32I-LABEL: and_ule_ne: ; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a1, a0 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: seqz a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: seqz a1, a2 ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB20_2 ; RV32I-NEXT: # %bb.1: @@ -570,8 +570,8 @@ ; RV64I-LABEL: and_ule_ne: ; RV64I: # %bb.0: ; RV64I-NEXT: sltu a0, a1, a0 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: seqz a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: seqz a1, a2 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB20_2 ; RV64I-NEXT: # %bb.1: @@ -595,8 +595,8 @@ ; RV32I-LABEL: or_sge_eq: ; RV32I: # %bb.0: ; RV32I-NEXT: slt a0, a0, a1 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: snez a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: snez a1, a2 ; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB21_2 ; RV32I-NEXT: # %bb.1: @@ -607,8 +607,8 @@ ; RV64I-LABEL: or_sge_eq: ; RV64I: # %bb.0: ; RV64I-NEXT: slt a0, a0, a1 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: snez a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: snez a1, a2 ; RV64I-NEXT: and a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB21_2 ; RV64I-NEXT: # %bb.1: @@ -632,8 +632,8 @@ ; RV32I-LABEL: or_sle_eq: ; RV32I: # %bb.0: ; RV32I-NEXT: slt a0, a1, a0 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: snez a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: snez a1, a2 ; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB22_2 ; RV32I-NEXT: # %bb.1: @@ -644,8 +644,8 @@ ; RV64I-LABEL: or_sle_eq: ; RV64I: # %bb.0: ; RV64I-NEXT: slt a0, a1, a0 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: snez a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: snez a1, a2 ; RV64I-NEXT: and a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB22_2 ; RV64I-NEXT: # %bb.1: @@ -669,8 +669,8 @@ ; RV32I-LABEL: or_uge_eq: ; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a0, a1 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: snez a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: snez a1, a2 ; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB23_2 ; RV32I-NEXT: # %bb.1: @@ -681,8 +681,8 @@ ; RV64I-LABEL: or_uge_eq: ; RV64I: # %bb.0: ; RV64I-NEXT: sltu a0, a0, a1 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: snez a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: snez a1, a2 ; RV64I-NEXT: and a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB23_2 ; RV64I-NEXT: # %bb.1: @@ -706,8 +706,8 @@ ; RV32I-LABEL: or_ule_eq: ; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a1, a0 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: snez a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: snez a1, a2 ; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB24_2 ; RV32I-NEXT: # %bb.1: @@ -718,8 +718,8 @@ ; RV64I-LABEL: or_ule_eq: ; RV64I: # %bb.0: ; RV64I-NEXT: sltu a0, a1, a0 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: snez a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: 
snez a1, a2 ; RV64I-NEXT: and a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB24_2 ; RV64I-NEXT: # %bb.1: @@ -743,8 +743,8 @@ ; RV32I-LABEL: or_sge_ne: ; RV32I: # %bb.0: ; RV32I-NEXT: slt a0, a0, a1 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: seqz a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: seqz a1, a2 ; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB25_2 ; RV32I-NEXT: # %bb.1: @@ -755,8 +755,8 @@ ; RV64I-LABEL: or_sge_ne: ; RV64I: # %bb.0: ; RV64I-NEXT: slt a0, a0, a1 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: seqz a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: seqz a1, a2 ; RV64I-NEXT: and a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB25_2 ; RV64I-NEXT: # %bb.1: @@ -780,8 +780,8 @@ ; RV32I-LABEL: or_sle_ne: ; RV32I: # %bb.0: ; RV32I-NEXT: slt a0, a1, a0 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: seqz a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: seqz a1, a2 ; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB26_2 ; RV32I-NEXT: # %bb.1: @@ -792,8 +792,8 @@ ; RV64I-LABEL: or_sle_ne: ; RV64I: # %bb.0: ; RV64I-NEXT: slt a0, a1, a0 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: seqz a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: seqz a1, a2 ; RV64I-NEXT: and a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB26_2 ; RV64I-NEXT: # %bb.1: @@ -817,8 +817,8 @@ ; RV32I-LABEL: or_uge_ne: ; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a0, a1 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: seqz a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: seqz a1, a2 ; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB27_2 ; RV32I-NEXT: # %bb.1: @@ -829,8 +829,8 @@ ; RV64I-LABEL: or_uge_ne: ; RV64I: # %bb.0: ; RV64I-NEXT: sltu a0, a0, a1 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: seqz a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: seqz a1, a2 ; RV64I-NEXT: and a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB27_2 ; RV64I-NEXT: # %bb.1: @@ -854,8 +854,8 @@ ; RV32I-LABEL: or_ule_ne: ; RV32I: # %bb.0: ; RV32I-NEXT: sltu a0, a1, a0 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: seqz a1, a1 +; RV32I-NEXT: xor a2, a2, a3 +; RV32I-NEXT: seqz a1, a2 ; RV32I-NEXT: and a0, a1, a0 ; RV32I-NEXT: bnez a0, .LBB28_2 ; RV32I-NEXT: # %bb.1: @@ -866,8 +866,8 @@ ; RV64I-LABEL: or_ule_ne: ; RV64I: # %bb.0: ; RV64I-NEXT: sltu a0, a1, a0 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: seqz a1, a1 +; RV64I-NEXT: xor a2, a2, a3 +; RV64I-NEXT: seqz a1, a2 ; RV64I-NEXT: and a0, a1, a0 ; RV64I-NEXT: bnez a0, .LBB28_2 ; RV64I-NEXT: # %bb.1: diff --git a/llvm/test/CodeGen/RISCV/shadowcallstack.ll b/llvm/test/CodeGen/RISCV/shadowcallstack.ll --- a/llvm/test/CodeGen/RISCV/shadowcallstack.ll +++ b/llvm/test/CodeGen/RISCV/shadowcallstack.ll @@ -121,9 +121,9 @@ ; RV64-NEXT: call bar@plt ; RV64-NEXT: mv s3, a0 ; RV64-NEXT: call bar@plt -; RV64-NEXT: addw a1, s0, s1 +; RV64-NEXT: addw s0, s0, s1 ; RV64-NEXT: addw a0, s3, a0 -; RV64-NEXT: addw a0, a1, a0 +; RV64-NEXT: addw a0, s0, a0 ; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll b/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll --- a/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll +++ b/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll @@ -23,8 +23,8 @@ ; RV32-NEXT: mulhu a6, a5, a4 ; RV32-NEXT: add a3, a6, a3 ; RV32-NEXT: sltu a0, a0, a2 -; RV32-NEXT: sub a0, a1, a0 -; RV32-NEXT: mul a1, a0, a4 +; RV32-NEXT: sub a1, a1, a0 +; RV32-NEXT: mul a1, a1, a4 ; RV32-NEXT: add a1, a3, a1 ; RV32-NEXT: mul a0, a5, a4 ; RV32-NEXT: ret @@ -48,8 
+48,8 @@ ; RV64-NEXT: mulhu a6, a4, a2 ; RV64-NEXT: add a5, a6, a5 ; RV64-NEXT: sltu a0, a0, a3 -; RV64-NEXT: sub a0, a1, a0 -; RV64-NEXT: mul a1, a0, a2 +; RV64-NEXT: sub a1, a1, a0 +; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, a5, a1 ; RV64-NEXT: mul a0, a4, a2 ; RV64-NEXT: ret @@ -76,8 +76,8 @@ ; RV32-NEXT: mulhu a6, a5, a4 ; RV32-NEXT: add a3, a6, a3 ; RV32-NEXT: sltu a0, a0, a2 -; RV32-NEXT: sub a0, a1, a0 -; RV32-NEXT: mul a1, a0, a4 +; RV32-NEXT: sub a1, a1, a0 +; RV32-NEXT: mul a1, a1, a4 ; RV32-NEXT: add a1, a3, a1 ; RV32-NEXT: mul a0, a5, a4 ; RV32-NEXT: ret @@ -101,8 +101,8 @@ ; RV64-NEXT: mulhu a6, a4, a2 ; RV64-NEXT: add a5, a6, a5 ; RV64-NEXT: sltu a0, a0, a3 -; RV64-NEXT: sub a0, a1, a0 -; RV64-NEXT: mul a1, a0, a2 +; RV64-NEXT: sub a1, a1, a0 +; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, a5, a1 ; RV64-NEXT: mul a0, a4, a2 ; RV64-NEXT: ret @@ -183,8 +183,8 @@ ; RV32-NEXT: mulhu a6, a3, a4 ; RV32-NEXT: add a5, a6, a5 ; RV32-NEXT: sltu a0, a0, a2 -; RV32-NEXT: sub a0, a1, a0 -; RV32-NEXT: mul a1, a0, a4 +; RV32-NEXT: sub a1, a1, a0 +; RV32-NEXT: mul a1, a1, a4 ; RV32-NEXT: add a1, a5, a1 ; RV32-NEXT: mul a0, a3, a4 ; RV32-NEXT: ret @@ -210,8 +210,8 @@ ; RV64-NEXT: mulhu a6, a3, a5 ; RV64-NEXT: add a4, a6, a4 ; RV64-NEXT: sltu a0, a0, a2 -; RV64-NEXT: sub a0, a1, a0 -; RV64-NEXT: mul a1, a0, a5 +; RV64-NEXT: sub a1, a1, a0 +; RV64-NEXT: mul a1, a1, a5 ; RV64-NEXT: add a1, a4, a1 ; RV64-NEXT: mul a0, a3, a5 ; RV64-NEXT: ret @@ -238,8 +238,8 @@ ; RV32-NEXT: mulhu a6, a5, a4 ; RV32-NEXT: add a3, a6, a3 ; RV32-NEXT: sltu a0, a0, a2 -; RV32-NEXT: sub a0, a1, a0 -; RV32-NEXT: mul a1, a0, a4 +; RV32-NEXT: sub a1, a1, a0 +; RV32-NEXT: mul a1, a1, a4 ; RV32-NEXT: add a1, a3, a1 ; RV32-NEXT: mul a0, a5, a4 ; RV32-NEXT: ret @@ -263,8 +263,8 @@ ; RV64-NEXT: mulhu a6, a4, a2 ; RV64-NEXT: add a5, a6, a5 ; RV64-NEXT: sltu a0, a0, a3 -; RV64-NEXT: sub a0, a1, a0 -; RV64-NEXT: mul a1, a0, a2 +; RV64-NEXT: sub a1, a1, a0 +; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, a5, a1 ; RV64-NEXT: mul a0, a4, a2 ; RV64-NEXT: ret @@ -293,8 +293,8 @@ ; RV32-NEXT: mulhu a6, a3, a4 ; RV32-NEXT: add a5, a6, a5 ; RV32-NEXT: sltu a0, a0, a2 -; RV32-NEXT: sub a0, a1, a0 -; RV32-NEXT: mul a1, a0, a4 +; RV32-NEXT: sub a1, a1, a0 +; RV32-NEXT: mul a1, a1, a4 ; RV32-NEXT: add a1, a5, a1 ; RV32-NEXT: mul a0, a3, a4 ; RV32-NEXT: ret @@ -320,8 +320,8 @@ ; RV64-NEXT: mulhu a6, a3, a5 ; RV64-NEXT: add a4, a6, a4 ; RV64-NEXT: sltu a0, a0, a2 -; RV64-NEXT: sub a0, a1, a0 -; RV64-NEXT: mul a1, a0, a5 +; RV64-NEXT: sub a1, a1, a0 +; RV64-NEXT: mul a1, a1, a5 ; RV64-NEXT: add a1, a4, a1 ; RV64-NEXT: mul a0, a3, a5 ; RV64-NEXT: ret @@ -348,8 +348,8 @@ ; RV32-NEXT: mulhu a6, a5, a4 ; RV32-NEXT: add a3, a6, a3 ; RV32-NEXT: sltu a0, a0, a2 -; RV32-NEXT: sub a0, a1, a0 -; RV32-NEXT: mul a1, a0, a4 +; RV32-NEXT: sub a1, a1, a0 +; RV32-NEXT: mul a1, a1, a4 ; RV32-NEXT: add a1, a3, a1 ; RV32-NEXT: mul a0, a5, a4 ; RV32-NEXT: ret @@ -373,8 +373,8 @@ ; RV64-NEXT: mulhu a6, a4, a2 ; RV64-NEXT: add a5, a6, a5 ; RV64-NEXT: sltu a0, a0, a3 -; RV64-NEXT: sub a0, a1, a0 -; RV64-NEXT: mul a1, a0, a2 +; RV64-NEXT: sub a1, a1, a0 +; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, a5, a1 ; RV64-NEXT: mul a0, a4, a2 ; RV64-NEXT: ret @@ -403,9 +403,9 @@ ; RV32-NEXT: mulhu a4, a3, a4 ; RV32-NEXT: add a4, a4, a5 ; RV32-NEXT: sltu a0, a0, a2 -; RV32-NEXT: sub a0, a1, a0 -; RV32-NEXT: slli a1, a0, 16 -; RV32-NEXT: add a0, a1, a0 +; RV32-NEXT: sub a1, a1, a0 +; RV32-NEXT: slli a0, a1, 16 +; RV32-NEXT: add a0, a0, a1 ; RV32-NEXT: sub a1, a4, a0 
; RV32-NEXT: slli a0, a3, 16 ; RV32-NEXT: add a0, a0, a3 @@ -435,8 +435,8 @@ ; RV64-NEXT: mulhu a6, a3, a4 ; RV64-NEXT: add a5, a6, a5 ; RV64-NEXT: sltu a0, a0, a2 -; RV64-NEXT: sub a0, a1, a0 -; RV64-NEXT: mul a1, a0, a4 +; RV64-NEXT: sub a1, a1, a0 +; RV64-NEXT: mul a1, a1, a4 ; RV64-NEXT: add a1, a5, a1 ; RV64-NEXT: mul a0, a3, a4 ; RV64-NEXT: ret @@ -462,9 +462,9 @@ ; RV32-NEXT: slli a5, a3, 16 ; RV32-NEXT: sub a4, a4, a5 ; RV32-NEXT: sltu a0, a0, a2 -; RV32-NEXT: sub a0, a1, a0 -; RV32-NEXT: slli a1, a0, 16 -; RV32-NEXT: sub a1, a0, a1 +; RV32-NEXT: sub a1, a1, a0 +; RV32-NEXT: slli a0, a1, 16 +; RV32-NEXT: sub a1, a1, a0 ; RV32-NEXT: add a1, a4, a1 ; RV32-NEXT: sub a0, a3, a5 ; RV32-NEXT: ret @@ -490,8 +490,8 @@ ; RV64-NEXT: mulhu a6, a5, a4 ; RV64-NEXT: add a3, a6, a3 ; RV64-NEXT: sltu a0, a0, a2 -; RV64-NEXT: sub a0, a1, a0 -; RV64-NEXT: mul a1, a0, a4 +; RV64-NEXT: sub a1, a1, a0 +; RV64-NEXT: mul a1, a1, a4 ; RV64-NEXT: add a1, a3, a1 ; RV64-NEXT: mul a0, a5, a4 ; RV64-NEXT: ret @@ -522,8 +522,8 @@ ; RV32-NEXT: mulhu a6, a5, a4 ; RV32-NEXT: add a3, a6, a3 ; RV32-NEXT: sltu a0, a0, a2 -; RV32-NEXT: sub a0, a1, a0 -; RV32-NEXT: mul a1, a0, a4 +; RV32-NEXT: sub a1, a1, a0 +; RV32-NEXT: mul a1, a1, a4 ; RV32-NEXT: add a1, a3, a1 ; RV32-NEXT: mul a0, a5, a4 ; RV32-NEXT: ret @@ -551,8 +551,8 @@ ; RV64-NEXT: mulhu a6, a4, a2 ; RV64-NEXT: add a5, a6, a5 ; RV64-NEXT: sltu a0, a0, a3 -; RV64-NEXT: sub a0, a1, a0 -; RV64-NEXT: mul a1, a0, a2 +; RV64-NEXT: sub a1, a1, a0 +; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: add a1, a5, a1 ; RV64-NEXT: mul a0, a4, a2 ; RV64-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll @@ -674,8 +674,8 @@ ; RV32MV-NEXT: vslidedown.vi v10, v8, 2 ; RV32MV-NEXT: vmv.x.s a1, v10 ; RV32MV-NEXT: slli a2, a1, 1 -; RV32MV-NEXT: sub a0, a2, a0 -; RV32MV-NEXT: sw a0, 4(s2) +; RV32MV-NEXT: sub a2, a2, a0 +; RV32MV-NEXT: sw a2, 4(s2) ; RV32MV-NEXT: vslidedown.vi v10, v8, 4 ; RV32MV-NEXT: vmv.x.s a0, v10 ; RV32MV-NEXT: srli a2, a0, 30 @@ -782,8 +782,8 @@ ; RV64MV-NEXT: slli a2, a2, 2 ; RV64MV-NEXT: slli a3, a3, 31 ; RV64MV-NEXT: srli a3, a3, 62 -; RV64MV-NEXT: or a1, a3, a2 -; RV64MV-NEXT: sw a1, 8(a0) +; RV64MV-NEXT: or a2, a3, a2 +; RV64MV-NEXT: sw a2, 8(a0) ; RV64MV-NEXT: addi sp, s0, -64 ; RV64MV-NEXT: ld ra, 56(sp) # 8-byte Folded Reload ; RV64MV-NEXT: ld s0, 48(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll --- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll +++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll @@ -179,16 +179,16 @@ ; RV64IM-NEXT: ld a6, %lo(.LCPI0_3)(a6) ; RV64IM-NEXT: li a7, 98 ; RV64IM-NEXT: mulw a3, a3, a7 -; RV64IM-NEXT: subw a3, a5, a3 -; RV64IM-NEXT: mulh a5, a4, a6 -; RV64IM-NEXT: srli a6, a5, 63 -; RV64IM-NEXT: srli a5, a5, 7 -; RV64IM-NEXT: addw a5, a5, a6 +; RV64IM-NEXT: subw a5, a5, a3 +; RV64IM-NEXT: mulh a3, a4, a6 +; RV64IM-NEXT: srli a6, a3, 63 +; RV64IM-NEXT: srli a3, a3, 7 +; RV64IM-NEXT: addw a3, a3, a6 ; RV64IM-NEXT: li a6, -1003 -; RV64IM-NEXT: mulw a5, a5, a6 -; RV64IM-NEXT: subw a4, a4, a5 +; RV64IM-NEXT: mulw a3, a3, a6 +; RV64IM-NEXT: subw a4, a4, a3 ; RV64IM-NEXT: sh a4, 6(a0) -; RV64IM-NEXT: sh a3, 4(a0) +; RV64IM-NEXT: sh a5, 4(a0) ; RV64IM-NEXT: sh a1, 2(a0) ; RV64IM-NEXT: sh a2, 0(a0) ; RV64IM-NEXT: ret @@ -360,8 +360,8 @@ ; RV64IM-NEXT: srli a3, a3, 6 ; 
RV64IM-NEXT: addw a3, a3, a6 ; RV64IM-NEXT: mulw a3, a3, a7 -; RV64IM-NEXT: subw a3, a4, a3 -; RV64IM-NEXT: sh a3, 6(a0) +; RV64IM-NEXT: subw a4, a4, a3 +; RV64IM-NEXT: sh a4, 6(a0) ; RV64IM-NEXT: sh a5, 4(a0) ; RV64IM-NEXT: sh a1, 2(a0) ; RV64IM-NEXT: sh a2, 0(a0) @@ -749,13 +749,13 @@ ; RV64IM-NEXT: srli a3, a5, 59 ; RV64IM-NEXT: add a3, a5, a3 ; RV64IM-NEXT: andi a3, a3, -32 -; RV64IM-NEXT: subw a3, a5, a3 -; RV64IM-NEXT: srli a5, a4, 61 -; RV64IM-NEXT: add a5, a4, a5 -; RV64IM-NEXT: andi a5, a5, -8 -; RV64IM-NEXT: subw a4, a4, a5 +; RV64IM-NEXT: subw a5, a5, a3 +; RV64IM-NEXT: srli a3, a4, 61 +; RV64IM-NEXT: add a3, a4, a3 +; RV64IM-NEXT: andi a3, a3, -8 +; RV64IM-NEXT: subw a4, a4, a3 ; RV64IM-NEXT: sh a4, 4(a0) -; RV64IM-NEXT: sh a3, 2(a0) +; RV64IM-NEXT: sh a5, 2(a0) ; RV64IM-NEXT: sh a1, 0(a0) ; RV64IM-NEXT: sh a2, 6(a0) ; RV64IM-NEXT: ret @@ -911,9 +911,9 @@ ; RV64IM-NEXT: lui a5, 1 ; RV64IM-NEXT: addiw a5, a5, 1327 ; RV64IM-NEXT: mulw a3, a3, a5 -; RV64IM-NEXT: subw a3, a4, a3 +; RV64IM-NEXT: subw a4, a4, a3 ; RV64IM-NEXT: sh zero, 0(a0) -; RV64IM-NEXT: sh a3, 6(a0) +; RV64IM-NEXT: sh a4, 6(a0) ; RV64IM-NEXT: sh a1, 2(a0) ; RV64IM-NEXT: sh a2, 4(a0) ; RV64IM-NEXT: ret @@ -1055,15 +1055,15 @@ ; RV64IM-NEXT: lui a5, 1 ; RV64IM-NEXT: addiw a5, a5, 1327 ; RV64IM-NEXT: mulw a3, a3, a5 -; RV64IM-NEXT: subw a3, a4, a3 -; RV64IM-NEXT: srli a4, a1, 49 -; RV64IM-NEXT: add a4, a1, a4 +; RV64IM-NEXT: subw a4, a4, a3 +; RV64IM-NEXT: srli a3, a1, 49 +; RV64IM-NEXT: add a3, a1, a3 ; RV64IM-NEXT: lui a5, 8 -; RV64IM-NEXT: and a4, a4, a5 -; RV64IM-NEXT: subw a1, a1, a4 +; RV64IM-NEXT: and a3, a3, a5 +; RV64IM-NEXT: subw a1, a1, a3 ; RV64IM-NEXT: sh zero, 0(a0) ; RV64IM-NEXT: sh a1, 2(a0) -; RV64IM-NEXT: sh a3, 6(a0) +; RV64IM-NEXT: sh a4, 6(a0) ; RV64IM-NEXT: sh a2, 4(a0) ; RV64IM-NEXT: ret %1 = srem <4 x i16> %x, @@ -1279,9 +1279,9 @@ ; RV64IM-NEXT: lui a5, 1 ; RV64IM-NEXT: addiw a5, a5, 1327 ; RV64IM-NEXT: mul a3, a3, a5 -; RV64IM-NEXT: sub a3, a4, a3 +; RV64IM-NEXT: sub a4, a4, a3 ; RV64IM-NEXT: sd zero, 0(a0) -; RV64IM-NEXT: sd a3, 24(a0) +; RV64IM-NEXT: sd a4, 24(a0) ; RV64IM-NEXT: sd a1, 8(a0) ; RV64IM-NEXT: sd a2, 16(a0) ; RV64IM-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll --- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll +++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll @@ -132,8 +132,8 @@ ; RV64I-NEXT: or a1, a1, a2 ; RV64I-NEXT: slli a3, a3, 16 ; RV64I-NEXT: slli a4, a4, 24 -; RV64I-NEXT: or a2, a4, a3 -; RV64I-NEXT: or a1, a2, a1 +; RV64I-NEXT: or a3, a4, a3 +; RV64I-NEXT: or a1, a3, a1 ; RV64I-NEXT: lbu a2, 5(a0) ; RV64I-NEXT: lbu a3, 4(a0) ; RV64I-NEXT: lbu a4, 6(a0) diff --git a/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll b/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll --- a/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll +++ b/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll @@ -478,18 +478,18 @@ ; CHECK-I-LABEL: in_complex_y0_m0: ; CHECK-I: # %bb.0: ; CHECK-I-NEXT: and a1, a1, a2 -; CHECK-I-NEXT: xor a2, a3, a4 +; CHECK-I-NEXT: xor a3, a3, a4 ; CHECK-I-NEXT: xor a0, a0, a1 -; CHECK-I-NEXT: and a0, a0, a2 +; CHECK-I-NEXT: and a0, a0, a3 ; CHECK-I-NEXT: xor a0, a0, a1 ; CHECK-I-NEXT: ret ; ; CHECK-ZBB-LABEL: in_complex_y0_m0: ; CHECK-ZBB: # %bb.0: ; CHECK-ZBB-NEXT: and a1, a1, a2 -; CHECK-ZBB-NEXT: xor a2, a3, a4 -; CHECK-ZBB-NEXT: andn a1, a1, a2 -; CHECK-ZBB-NEXT: and a0, a0, a2 +; CHECK-ZBB-NEXT: xor a3, a3, a4 +; 
CHECK-ZBB-NEXT: andn a1, a1, a3 +; CHECK-ZBB-NEXT: and a0, a0, a3 ; CHECK-ZBB-NEXT: or a0, a0, a1 ; CHECK-ZBB-NEXT: ret %y = and i32 %y_hi, %y_low @@ -504,18 +504,18 @@ ; CHECK-I-LABEL: in_complex_y1_m0: ; CHECK-I: # %bb.0: ; CHECK-I-NEXT: and a1, a1, a2 -; CHECK-I-NEXT: xor a2, a3, a4 +; CHECK-I-NEXT: xor a3, a3, a4 ; CHECK-I-NEXT: xor a0, a0, a1 -; CHECK-I-NEXT: and a0, a0, a2 +; CHECK-I-NEXT: and a0, a0, a3 ; CHECK-I-NEXT: xor a0, a1, a0 ; CHECK-I-NEXT: ret ; ; CHECK-ZBB-LABEL: in_complex_y1_m0: ; CHECK-ZBB: # %bb.0: ; CHECK-ZBB-NEXT: and a1, a1, a2 -; CHECK-ZBB-NEXT: xor a2, a3, a4 -; CHECK-ZBB-NEXT: andn a1, a1, a2 -; CHECK-ZBB-NEXT: and a0, a0, a2 +; CHECK-ZBB-NEXT: xor a3, a3, a4 +; CHECK-ZBB-NEXT: andn a1, a1, a3 +; CHECK-ZBB-NEXT: and a0, a0, a3 ; CHECK-ZBB-NEXT: or a0, a0, a1 ; CHECK-ZBB-NEXT: ret %y = and i32 %y_hi, %y_low @@ -530,18 +530,18 @@ ; CHECK-I-LABEL: in_complex_y0_m1: ; CHECK-I: # %bb.0: ; CHECK-I-NEXT: and a1, a1, a2 -; CHECK-I-NEXT: xor a2, a3, a4 +; CHECK-I-NEXT: xor a3, a3, a4 ; CHECK-I-NEXT: xor a0, a0, a1 -; CHECK-I-NEXT: and a0, a2, a0 +; CHECK-I-NEXT: and a0, a3, a0 ; CHECK-I-NEXT: xor a0, a0, a1 ; CHECK-I-NEXT: ret ; ; CHECK-ZBB-LABEL: in_complex_y0_m1: ; CHECK-ZBB: # %bb.0: ; CHECK-ZBB-NEXT: and a1, a1, a2 -; CHECK-ZBB-NEXT: xor a2, a3, a4 -; CHECK-ZBB-NEXT: andn a1, a1, a2 -; CHECK-ZBB-NEXT: and a0, a0, a2 +; CHECK-ZBB-NEXT: xor a3, a3, a4 +; CHECK-ZBB-NEXT: andn a1, a1, a3 +; CHECK-ZBB-NEXT: and a0, a0, a3 ; CHECK-ZBB-NEXT: or a0, a0, a1 ; CHECK-ZBB-NEXT: ret %y = and i32 %y_hi, %y_low @@ -556,18 +556,18 @@ ; CHECK-I-LABEL: in_complex_y1_m1: ; CHECK-I: # %bb.0: ; CHECK-I-NEXT: and a1, a1, a2 -; CHECK-I-NEXT: xor a2, a3, a4 +; CHECK-I-NEXT: xor a3, a3, a4 ; CHECK-I-NEXT: xor a0, a0, a1 -; CHECK-I-NEXT: and a0, a2, a0 +; CHECK-I-NEXT: and a0, a3, a0 ; CHECK-I-NEXT: xor a0, a1, a0 ; CHECK-I-NEXT: ret ; ; CHECK-ZBB-LABEL: in_complex_y1_m1: ; CHECK-ZBB: # %bb.0: ; CHECK-ZBB-NEXT: and a1, a1, a2 -; CHECK-ZBB-NEXT: xor a2, a3, a4 -; CHECK-ZBB-NEXT: andn a1, a1, a2 -; CHECK-ZBB-NEXT: and a0, a0, a2 +; CHECK-ZBB-NEXT: xor a3, a3, a4 +; CHECK-ZBB-NEXT: andn a1, a1, a3 +; CHECK-ZBB-NEXT: and a0, a0, a3 ; CHECK-ZBB-NEXT: or a0, a0, a1 ; CHECK-ZBB-NEXT: ret %y = and i32 %y_hi, %y_low @@ -780,8 +780,8 @@ ; ; CHECK-ZBB-LABEL: out_constant_mone_vary_invmask: ; CHECK-ZBB: # %bb.0: -; CHECK-ZBB-NEXT: and a0, a2, a1 -; CHECK-ZBB-NEXT: orn a0, a0, a2 +; CHECK-ZBB-NEXT: and a1, a2, a1 +; CHECK-ZBB-NEXT: orn a0, a1, a2 ; CHECK-ZBB-NEXT: ret %notmask = xor i32 %mask, -1 %mx = and i32 %notmask, -1 diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll @@ -333,8 +333,8 @@ ; RV32-NEXT: lb a0, 4(a0) ; RV32-NEXT: lw a1, 0(s0) ; RV32-NEXT: slli a0, a0, 10 -; RV32-NEXT: srli a2, a1, 22 -; RV32-NEXT: or s1, a2, a0 +; RV32-NEXT: srli s1, a1, 22 +; RV32-NEXT: or s1, s1, a0 ; RV32-NEXT: srli s2, a1, 11 ; RV32-NEXT: andi a0, a1, 2047 ; RV32-NEXT: li a1, 683 diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll --- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll +++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll @@ -171,14 +171,14 @@ ; RV64IM-NEXT: ld a6, %lo(.LCPI0_3)(a6) ; RV64IM-NEXT: li a7, 98 ; RV64IM-NEXT: mulw a3, a3, a7 -; RV64IM-NEXT: subw a3, a5, a3 -; RV64IM-NEXT: mulhu a5, a4, a6 -; RV64IM-NEXT: srli a5, a5, 7 +; RV64IM-NEXT: subw a5, a5, a3 +; RV64IM-NEXT: 
mulhu a3, a4, a6 +; RV64IM-NEXT: srli a3, a3, 7 ; RV64IM-NEXT: li a6, 1003 -; RV64IM-NEXT: mulw a5, a5, a6 -; RV64IM-NEXT: subw a4, a4, a5 +; RV64IM-NEXT: mulw a3, a3, a6 +; RV64IM-NEXT: subw a4, a4, a3 ; RV64IM-NEXT: sh a4, 6(a0) -; RV64IM-NEXT: sh a3, 4(a0) +; RV64IM-NEXT: sh a5, 4(a0) ; RV64IM-NEXT: sh a1, 2(a0) ; RV64IM-NEXT: sh a2, 0(a0) ; RV64IM-NEXT: ret @@ -350,8 +350,8 @@ ; RV64IM-NEXT: add a3, a6, a3 ; RV64IM-NEXT: srli a3, a3, 6 ; RV64IM-NEXT: mulw a3, a3, a7 -; RV64IM-NEXT: subw a3, a4, a3 -; RV64IM-NEXT: sh a3, 6(a0) +; RV64IM-NEXT: subw a4, a4, a3 +; RV64IM-NEXT: sh a4, 6(a0) ; RV64IM-NEXT: sh a5, 4(a0) ; RV64IM-NEXT: sh a1, 2(a0) ; RV64IM-NEXT: sh a2, 0(a0) @@ -857,9 +857,9 @@ ; RV64IM-NEXT: lui a5, 1 ; RV64IM-NEXT: addiw a5, a5, 1327 ; RV64IM-NEXT: mulw a3, a3, a5 -; RV64IM-NEXT: subw a3, a4, a3 +; RV64IM-NEXT: subw a4, a4, a3 ; RV64IM-NEXT: sh zero, 0(a0) -; RV64IM-NEXT: sh a3, 6(a0) +; RV64IM-NEXT: sh a4, 6(a0) ; RV64IM-NEXT: sh a1, 2(a0) ; RV64IM-NEXT: sh a2, 4(a0) ; RV64IM-NEXT: ret @@ -1082,9 +1082,9 @@ ; RV64IM-NEXT: lui a5, 1 ; RV64IM-NEXT: addiw a5, a5, 1327 ; RV64IM-NEXT: mul a3, a3, a5 -; RV64IM-NEXT: sub a3, a4, a3 +; RV64IM-NEXT: sub a4, a4, a3 ; RV64IM-NEXT: sd zero, 0(a0) -; RV64IM-NEXT: sd a3, 24(a0) +; RV64IM-NEXT: sd a4, 24(a0) ; RV64IM-NEXT: sd a1, 8(a0) ; RV64IM-NEXT: sd a2, 16(a0) ; RV64IM-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll --- a/llvm/test/CodeGen/RISCV/xaluo.ll +++ b/llvm/test/CodeGen/RISCV/xaluo.ll @@ -1957,10 +1957,10 @@ ; RV32: # %bb.0: # %entry ; RV32-NEXT: sltu a4, a0, a2 ; RV32-NEXT: sub a5, a1, a3 -; RV32-NEXT: sub a4, a5, a4 -; RV32-NEXT: xor a4, a1, a4 -; RV32-NEXT: xor a5, a1, a3 -; RV32-NEXT: and a4, a5, a4 +; RV32-NEXT: sub a5, a5, a4 +; RV32-NEXT: xor a5, a1, a5 +; RV32-NEXT: xor a4, a1, a3 +; RV32-NEXT: and a4, a4, a5 ; RV32-NEXT: bltz a4, .LBB38_2 ; RV32-NEXT: # %bb.1: # %entry ; RV32-NEXT: mv a0, a2 @@ -1983,10 +1983,10 @@ ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sltu a4, a0, a2 ; RV32ZBA-NEXT: sub a5, a1, a3 -; RV32ZBA-NEXT: sub a4, a5, a4 -; RV32ZBA-NEXT: xor a4, a1, a4 -; RV32ZBA-NEXT: xor a5, a1, a3 -; RV32ZBA-NEXT: and a4, a5, a4 +; RV32ZBA-NEXT: sub a5, a5, a4 +; RV32ZBA-NEXT: xor a5, a1, a5 +; RV32ZBA-NEXT: xor a4, a1, a3 +; RV32ZBA-NEXT: and a4, a4, a5 ; RV32ZBA-NEXT: bltz a4, .LBB38_2 ; RV32ZBA-NEXT: # %bb.1: # %entry ; RV32ZBA-NEXT: mv a0, a2 @@ -2016,11 +2016,11 @@ ; RV32: # %bb.0: # %entry ; RV32-NEXT: sltu a0, a0, a2 ; RV32-NEXT: sub a2, a1, a3 -; RV32-NEXT: sub a0, a2, a0 -; RV32-NEXT: xor a0, a1, a0 +; RV32-NEXT: sub a2, a2, a0 +; RV32-NEXT: xor a2, a1, a2 ; RV32-NEXT: xor a1, a1, a3 -; RV32-NEXT: and a0, a1, a0 -; RV32-NEXT: slti a0, a0, 0 +; RV32-NEXT: and a1, a1, a2 +; RV32-NEXT: slti a0, a1, 0 ; RV32-NEXT: xori a0, a0, 1 ; RV32-NEXT: ret ; @@ -2037,11 +2037,11 @@ ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sltu a0, a0, a2 ; RV32ZBA-NEXT: sub a2, a1, a3 -; RV32ZBA-NEXT: sub a0, a2, a0 -; RV32ZBA-NEXT: xor a0, a1, a0 +; RV32ZBA-NEXT: sub a2, a2, a0 +; RV32ZBA-NEXT: xor a2, a1, a2 ; RV32ZBA-NEXT: xor a1, a1, a3 -; RV32ZBA-NEXT: and a0, a1, a0 -; RV32ZBA-NEXT: slti a0, a0, 0 +; RV32ZBA-NEXT: and a1, a1, a2 +; RV32ZBA-NEXT: slti a0, a1, 0 ; RV32ZBA-NEXT: xori a0, a0, 1 ; RV32ZBA-NEXT: ret ; @@ -3186,11 +3186,11 @@ ; RV32: # %bb.0: # %entry ; RV32-NEXT: sltu a0, a0, a2 ; RV32-NEXT: sub a2, a1, a3 -; RV32-NEXT: sub a0, a2, a0 -; RV32-NEXT: xor a0, a1, a0 +; RV32-NEXT: sub a2, a2, a0 +; RV32-NEXT: xor a2, a1, a2 ; RV32-NEXT: xor a1, a1, a3 -; RV32-NEXT: and a0, a1, 
a0 -; RV32-NEXT: bgez a0, .LBB57_2 +; RV32-NEXT: and a1, a1, a2 +; RV32-NEXT: bgez a1, .LBB57_2 ; RV32-NEXT: # %bb.1: # %overflow ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret @@ -3215,11 +3215,11 @@ ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: sltu a0, a0, a2 ; RV32ZBA-NEXT: sub a2, a1, a3 -; RV32ZBA-NEXT: sub a0, a2, a0 -; RV32ZBA-NEXT: xor a0, a1, a0 +; RV32ZBA-NEXT: sub a2, a2, a0 +; RV32ZBA-NEXT: xor a2, a1, a2 ; RV32ZBA-NEXT: xor a1, a1, a3 -; RV32ZBA-NEXT: and a0, a1, a0 -; RV32ZBA-NEXT: bgez a0, .LBB57_2 +; RV32ZBA-NEXT: and a1, a1, a2 +; RV32ZBA-NEXT: bgez a1, .LBB57_2 ; RV32ZBA-NEXT: # %bb.1: # %overflow ; RV32ZBA-NEXT: li a0, 0 ; RV32ZBA-NEXT: ret