diff --git a/llvm/lib/Target/RISCV/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/RISCVLegalizerInfo.h --- a/llvm/lib/Target/RISCV/RISCVLegalizerInfo.h +++ b/llvm/lib/Target/RISCV/RISCVLegalizerInfo.h @@ -18,11 +18,20 @@ namespace llvm { class RISCVSubtarget; +class MachineIRBuilder; /// This class provides the information for the target register banks. class RISCVLegalizerInfo : public LegalizerInfo { public: RISCVLegalizerInfo(const RISCVSubtarget &ST); + + bool legalizeCustom(LegalizerHelper &Helper, MachineInstr &MI) const override; + +private: + bool legalizeWOp(MachineInstr &MI, MachineRegisterInfo &MRI, + MachineIRBuilder &MIRBuilder) const; + bool legalizeWOpWithSExt(MachineInstr &MI, MachineRegisterInfo &MRI, + MachineIRBuilder &MIRBuilder) const; }; } // end namespace llvm #endif diff --git a/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp --- a/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp @@ -11,6 +11,8 @@ //===----------------------------------------------------------------------===// #include "RISCVLegalizerInfo.h" +#include "RISCVSubtarget.h" +#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" #include "llvm/CodeGen/TargetOpcodes.h" #include "llvm/CodeGen/ValueTypes.h" #include "llvm/IR/DerivedTypes.h" @@ -19,5 +21,140 @@ using namespace llvm; RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) { + const LLT s1 = LLT::scalar(1); + const LLT s32 = LLT::scalar(32); + const LLT s64 = LLT::scalar(64); + const LLT s128 = LLT::scalar(128); + + bool IsRV64 = ST.is64Bit(); + const LLT &XLenLLT = IsRV64 ? 
s64 : s32; + + using namespace TargetOpcode; + + getActionDefinitionsBuilder({G_ADD, G_SUB}) + .legalFor({XLenLLT}) + .customFor({s32}) + .clampScalar(0, XLenLLT, XLenLLT); + + getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR}) + .legalFor({{XLenLLT, XLenLLT}}) + .customFor({s32}) + .clampScalar(0, XLenLLT, XLenLLT) + .clampScalar(1, XLenLLT, XLenLLT); + + if (ST.hasStdExtM()) { + // Double XLen types can be handled through type legalization but for s128 + // on RV32 we use a libcall. + if (IsRV64) + getActionDefinitionsBuilder({G_MUL, G_SDIV, G_SREM, G_UDIV, G_UREM}) + .legalFor({XLenLLT}) + .customFor({s32}) + .clampScalar(0, XLenLLT, XLenLLT); + else + getActionDefinitionsBuilder({G_MUL, G_SDIV, G_SREM, G_UDIV, G_UREM}) + .legalFor({XLenLLT}) + .libcallFor({s128}) + .clampScalar(0, XLenLLT, XLenLLT); + } else + getActionDefinitionsBuilder({G_MUL, G_SDIV, G_SREM, G_UDIV, G_UREM}) + .libcall() + .clampScalar(0, XLenLLT, XLenLLT); + + getActionDefinitionsBuilder({G_AND, G_OR, G_XOR}) + .legalFor({XLenLLT}) + .clampScalar(0, XLenLLT, XLenLLT); + + // Split operations on double XLen types. 
+ getActionDefinitionsBuilder({G_UADDO, G_UADDE, G_USUBO, G_USUBE}) + .lowerFor({{XLenLLT, s1}}); + if (ST.hasStdExtM()) { + getActionDefinitionsBuilder(G_UMULO) + .lowerFor({{XLenLLT, s1}}); + + getActionDefinitionsBuilder(G_UMULH) + .legalFor({XLenLLT}) + .clampScalar(0, XLenLLT, XLenLLT); + } + + getActionDefinitionsBuilder(G_ICMP) + .legalFor({{XLenLLT, XLenLLT}}) + .clampScalar(0, XLenLLT, XLenLLT) + .clampScalar(1, XLenLLT, XLenLLT); + + getActionDefinitionsBuilder(G_CONSTANT) + .legalFor({XLenLLT}) + .clampScalar(0, XLenLLT, XLenLLT); + + // G_ZEXT -> G_AND + // G_SEXT -> G_SEXT_INREG + getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT}) + .legalFor({XLenLLT}) + .clampScalar(0, XLenLLT, XLenLLT) + .maxScalar(1, XLenLLT); + + // G_SEXT_INREG -> G_SHL + G_ASHR + getActionDefinitionsBuilder(G_SEXT_INREG).lower(); + getLegacyLegalizerInfo().computeTables(); } + +bool RISCVLegalizerInfo::legalizeWOpWithSExt( + MachineInstr &MI, MachineRegisterInfo &MRI, + MachineIRBuilder &MIRBuilder) const { + const LLT s64 = LLT::scalar(64); + + MIRBuilder.setInstr(MI); + + auto NewOp0 = MIRBuilder.buildAnyExt(s64, MI.getOperand(1).getReg()); + auto NewOp1 = MIRBuilder.buildAnyExt(s64, MI.getOperand(2).getReg()); + auto NewDst = MIRBuilder.buildInstr(MI.getOpcode(), {s64}, {NewOp0, NewOp1}); + + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + auto SExtDst = MIRBuilder.buildSExtInReg(s64, NewDst, 32); + MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), SExtDst); + + MI.eraseFromParent(); + return true; +} + +bool RISCVLegalizerInfo::legalizeWOp(MachineInstr &MI, MachineRegisterInfo &MRI, + MachineIRBuilder &MIRBuilder) const { + const LLT s64 = LLT::scalar(64); + + MIRBuilder.setInstr(MI); + + auto NewOp0 = MIRBuilder.buildAnyExt(s64, MI.getOperand(1).getReg()); + auto NewOp1 = MIRBuilder.buildAnyExt(s64, MI.getOperand(2).getReg()); + auto NewDst = MIRBuilder.buildInstr(MI.getOpcode(), {s64}, {NewOp0, NewOp1}); + + 
MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + auto ZExtDst = MIRBuilder.buildZExtInReg(s64, NewDst, 32); + MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), ZExtDst); + + MI.eraseFromParent(); + return true; +} + +bool RISCVLegalizerInfo::legalizeCustom(LegalizerHelper &Helper, + MachineInstr &MI) const { + MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; + MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); + + switch (MI.getOpcode()) { + case TargetOpcode::G_ADD: + case TargetOpcode::G_SUB: + case TargetOpcode::G_MUL: + case TargetOpcode::G_SDIV: + case TargetOpcode::G_SREM: + return legalizeWOpWithSExt(MI, MRI, MIRBuilder); + case TargetOpcode::G_SHL: + case TargetOpcode::G_ASHR: + case TargetOpcode::G_LSHR: + case TargetOpcode::G_UDIV: + case TargetOpcode::G_UREM: + return legalizeWOp(MI, MRI, MIRBuilder); + default: + return false; + } + return true; +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu32.mir new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu32.mir @@ -0,0 +1,787 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -march=riscv32 -run-pass=legalizer -simplify-mir -verify-machineinstrs %s -o - \ +# RUN: | FileCheck -check-prefix=RV32I %s +# RUN: llc -march=riscv32 -mattr=+m -run-pass=legalizer -simplify-mir -verify-machineinstrs %s -o - \ +# RUN: | FileCheck -check-prefix=RV32IM %s + +--- +name: add_i8 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i8 + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32I-NEXT: $x10 = COPY [[ADD]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i8 + ; RV32IM: liveins: $x10, $x11 + ; 
RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32IM-NEXT: $x10 = COPY [[ADD]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s32) + %4:_(s8) = G_ADD %0, %1 + %5:_(s32) = G_ANYEXT %4(s8) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... +--- +name: add_i8_signext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i8_signext + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 + ; RV32I-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ADD]], [[C]](s32) + ; RV32I-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32) + ; RV32I-NEXT: $x10 = COPY [[ASHR]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i8_signext + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32IM-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 + ; RV32IM-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ADD]], [[C]](s32) + ; RV32IM-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32) + ; RV32IM-NEXT: $x10 = COPY [[ASHR]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s32) + %4:_(s8) = G_ADD %0, %1 + %5:_(s32) = G_SEXT %4(s8) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... 
+--- +name: add_i8_zeroext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i8_zeroext + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; RV32I-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C]] + ; RV32I-NEXT: $x10 = COPY [[AND]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i8_zeroext + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32IM-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; RV32IM-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C]] + ; RV32IM-NEXT: $x10 = COPY [[AND]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s32) + %4:_(s8) = G_ADD %0, %1 + %5:_(s32) = G_ZEXT %4(s8) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... 
+--- +name: add_i16 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i16 + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32I-NEXT: $x10 = COPY [[ADD]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i16 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32IM-NEXT: $x10 = COPY [[ADD]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s32) + %4:_(s16) = G_ADD %0, %1 + %5:_(s32) = G_ANYEXT %4(s16) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... +--- +name: add_i16_signext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i16_signext + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; RV32I-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ADD]], [[C]](s32) + ; RV32I-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32) + ; RV32I-NEXT: $x10 = COPY [[ASHR]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i16_signext + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32IM-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; RV32IM-NEXT: 
[[SHL:%[0-9]+]]:_(s32) = G_SHL [[ADD]], [[C]](s32) + ; RV32IM-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32) + ; RV32IM-NEXT: $x10 = COPY [[ASHR]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s32) + %4:_(s16) = G_ADD %0, %1 + %5:_(s32) = G_SEXT %4(s16) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... +--- +name: add_i16_zeroext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + + ; RV32I-LABEL: name: add_i16_zeroext + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 + ; RV32I-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C]] + ; RV32I-NEXT: $x10 = COPY [[AND]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i16_zeroext + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32IM-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 + ; RV32IM-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C]] + ; RV32IM-NEXT: $x10 = COPY [[AND]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s32) + %4:_(s16) = G_ADD %0, %1 + %5:_(s32) = G_ZEXT %4(s16) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... 
+--- +name: add_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32I-NEXT: $x10 = COPY [[ADD]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32IM-NEXT: $x10 = COPY [[ADD]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_ADD %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: sub_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: sub_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY1]] + ; RV32I-NEXT: $x10 = COPY [[SUB]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: sub_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY1]] + ; RV32IM-NEXT: $x10 = COPY [[SUB]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_SUB %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: shl_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: shl_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY1]](s32) + ; RV32I-NEXT: $x10 = COPY [[SHL]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: shl_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY1]](s32) + ; RV32IM-NEXT: $x10 = COPY [[SHL]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_SHL %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: ashr_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: ashr_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[COPY1]](s32) + ; RV32I-NEXT: $x10 = COPY [[ASHR]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: ashr_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[COPY1]](s32) + ; RV32IM-NEXT: $x10 = COPY [[ASHR]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_ASHR %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: lshr_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: lshr_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[COPY1]](s32) + ; RV32I-NEXT: $x10 = COPY [[LSHR]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: lshr_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[COPY1]](s32) + ; RV32IM-NEXT: $x10 = COPY [[LSHR]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_LSHR %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: and_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: and_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[COPY1]] + ; RV32I-NEXT: $x10 = COPY [[AND]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: and_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[COPY1]] + ; RV32IM-NEXT: $x10 = COPY [[AND]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_AND %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: or_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: or_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]] + ; RV32I-NEXT: $x10 = COPY [[OR]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: or_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]] + ; RV32IM-NEXT: $x10 = COPY [[OR]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_OR %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: xor_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: xor_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[COPY1]] + ; RV32I-NEXT: $x10 = COPY [[XOR]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: xor_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[COPY1]] + ; RV32IM-NEXT: $x10 = COPY [[XOR]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_XOR %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: mul_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: mul_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: $x10 = COPY [[COPY]](s32) + ; RV32I-NEXT: $x11 = COPY [[COPY1]](s32) + ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) &__mulsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: $x10 = COPY [[COPY2]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: mul_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]] + ; RV32IM-NEXT: $x10 = COPY [[MUL]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_MUL %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: sdiv_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: sdiv_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: $x10 = COPY [[COPY]](s32) + ; RV32I-NEXT: $x11 = COPY [[COPY1]](s32) + ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) &__divsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: $x10 = COPY [[COPY2]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: sdiv_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[COPY]], [[COPY1]] + ; RV32IM-NEXT: $x10 = COPY [[SDIV]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_SDIV %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: srem_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: srem_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: $x10 = COPY [[COPY]](s32) + ; RV32I-NEXT: $x11 = COPY [[COPY1]](s32) + ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) &__modsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: $x10 = COPY [[COPY2]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: srem_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[SREM:%[0-9]+]]:_(s32) = G_SREM [[COPY]], [[COPY1]] + ; RV32IM-NEXT: $x10 = COPY [[SREM]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_SREM %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: udiv_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: udiv_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: $x10 = COPY [[COPY]](s32) + ; RV32I-NEXT: $x11 = COPY [[COPY1]](s32) + ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) &__udivsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: $x10 = COPY [[COPY2]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: udiv_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[UDIV:%[0-9]+]]:_(s32) = G_UDIV [[COPY]], [[COPY1]] + ; RV32IM-NEXT: $x10 = COPY [[UDIV]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_UDIV %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: urem_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: urem_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: $x10 = COPY [[COPY]](s32) + ; RV32I-NEXT: $x11 = COPY [[COPY1]](s32) + ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) &__umodsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: $x10 = COPY [[COPY2]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: urem_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[UREM:%[0-9]+]]:_(s32) = G_UREM [[COPY]], [[COPY1]] + ; RV32IM-NEXT: $x10 = COPY [[UREM]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_UREM %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: add_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11, $x12, $x13 + + ; RV32I-LABEL: name: add_i64 + ; RV32I: liveins: $x10, $x11, $x12, $x13 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32I-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY2]] + ; RV32I-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]] + ; RV32I-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]] + ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32I-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]] + ; RV32I-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[AND]] + ; RV32I-NEXT: $x10 = COPY [[ADD]](s32) + ; RV32I-NEXT: $x11 = COPY [[ADD2]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10, implicit $x11 + ; RV32IM-LABEL: name: add_i64 + ; RV32IM: liveins: $x10, $x11, $x12, $x13 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32IM-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32IM-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY2]] + ; RV32IM-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]] + ; RV32IM-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]] + ; RV32IM-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32IM-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]] + ; RV32IM-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[AND]] + ; RV32IM-NEXT: $x10 = COPY [[ADD]](s32) + ; RV32IM-NEXT: $x11 = COPY [[ADD2]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10, implicit $x11 + %2:_(s32) = COPY $x10 + %3:_(s32) = COPY $x11 + %4:_(s32) = COPY $x12 + %5:_(s32) = COPY $x13 + %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32) + %0:_(s64) = G_MERGE_VALUES 
%2(s32), %3(s32) + %6:_(s64) = G_ADD %0, %1 + %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64) + $x10 = COPY %7(s32) + $x11 = COPY %8(s32) + PseudoRET implicit $x10, implicit $x11 + +... +--- +name: sub_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11, $x12, $x13 + + ; RV32I-LABEL: name: sub_i64 + ; RV32I: liveins: $x10, $x11, $x12, $x13 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32I-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32I-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY2]] + ; RV32I-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY2]] + ; RV32I-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[COPY3]] + ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32I-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]] + ; RV32I-NEXT: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[AND]] + ; RV32I-NEXT: $x10 = COPY [[SUB]](s32) + ; RV32I-NEXT: $x11 = COPY [[SUB2]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10, implicit $x11 + ; RV32IM-LABEL: name: sub_i64 + ; RV32IM: liveins: $x10, $x11, $x12, $x13 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32IM-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32IM-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY2]] + ; RV32IM-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY2]] + ; RV32IM-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[COPY3]] + ; RV32IM-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32IM-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]] + ; RV32IM-NEXT: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[AND]] + ; RV32IM-NEXT: $x10 = COPY [[SUB]](s32) + ; RV32IM-NEXT: $x11 = COPY [[SUB2]](s32) + ; RV32IM-NEXT: PseudoRET implicit 
$x10, implicit $x11 + %2:_(s32) = COPY $x10 + %3:_(s32) = COPY $x11 + %4:_(s32) = COPY $x12 + %5:_(s32) = COPY $x13 + %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32) + %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32) + %6:_(s64) = G_SUB %0, %1 + %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64) + $x10 = COPY %7(s32) + $x11 = COPY %8(s32) + PseudoRET implicit $x10, implicit $x11 + +... +--- +name: mul_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11, $x12, $x13 + + ; RV32I-LABEL: name: mul_i64 + ; RV32I: liveins: $x10, $x11, $x12, $x13 + ; RV32I-NEXT: {{ $}} + ; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32I-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32I-NEXT: $x10 = COPY [[COPY]](s32) + ; RV32I-NEXT: $x11 = COPY [[COPY1]](s32) + ; RV32I-NEXT: $x12 = COPY [[COPY2]](s32) + ; RV32I-NEXT: $x13 = COPY [[COPY3]](s32) + ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11 + ; RV32I-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: $x10 = COPY [[COPY4]](s32) + ; RV32I-NEXT: $x11 = COPY [[COPY5]](s32) + ; RV32I-NEXT: PseudoRET implicit $x10, implicit $x11 + ; RV32IM-LABEL: name: mul_i64 + ; RV32IM: liveins: $x10, $x11, $x12, $x13 + ; RV32IM-NEXT: {{ $}} + ; RV32IM-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32IM-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32IM-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY2]] + ; RV32IM-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[COPY1]], [[COPY2]] + ; RV32IM-NEXT: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY3]] + ; RV32IM-NEXT: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[COPY]], [[COPY2]] + ; RV32IM-NEXT: 
[[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL1]], [[MUL2]] + ; RV32IM-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[UMULH]] + ; RV32IM-NEXT: $x10 = COPY [[MUL]](s32) + ; RV32IM-NEXT: $x11 = COPY [[ADD1]](s32) + ; RV32IM-NEXT: PseudoRET implicit $x10, implicit $x11 + %2:_(s32) = COPY $x10 + %3:_(s32) = COPY $x11 + %4:_(s32) = COPY $x12 + %5:_(s32) = COPY $x13 + %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32) + %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32) + %6:_(s64) = G_MUL %0, %1 + %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64) + $x10 = COPY %7(s32) + $x11 = COPY %8(s32) + PseudoRET implicit $x10, implicit $x11 + +... diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu64.mir new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu64.mir @@ -0,0 +1,1169 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -march=riscv64 -run-pass=legalizer -simplify-mir -verify-machineinstrs %s -o - \ +# RUN: | FileCheck -check-prefix=RV64I %s +# RUN: llc -march=riscv64 -mattr=+m -run-pass=legalizer -simplify-mir -verify-machineinstrs %s -o - \ +# RUN: | FileCheck -check-prefix=RV64IM %s + +--- +name: add_i8 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i8 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64I-NEXT: $x10 = COPY [[ADD]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i8 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64IM-NEXT: $x10 = COPY [[ADD]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + 
%2:_(s64) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s64) + %4:_(s8) = G_ADD %0, %1 + %5:_(s64) = G_ANYEXT %4(s8) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... +--- +name: add_i8_signext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i8_signext + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 56 + ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ADD]], [[C]](s64) + ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64I-NEXT: $x10 = COPY [[ASHR]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i8_signext + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 56 + ; RV64IM-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ADD]], [[C]](s64) + ; RV64IM-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64IM-NEXT: $x10 = COPY [[ASHR]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s64) + %4:_(s8) = G_ADD %0, %1 + %5:_(s64) = G_SEXT %4(s8) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: add_i8_zeroext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i8_zeroext + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 255 + ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C]] + ; RV64I-NEXT: $x10 = COPY [[AND]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i8_zeroext + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 255 + ; RV64IM-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C]] + ; RV64IM-NEXT: $x10 = COPY [[AND]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s64) + %4:_(s8) = G_ADD %0, %1 + %5:_(s64) = G_ZEXT %4(s8) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: add_i16 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i16 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64I-NEXT: $x10 = COPY [[ADD]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i16 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64IM-NEXT: $x10 = COPY [[ADD]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s64) + %4:_(s16) = G_ADD %0, %1 + %5:_(s64) = G_ANYEXT %4(s16) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... +--- +name: add_i16_signext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i16_signext + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 + ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ADD]], [[C]](s64) + ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64I-NEXT: $x10 = COPY [[ASHR]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i16_signext + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 + ; RV64IM-NEXT: 
[[SHL:%[0-9]+]]:_(s64) = G_SHL [[ADD]], [[C]](s64) + ; RV64IM-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64IM-NEXT: $x10 = COPY [[ASHR]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s64) + %4:_(s16) = G_ADD %0, %1 + %5:_(s64) = G_SEXT %4(s16) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... +--- +name: add_i16_zeroext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + + ; RV64I-LABEL: name: add_i16_zeroext + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535 + ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C]] + ; RV64I-NEXT: $x10 = COPY [[AND]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i16_zeroext + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535 + ; RV64IM-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C]] + ; RV64IM-NEXT: $x10 = COPY [[AND]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s64) + %4:_(s16) = G_ADD %0, %1 + %5:_(s64) = G_ZEXT %4(s16) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: add_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 + ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ADD]], [[C]](s64) + ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64I-NEXT: $x10 = COPY [[ASHR]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 + ; RV64IM-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ADD]], [[C]](s64) + ; RV64IM-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64IM-NEXT: $x10 = COPY [[ASHR]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_ADD %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: sub_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: sub_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY1]] + ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 + ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SUB]], [[C]](s64) + ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64I-NEXT: $x10 = COPY [[ASHR]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: sub_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY1]] + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 + ; RV64IM-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SUB]], [[C]](s64) + ; RV64IM-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64IM-NEXT: $x10 = COPY [[ASHR]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_SUB %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: shl_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: shl_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[COPY1]](s64) + ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C]] + ; RV64I-NEXT: $x10 = COPY [[AND]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: shl_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[COPY1]](s64) + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64IM-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[SHL]], [[C]] + ; RV64IM-NEXT: $x10 = COPY [[AND]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_SHL %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: ashr_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: ashr_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[COPY1]](s64) + ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ASHR]], [[C]] + ; RV64I-NEXT: $x10 = COPY [[AND]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: ashr_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[COPY1]](s64) + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64IM-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ASHR]], [[C]] + ; RV64IM-NEXT: $x10 = COPY [[AND]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_ASHR %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: lshr_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: lshr_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY]], [[COPY1]](s64) + ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C]] + ; RV64I-NEXT: $x10 = COPY [[AND]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: lshr_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY]], [[COPY1]](s64) + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64IM-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C]] + ; RV64IM-NEXT: $x10 = COPY [[AND]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_LSHR %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: mul_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: mul_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: $x10 = COPY [[COPY]](s64) + ; RV64I-NEXT: $x11 = COPY [[COPY1]](s64) + ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) &__mulsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: $x10 = COPY [[COPY2]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: mul_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]] + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 + ; RV64IM-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[MUL]], [[C]](s64) + ; RV64IM-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64IM-NEXT: $x10 = COPY [[ASHR]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_MUL %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: sdiv_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: sdiv_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: $x10 = COPY [[COPY]](s64) + ; RV64I-NEXT: $x11 = COPY [[COPY1]](s64) + ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) &__divsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: $x10 = COPY [[COPY2]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: sdiv_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[SDIV:%[0-9]+]]:_(s64) = G_SDIV [[COPY]], [[COPY1]] + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 + ; RV64IM-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SDIV]], [[C]](s64) + ; RV64IM-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64IM-NEXT: $x10 = COPY [[ASHR]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_SDIV %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: srem_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: srem_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: $x10 = COPY [[COPY]](s64) + ; RV64I-NEXT: $x11 = COPY [[COPY1]](s64) + ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) &__modsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: $x10 = COPY [[COPY2]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: srem_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[SREM:%[0-9]+]]:_(s64) = G_SREM [[COPY]], [[COPY1]] + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 + ; RV64IM-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SREM]], [[C]](s64) + ; RV64IM-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64IM-NEXT: $x10 = COPY [[ASHR]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_SREM %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: udiv_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: udiv_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: $x10 = COPY [[COPY]](s64) + ; RV64I-NEXT: $x11 = COPY [[COPY1]](s64) + ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) &__udivsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: $x10 = COPY [[COPY2]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: udiv_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[UDIV:%[0-9]+]]:_(s64) = G_UDIV [[COPY]], [[COPY1]] + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64IM-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[UDIV]], [[C]] + ; RV64IM-NEXT: $x10 = COPY [[AND]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_UDIV %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: urem_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: urem_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: $x10 = COPY [[COPY]](s64) + ; RV64I-NEXT: $x11 = COPY [[COPY1]](s64) + ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) &__umodsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: $x10 = COPY [[COPY2]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: urem_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[UREM:%[0-9]+]]:_(s64) = G_UREM [[COPY]], [[COPY1]] + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64IM-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[UREM]], [[C]] + ; RV64IM-NEXT: $x10 = COPY [[AND]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_UREM %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: add_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64I-NEXT: $x10 = COPY [[ADD]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64IM-NEXT: $x10 = COPY [[ADD]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_ADD %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... +--- +name: sub_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: sub_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY1]] + ; RV64I-NEXT: $x10 = COPY [[SUB]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: sub_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY1]] + ; RV64IM-NEXT: $x10 = COPY [[SUB]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_SUB %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... 
+--- +name: shl_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: shl_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[COPY1]](s64) + ; RV64I-NEXT: $x10 = COPY [[SHL]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: shl_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[COPY1]](s64) + ; RV64IM-NEXT: $x10 = COPY [[SHL]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_SHL %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... +--- +name: ashr_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: ashr_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[COPY1]](s64) + ; RV64I-NEXT: $x10 = COPY [[ASHR]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: ashr_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[COPY1]](s64) + ; RV64IM-NEXT: $x10 = COPY [[ASHR]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_ASHR %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... 
+--- +name: lshr_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: lshr_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY]], [[COPY1]](s64) + ; RV64I-NEXT: $x10 = COPY [[LSHR]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: lshr_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY]], [[COPY1]](s64) + ; RV64IM-NEXT: $x10 = COPY [[LSHR]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_LSHR %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... +--- +name: and_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: and_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[COPY1]] + ; RV64I-NEXT: $x10 = COPY [[AND]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: and_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[COPY1]] + ; RV64IM-NEXT: $x10 = COPY [[AND]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_AND %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... 
+--- +name: or_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: or_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[COPY1]] + ; RV64I-NEXT: $x10 = COPY [[OR]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: or_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[COPY1]] + ; RV64IM-NEXT: $x10 = COPY [[OR]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_OR %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... +--- +name: xor_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: xor_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[COPY1]] + ; RV64I-NEXT: $x10 = COPY [[XOR]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: xor_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[COPY1]] + ; RV64IM-NEXT: $x10 = COPY [[XOR]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_XOR %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... 
+--- +name: mul_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: mul_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: $x10 = COPY [[COPY]](s64) + ; RV64I-NEXT: $x11 = COPY [[COPY1]](s64) + ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: $x10 = COPY [[COPY2]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: mul_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]] + ; RV64IM-NEXT: $x10 = COPY [[MUL]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_MUL %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... 
+--- +name: sdiv_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: sdiv_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: $x10 = COPY [[COPY]](s64) + ; RV64I-NEXT: $x11 = COPY [[COPY1]](s64) + ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) &__divdi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: $x10 = COPY [[COPY2]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: sdiv_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[SDIV:%[0-9]+]]:_(s64) = G_SDIV [[COPY]], [[COPY1]] + ; RV64IM-NEXT: $x10 = COPY [[SDIV]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_SDIV %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... 
+--- +name: srem_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: srem_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: $x10 = COPY [[COPY]](s64) + ; RV64I-NEXT: $x11 = COPY [[COPY1]](s64) + ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) &__moddi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: $x10 = COPY [[COPY2]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: srem_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[SREM:%[0-9]+]]:_(s64) = G_SREM [[COPY]], [[COPY1]] + ; RV64IM-NEXT: $x10 = COPY [[SREM]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_SREM %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... 
+--- +name: udiv_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: udiv_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: $x10 = COPY [[COPY]](s64) + ; RV64I-NEXT: $x11 = COPY [[COPY1]](s64) + ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) &__udivdi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: $x10 = COPY [[COPY2]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: udiv_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[UDIV:%[0-9]+]]:_(s64) = G_UDIV [[COPY]], [[COPY1]] + ; RV64IM-NEXT: $x10 = COPY [[UDIV]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_UDIV %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... 
+--- +name: urem_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: urem_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: $x10 = COPY [[COPY]](s64) + ; RV64I-NEXT: $x11 = COPY [[COPY1]](s64) + ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) &__umoddi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: $x10 = COPY [[COPY2]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: urem_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[UREM:%[0-9]+]]:_(s64) = G_UREM [[COPY]], [[COPY1]] + ; RV64IM-NEXT: $x10 = COPY [[UREM]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_UREM %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... 
+--- +name: add_i128 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11, $x12, $x13 + + ; RV64I-LABEL: name: add_i128 + ; RV64I: liveins: $x10, $x11, $x12, $x13 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12 + ; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13 + ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY2]] + ; RV64I-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), [[COPY2]] + ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[COPY3]] + ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 + ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ICMP]], [[C]] + ; RV64I-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[AND]] + ; RV64I-NEXT: $x10 = COPY [[ADD]](s64) + ; RV64I-NEXT: $x11 = COPY [[ADD2]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10, implicit $x11 + ; RV64IM-LABEL: name: add_i128 + ; RV64IM: liveins: $x10, $x11, $x12, $x13 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12 + ; RV64IM-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13 + ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY2]] + ; RV64IM-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), [[COPY2]] + ; RV64IM-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[COPY3]] + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 + ; RV64IM-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ICMP]], [[C]] + ; RV64IM-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[AND]] + ; RV64IM-NEXT: $x10 = COPY [[ADD]](s64) + ; RV64IM-NEXT: $x11 = COPY [[ADD2]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10, implicit $x11 + %2:_(s64) = COPY $x10 + %3:_(s64) = COPY $x11 + %4:_(s64) = COPY $x12 + %5:_(s64) = COPY $x13 + %1:_(s128) = G_MERGE_VALUES %4(s64), %5(s64) + %0:_(s128) = 
G_MERGE_VALUES %2(s64), %3(s64) + %6:_(s128) = G_ADD %0, %1 + %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %6(s128) + $x10 = COPY %7(s64) + $x11 = COPY %8(s64) + PseudoRET implicit $x10, implicit $x11 + +... +--- +name: sub_i128 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11, $x12, $x13 + + ; RV64I-LABEL: name: sub_i128 + ; RV64I: liveins: $x10, $x11, $x12, $x13 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12 + ; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13 + ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY2]] + ; RV64I-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY2]] + ; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[COPY1]], [[COPY3]] + ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 + ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ICMP]], [[C]] + ; RV64I-NEXT: [[SUB2:%[0-9]+]]:_(s64) = G_SUB [[SUB1]], [[AND]] + ; RV64I-NEXT: $x10 = COPY [[SUB]](s64) + ; RV64I-NEXT: $x11 = COPY [[SUB2]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10, implicit $x11 + ; RV64IM-LABEL: name: sub_i128 + ; RV64IM: liveins: $x10, $x11, $x12, $x13 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12 + ; RV64IM-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13 + ; RV64IM-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY2]] + ; RV64IM-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY2]] + ; RV64IM-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[COPY1]], [[COPY3]] + ; RV64IM-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1 + ; RV64IM-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ICMP]], [[C]] + ; RV64IM-NEXT: [[SUB2:%[0-9]+]]:_(s64) = G_SUB [[SUB1]], [[AND]] + ; RV64IM-NEXT: $x10 = COPY [[SUB]](s64) + ; RV64IM-NEXT: $x11 = COPY [[SUB2]](s64) + ; RV64IM-NEXT: 
PseudoRET implicit $x10, implicit $x11 + %2:_(s64) = COPY $x10 + %3:_(s64) = COPY $x11 + %4:_(s64) = COPY $x12 + %5:_(s64) = COPY $x13 + %1:_(s128) = G_MERGE_VALUES %4(s64), %5(s64) + %0:_(s128) = G_MERGE_VALUES %2(s64), %3(s64) + %6:_(s128) = G_SUB %0, %1 + %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %6(s128) + $x10 = COPY %7(s64) + $x11 = COPY %8(s64) + PseudoRET implicit $x10, implicit $x11 + +... +--- +name: mul_i128 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11, $x12, $x13 + + ; RV64I-LABEL: name: mul_i128 + ; RV64I: liveins: $x10, $x11, $x12, $x13 + ; RV64I-NEXT: {{ $}} + ; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12 + ; RV64I-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13 + ; RV64I-NEXT: $x10 = COPY [[COPY]](s64) + ; RV64I-NEXT: $x11 = COPY [[COPY1]](s64) + ; RV64I-NEXT: $x12 = COPY [[COPY2]](s64) + ; RV64I-NEXT: $x13 = COPY [[COPY3]](s64) + ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) &__multi3, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11 + ; RV64I-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I-NEXT: $x10 = COPY [[COPY4]](s64) + ; RV64I-NEXT: $x11 = COPY [[COPY5]](s64) + ; RV64I-NEXT: PseudoRET implicit $x10, implicit $x11 + ; RV64IM-LABEL: name: mul_i128 + ; RV64IM: liveins: $x10, $x11, $x12, $x13 + ; RV64IM-NEXT: {{ $}} + ; RV64IM-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12 + ; RV64IM-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13 + ; RV64IM-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY2]] + ; RV64IM-NEXT: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[COPY1]], [[COPY2]] + ; RV64IM-NEXT: [[MUL2:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY3]] + ; RV64IM-NEXT: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[COPY]], 
[[COPY2]] + ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MUL1]], [[MUL2]] + ; RV64IM-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ADD]], [[UMULH]] + ; RV64IM-NEXT: $x10 = COPY [[MUL]](s64) + ; RV64IM-NEXT: $x11 = COPY [[ADD1]](s64) + ; RV64IM-NEXT: PseudoRET implicit $x10, implicit $x11 + %2:_(s64) = COPY $x10 + %3:_(s64) = COPY $x11 + %4:_(s64) = COPY $x12 + %5:_(s64) = COPY $x13 + %1:_(s128) = G_MERGE_VALUES %4(s64), %5(s64) + %0:_(s128) = G_MERGE_VALUES %2(s64), %3(s64) + %6:_(s128) = G_MUL %0, %1 + %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %6(s128) + $x10 = COPY %7(s64) + $x11 = COPY %8(s64) + PseudoRET implicit $x10, implicit $x11 + +...