diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp --- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -403,6 +403,8 @@ assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size"); switch (Opcode) { + case TargetOpcode::G_MUL: + RTLIBCASE(MUL_I); case TargetOpcode::G_SDIV: RTLIBCASE(SDIV_I); case TargetOpcode::G_UDIV: @@ -639,6 +641,7 @@ switch (MI.getOpcode()) { default: return UnableToLegalize; + case TargetOpcode::G_MUL: case TargetOpcode::G_SDIV: case TargetOpcode::G_UDIV: case TargetOpcode::G_SREM: diff --git a/llvm/lib/Target/RISCV/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/RISCVLegalizerInfo.h --- a/llvm/lib/Target/RISCV/RISCVLegalizerInfo.h +++ b/llvm/lib/Target/RISCV/RISCVLegalizerInfo.h @@ -23,6 +23,16 @@ class RISCVLegalizerInfo : public LegalizerInfo { public: RISCVLegalizerInfo(const RISCVSubtarget &ST); + + bool legalizeCustom(MachineInstr &MI, MachineRegisterInfo &MRI, + MachineIRBuilder &MIRBuilder, + GISelChangeObserver &Observer) const override; + +private: + bool legalizeWOp(MachineInstr &MI, MachineRegisterInfo &MRI, + MachineIRBuilder &MIRBuilder) const; + bool legalizeWOpWithSExt(MachineInstr &MI, MachineRegisterInfo &MRI, + MachineIRBuilder &MIRBuilder) const; }; } // end namespace llvm #endif diff --git a/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp --- a/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp @@ -11,6 +11,8 @@ //===----------------------------------------------------------------------===// #include "RISCVLegalizerInfo.h" +#include "RISCVSubtarget.h" +#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" #include "llvm/CodeGen/TargetOpcodes.h" #include "llvm/CodeGen/ValueTypes.h" #include "llvm/IR/DerivedTypes.h" @@ -19,5 +21,210 @@ using namespace llvm; RISCVLegalizerInfo::RISCVLegalizerInfo(const 
RISCVSubtarget &ST) { + const LLT s1 = LLT::scalar(1); + const LLT s32 = LLT::scalar(32); + const LLT s64 = LLT::scalar(64); + const LLT s128 = LLT::scalar(128); + + bool IsRV64 = ST.is64Bit(); + const LLT &XLenLLT = IsRV64 ? s64 : s32; + + using namespace TargetOpcode; + + if (IsRV64) { + // Account for availability of single word instructions on RV64. + + getActionDefinitionsBuilder({G_ADD, G_SUB}) + .customFor({s32}) + .legalFor({s64}) + .clampScalar(0, s64, s64); + + getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR}) + .customFor({{s32, s32}}) + .legalFor({{s64, s64}}) + .clampScalar(0, s64, s64) + .clampScalar(1, s64, s64); + + if (ST.hasStdExtM()) { + getActionDefinitionsBuilder({G_MUL, G_SDIV, G_SREM, G_UDIV, G_UREM}) + .customFor({s32}) + .legalFor({s64}) + .clampScalar(0, s64, s64); + } else { + getActionDefinitionsBuilder({G_MUL, G_SDIV, G_SREM, G_UDIV, G_UREM}) + .libcallFor({s64, s128}) + .clampScalar(0, s64, s128); + } + } else { + getActionDefinitionsBuilder({G_ADD, G_SUB}) + .legalFor({s32}) + .clampScalar(0, s32, s32); + + getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR}) + .legalFor({{s32, s32}}) + .clampScalar(0, s32, s32) + .clampScalar(1, s32, s32); + + if (ST.hasStdExtM()) { + getActionDefinitionsBuilder({G_MUL, G_SDIV, G_SREM, G_UDIV, G_UREM}) + .legalFor({s32}) + .libcallFor({s128}) + .clampScalar(0, s32, s32); + } else { + getActionDefinitionsBuilder({G_MUL, G_SDIV, G_SREM, G_UDIV, G_UREM}) + .libcallFor({s32, s64, s128}) + .clampScalar(0, s32, s128); + } + } + + getActionDefinitionsBuilder({G_AND, G_OR, G_XOR}) + .legalFor({XLenLLT}) + .clampScalar(0, XLenLLT, XLenLLT); + + // Split operations on double XLen types. 
+ getActionDefinitionsBuilder({G_UADDO, G_UADDE, G_USUBO, G_USUBE}) + .lowerFor({{XLenLLT, s1}}); + if (ST.hasStdExtM()) { + getActionDefinitionsBuilder(G_UMULO) + .lowerFor({{XLenLLT, s1}}); + + getActionDefinitionsBuilder(G_UMULH) + .legalFor({XLenLLT}) + .clampScalar(0, XLenLLT, XLenLLT); + } + + getActionDefinitionsBuilder(G_ICMP) + .legalFor({XLenLLT, XLenLLT}) + .clampScalar(0, XLenLLT, XLenLLT) + .clampScalar(1, XLenLLT, XLenLLT); + + getActionDefinitionsBuilder(G_CONSTANT) + .legalFor({XLenLLT}) + .clampScalar(0, XLenLLT, XLenLLT); + + // G_ZEXT -> G_AND + // G_SEXT -> G_SEXT_INREG + getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT}) + .legalFor({XLenLLT}) + .clampScalar(0, XLenLLT, XLenLLT) + .maxScalar(1, XLenLLT); + + // G_SEXT_INREG -> G_SHL + G_ASHR + // TODO: Find a way to selectively avoid lowering extends from 32 bits. + getActionDefinitionsBuilder(G_SEXT_INREG).lower(); + computeTables(); } + +static unsigned getRISCVWOpcodeWithSExt(unsigned Opcode) { + switch (Opcode) { + default: + llvm_unreachable("Unexpected opcode"); + case TargetOpcode::G_ADD: + return RISCV::ADDW; + case TargetOpcode::G_SUB: + return RISCV::SUBW; + case TargetOpcode::G_MUL: + return RISCV::MULW; + case TargetOpcode::G_SDIV: + return RISCV::DIVW; + case TargetOpcode::G_SREM: + return RISCV::REMW; + } +} + +// TODO: For now this function is identical to legalizeWOp. When a way to +// preserve G_SEXT_INREG for this pattern only is found, this should be updated. 
+bool RISCVLegalizerInfo::legalizeWOpWithSExt( + MachineInstr &MI, MachineRegisterInfo &MRI, + MachineIRBuilder &MIRBuilder) const { + const RISCVSubtarget &ST = + static_cast<const RISCVSubtarget &>(MI.getMF()->getSubtarget()); + const LLT s64 = LLT::scalar(64); + + MIRBuilder.setInstr(MI); + + Register NewOp0 = MRI.createGenericVirtualRegister(s64); + MIRBuilder.buildAnyExt({NewOp0}, {MI.getOperand(1).getReg()}); + Register NewOp1 = MRI.createGenericVirtualRegister(s64); + MIRBuilder.buildAnyExt({NewOp1}, {MI.getOperand(2).getReg()}); + + Register NewDst = MRI.createGenericVirtualRegister(s64); + MIRBuilder + .buildInstr(getRISCVWOpcodeWithSExt(MI.getOpcode()), {NewDst}, + {NewOp0, NewOp1}) + .constrainAllUses(MIRBuilder.getTII(), *ST.getRegisterInfo(), + *ST.getRegBankInfo()); + + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + MIRBuilder.buildTrunc({MI.getOperand(0).getReg()}, {NewDst}); + + MI.eraseFromParent(); + return true; +} + +static unsigned getRISCVWOpcode(unsigned Opcode) { + switch (Opcode) { + default: + llvm_unreachable("Unexpected opcode"); + case TargetOpcode::G_SHL: + return RISCV::SLLW; + case TargetOpcode::G_ASHR: + return RISCV::SRAW; + case TargetOpcode::G_LSHR: + return RISCV::SRLW; + case TargetOpcode::G_UDIV: + return RISCV::DIVUW; + case TargetOpcode::G_UREM: + return RISCV::REMUW; + } +} + +bool RISCVLegalizerInfo::legalizeWOp(MachineInstr &MI, MachineRegisterInfo &MRI, + MachineIRBuilder &MIRBuilder) const { + const RISCVSubtarget &ST = + static_cast<const RISCVSubtarget &>(MI.getMF()->getSubtarget()); + const LLT s64 = LLT::scalar(64); + + MIRBuilder.setInstr(MI); + + Register NewOp0 = MRI.createGenericVirtualRegister(s64); + MIRBuilder.buildAnyExt({NewOp0}, {MI.getOperand(1).getReg()}); + Register NewOp1 = MRI.createGenericVirtualRegister(s64); + MIRBuilder.buildAnyExt({NewOp1}, {MI.getOperand(2).getReg()}); + + Register NewDst = MRI.createGenericVirtualRegister(s64); + MIRBuilder + .buildInstr(getRISCVWOpcode(MI.getOpcode()), {NewDst}, {NewOp0, NewOp1}) 
+ .constrainAllUses(MIRBuilder.getTII(), *ST.getRegisterInfo(), + *ST.getRegBankInfo()); + + MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); + MIRBuilder.buildTrunc({MI.getOperand(0).getReg()}, {NewDst}); + + MI.eraseFromParent(); + return true; +} + +bool RISCVLegalizerInfo::legalizeCustom(MachineInstr &MI, + MachineRegisterInfo &MRI, + MachineIRBuilder &MIRBuilder, + GISelChangeObserver &Observer) const { + switch (MI.getOpcode()) { + case TargetOpcode::G_ADD: + case TargetOpcode::G_SUB: + case TargetOpcode::G_MUL: + case TargetOpcode::G_SDIV: + case TargetOpcode::G_SREM: + return legalizeWOpWithSExt(MI, MRI, MIRBuilder); + case TargetOpcode::G_SHL: + case TargetOpcode::G_ASHR: + case TargetOpcode::G_LSHR: + case TargetOpcode::G_UDIV: + case TargetOpcode::G_UREM: + return legalizeWOp(MI, MRI, MIRBuilder); + default: + return false; + } + return true; +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu32.mir new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu32.mir @@ -0,0 +1,814 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -march=riscv32 -run-pass=legalizer -simplify-mir -verify-machineinstrs %s -o - \ +# RUN: | FileCheck -check-prefix=RV32I %s +# RUN: llc -march=riscv32 -mattr=+m -run-pass=legalizer -simplify-mir -verify-machineinstrs %s -o - \ +# RUN: | FileCheck -check-prefix=RV32IM %s + +--- | + + ; Types < 32 bits exhaustively tested for add only. + define void @add_i8() { entry: ret void } + define void @add_i8_signext() { entry: ret void } + define void @add_i8_zeroext() { entry: ret void } + + define void @add_i16() { entry: ret void } + define void @add_i16_signext() { entry: ret void } + define void @add_i16_zeroext() { entry: ret void } + + ; Operations on XLen types. 
+ define void @add_i32() { entry: ret void } + define void @sub_i32() { entry: ret void } + define void @shl_i32() { entry: ret void } + define void @ashr_i32() { entry: ret void } + define void @lshr_i32() { entry: ret void } + define void @and_i32() { entry: ret void } + define void @or_i32() { entry: ret void } + define void @xor_i32() { entry: ret void } + define void @mul_i32() { entry: ret void } + define void @sdiv_i32() { entry: ret void } + define void @srem_i32() { entry: ret void } + define void @udiv_i32() { entry: ret void } + define void @urem_i32() { entry: ret void } + + ; Operations on double XLen types which lower to split operations. + define void @add_i64() { entry: ret void } + define void @sub_i64() { entry: ret void } + define void @mul_i64() { entry: ret void } +... +--- +name: add_i8 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i8 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I: $x10 = COPY [[COPY4]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i8 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32IM: $x10 = COPY [[COPY4]](s32) + ; RV32IM: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s32) + %4:_(s8) = G_ADD %0, %1 + %5:_(s32) = G_ANYEXT %4(s8) + $x10 = 
COPY %5(s32) + PseudoRET implicit $x10 + +... +--- +name: add_i8_signext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i8_signext + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 + ; RV32I: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32) + ; RV32I: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32) + ; RV32I: $x10 = COPY [[ASHR]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i8_signext + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32IM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 + ; RV32IM: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32) + ; RV32IM: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32) + ; RV32IM: $x10 = COPY [[ASHR]](s32) + ; RV32IM: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s32) + %4:_(s8) = G_ADD %0, %1 + %5:_(s32) = G_SEXT %4(s8) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... 
+--- +name: add_i8_zeroext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i8_zeroext + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32I: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32I: $x10 = COPY [[AND]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i8_zeroext + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32IM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32IM: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32IM: $x10 = COPY [[AND]](s32) + ; RV32IM: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s32) + %4:_(s8) = G_ADD %0, %1 + %5:_(s32) = G_ZEXT %4(s8) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... 
+--- +name: add_i16 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i16 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I: $x10 = COPY [[COPY4]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i16 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32IM: $x10 = COPY [[COPY4]](s32) + ; RV32IM: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s32) + %4:_(s16) = G_ADD %0, %1 + %5:_(s32) = G_ANYEXT %4(s16) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... 
+--- +name: add_i16_signext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i16_signext + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; RV32I: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32) + ; RV32I: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32) + ; RV32I: $x10 = COPY [[ASHR]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i16_signext + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32IM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; RV32IM: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32) + ; RV32IM: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32) + ; RV32IM: $x10 = COPY [[ASHR]](s32) + ; RV32IM: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s32) + %4:_(s16) = G_ADD %0, %1 + %5:_(s32) = G_SEXT %4(s16) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... 
+--- +name: add_i16_zeroext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + + ; RV32I-LABEL: name: add_i16_zeroext + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32I: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32I: $x10 = COPY [[AND]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i16_zeroext + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32IM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32IM: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32IM: $x10 = COPY [[AND]](s32) + ; RV32IM: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s32) + %4:_(s16) = G_ADD %0, %1 + %5:_(s32) = G_ZEXT %4(s16) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... 
+--- +name: add_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32I: $x10 = COPY [[ADD]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[ADD]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_ADD %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: sub_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: sub_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY1]] + ; RV32I: $x10 = COPY [[SUB]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: sub_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[SUB]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_SUB %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: shl_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: shl_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY1]](s32) + ; RV32I: $x10 = COPY [[SHL]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: shl_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY1]](s32) + ; RV32IM: $x10 = COPY [[SHL]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_SHL %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: ashr_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: ashr_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[COPY1]](s32) + ; RV32I: $x10 = COPY [[ASHR]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: ashr_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[COPY1]](s32) + ; RV32IM: $x10 = COPY [[ASHR]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_ASHR %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: lshr_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: lshr_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[COPY1]](s32) + ; RV32I: $x10 = COPY [[LSHR]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: lshr_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[COPY1]](s32) + ; RV32IM: $x10 = COPY [[LSHR]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_LSHR %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: and_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: and_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[COPY1]] + ; RV32I: $x10 = COPY [[AND]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: and_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[AND]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_AND %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: or_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: or_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]] + ; RV32I: $x10 = COPY [[OR]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: or_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[OR]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_OR %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: xor_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: xor_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[COPY1]] + ; RV32I: $x10 = COPY [[XOR]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: xor_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[XOR]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_XOR %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: mul_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: mul_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: $x10 = COPY [[COPY]](s32) + ; RV32I: $x11 = COPY [[COPY1]](s32) + ; RV32I: PseudoCALL target-flags(riscv-call) &__mulsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: $x10 = COPY [[COPY2]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: mul_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[MUL]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_MUL %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: sdiv_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: sdiv_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: $x10 = COPY [[COPY]](s32) + ; RV32I: $x11 = COPY [[COPY1]](s32) + ; RV32I: PseudoCALL target-flags(riscv-call) &__divsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: $x10 = COPY [[COPY2]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: sdiv_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[SDIV]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_SDIV %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: srem_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: srem_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: $x10 = COPY [[COPY]](s32) + ; RV32I: $x11 = COPY [[COPY1]](s32) + ; RV32I: PseudoCALL target-flags(riscv-call) &__modsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: $x10 = COPY [[COPY2]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: srem_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[SREM:%[0-9]+]]:_(s32) = G_SREM [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[SREM]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_SREM %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: udiv_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: udiv_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: $x10 = COPY [[COPY]](s32) + ; RV32I: $x11 = COPY [[COPY1]](s32) + ; RV32I: PseudoCALL target-flags(riscv-call) &__udivsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: $x10 = COPY [[COPY2]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: udiv_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[UDIV:%[0-9]+]]:_(s32) = G_UDIV [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[UDIV]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_UDIV %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: urem_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: urem_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: $x10 = COPY [[COPY]](s32) + ; RV32I: $x11 = COPY [[COPY1]](s32) + ; RV32I: PseudoCALL target-flags(riscv-call) &__umodsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: $x10 = COPY [[COPY2]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: urem_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[UREM:%[0-9]+]]:_(s32) = G_UREM [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[UREM]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_UREM %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: add_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11, $x12, $x13 + + ; RV32I-LABEL: name: add_i64 + ; RV32I: liveins: $x10, $x11, $x12, $x13 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY2]] + ; RV32I: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]] + ; RV32I: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]] + ; RV32I: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32) + ; RV32I: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32I: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[AND]] + ; RV32I: $x10 = COPY [[ADD]](s32) + ; RV32I: $x11 = COPY [[ADD2]](s32) + ; RV32I: PseudoRET implicit $x10, implicit $x11 + ; RV32IM-LABEL: name: add_i64 + ; RV32IM: liveins: $x10, $x11, $x12, $x13 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY2]] + ; RV32IM: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]] + ; RV32IM: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]] + ; RV32IM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32) + ; RV32IM: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32IM: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[AND]] + ; RV32IM: $x10 = COPY [[ADD]](s32) + ; RV32IM: $x11 = COPY [[ADD2]](s32) + ; RV32IM: PseudoRET implicit $x10, implicit $x11 + %2:_(s32) = COPY $x10 + %3:_(s32) = COPY $x11 + %4:_(s32) = COPY $x12 + %5:_(s32) = COPY $x13 + %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32) + %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32) + %6:_(s64) = G_ADD %0, %1 + %7:_(s32), 
%8:_(s32) = G_UNMERGE_VALUES %6(s64) + $x10 = COPY %7(s32) + $x11 = COPY %8(s32) + PseudoRET implicit $x10, implicit $x11 + +... +--- +name: sub_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11, $x12, $x13 + + ; RV32I-LABEL: name: sub_i64 + ; RV32I: liveins: $x10, $x11, $x12, $x13 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32I: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY2]] + ; RV32I: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY2]] + ; RV32I: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[COPY3]] + ; RV32I: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32) + ; RV32I: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32I: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[AND]] + ; RV32I: $x10 = COPY [[SUB]](s32) + ; RV32I: $x11 = COPY [[SUB2]](s32) + ; RV32I: PseudoRET implicit $x10, implicit $x11 + ; RV32IM-LABEL: name: sub_i64 + ; RV32IM: liveins: $x10, $x11, $x12, $x13 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32IM: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY2]] + ; RV32IM: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY2]] + ; RV32IM: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[COPY3]] + ; RV32IM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32) + ; RV32IM: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32IM: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[AND]] + ; RV32IM: $x10 = COPY [[SUB]](s32) + ; RV32IM: $x11 = COPY [[SUB2]](s32) + ; RV32IM: PseudoRET implicit $x10, implicit $x11 + %2:_(s32) = COPY $x10 + %3:_(s32) = COPY $x11 + %4:_(s32) = COPY $x12 + %5:_(s32) = COPY $x13 + 
%1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32) + %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32) + %6:_(s64) = G_SUB %0, %1 + %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64) + $x10 = COPY %7(s32) + $x11 = COPY %8(s32) + PseudoRET implicit $x10, implicit $x11 + +... +--- +name: mul_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11, $x12, $x13 + + ; RV32I-LABEL: name: mul_i64 + ; RV32I: liveins: $x10, $x11, $x12, $x13 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32I: $x10 = COPY [[COPY]](s32) + ; RV32I: $x11 = COPY [[COPY1]](s32) + ; RV32I: $x12 = COPY [[COPY2]](s32) + ; RV32I: $x13 = COPY [[COPY3]](s32) + ; RV32I: PseudoCALL target-flags(riscv-call) &__muldi3, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11 + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY5:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: $x10 = COPY [[COPY4]](s32) + ; RV32I: $x11 = COPY [[COPY5]](s32) + ; RV32I: PseudoRET implicit $x10, implicit $x11 + ; RV32IM-LABEL: name: mul_i64 + ; RV32IM: liveins: $x10, $x11, $x12, $x13 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32IM: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY2]] + ; RV32IM: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[COPY1]], [[COPY2]] + ; RV32IM: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY3]] + ; RV32IM: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[COPY]], [[COPY2]] + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL1]], [[MUL2]] + ; RV32IM: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[UMULH]] + ; RV32IM: $x10 = COPY [[MUL]](s32) + ; RV32IM: $x11 = COPY [[ADD1]](s32) + ; RV32IM: PseudoRET implicit $x10, implicit $x11 + %2:_(s32) = COPY $x10 + %3:_(s32) = COPY 
$x11 + %4:_(s32) = COPY $x12 + %5:_(s32) = COPY $x13 + %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32) + %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32) + %6:_(s64) = G_MUL %0, %1 + %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64) + $x10 = COPY %7(s32) + $x11 = COPY %8(s32) + PseudoRET implicit $x10, implicit $x11 + +... diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu64.mir new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu64.mir @@ -0,0 +1,1231 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -march=riscv64 -run-pass=legalizer -simplify-mir -verify-machineinstrs %s -o - \ +# RUN: | FileCheck -check-prefix=RV64I %s +# RUN: llc -march=riscv64 -mattr=+m -run-pass=legalizer -simplify-mir -verify-machineinstrs %s -o - \ +# RUN: | FileCheck -check-prefix=RV64IM %s + +--- | + + ; Types < 32 bits exhaustively tested for add only. + define void @add_i8() { entry: ret void } + define void @add_i8_signext() { entry: ret void } + define void @add_i8_zeroext() { entry: ret void } + + define void @add_i16() { entry: ret void } + define void @add_i16_signext() { entry: ret void } + define void @add_i16_zeroext() { entry: ret void } + + ; Operations with single word instructions. + define void @add_i32() { entry: ret void } + define void @sub_i32() { entry: ret void } + define void @shl_i32() { entry: ret void } + define void @ashr_i32() { entry: ret void } + define void @lshr_i32() { entry: ret void } + define void @mul_i32() { entry: ret void } + define void @sdiv_i32() { entry: ret void } + define void @srem_i32() { entry: ret void } + define void @udiv_i32() { entry: ret void } + define void @urem_i32() { entry: ret void } + + ; Operations on XLen types. 
+ define void @add_i64() { entry: ret void } + define void @sub_i64() { entry: ret void } + define void @shl_i64() { entry: ret void } + define void @ashr_i64() { entry: ret void } + define void @lshr_i64() { entry: ret void } + define void @and_i64() { entry: ret void } + define void @or_i64() { entry: ret void } + define void @xor_i64() { entry: ret void } + define void @mul_i64() { entry: ret void } + define void @sdiv_i64() { entry: ret void } + define void @srem_i64() { entry: ret void } + define void @udiv_i64() { entry: ret void } + define void @urem_i64() { entry: ret void } + + ; Operations on double XLen types which lower to split operations. + define void @add_i128() { entry: ret void } + define void @sub_i128() { entry: ret void } + define void @mul_i128() { entry: ret void } +... +--- +name: add_i8 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i8 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64I: $x10 = COPY [[COPY4]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i8 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64IM: $x10 = COPY [[COPY4]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s64) + %4:_(s8) = G_ADD %0, %1 + %5:_(s64) = G_ANYEXT %4(s8) + $x10 
= COPY %5(s64) + PseudoRET implicit $x10 + +... +--- +name: add_i8_signext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i8_signext + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 56 + ; RV64I: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY4]], [[C]](s64) + ; RV64I: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64I: $x10 = COPY [[ASHR]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i8_signext + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64IM: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 56 + ; RV64IM: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY4]], [[C]](s64) + ; RV64IM: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64IM: $x10 = COPY [[ASHR]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s64) + %4:_(s8) = G_ADD %0, %1 + %5:_(s64) = G_SEXT %4(s8) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: add_i8_zeroext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i8_zeroext + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 255 + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64I: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]] + ; RV64I: $x10 = COPY [[AND]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i8_zeroext + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64IM: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 255 + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64IM: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]] + ; RV64IM: $x10 = COPY [[AND]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s64) + %4:_(s8) = G_ADD %0, %1 + %5:_(s64) = G_ZEXT %4(s8) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: add_i16 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i16 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64I: $x10 = COPY [[COPY4]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i16 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64IM: $x10 = COPY [[COPY4]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s64) + %4:_(s16) = G_ADD %0, %1 + %5:_(s64) = G_ANYEXT %4(s16) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: add_i16_signext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i16_signext + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 + ; RV64I: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY4]], [[C]](s64) + ; RV64I: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64I: $x10 = COPY [[ASHR]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i16_signext + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64IM: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 + ; RV64IM: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY4]], [[C]](s64) + ; RV64IM: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64IM: $x10 = COPY [[ASHR]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s64) + %4:_(s16) = G_ADD %0, %1 + %5:_(s64) = G_SEXT %4(s16) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: add_i16_zeroext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + + ; RV64I-LABEL: name: add_i16_zeroext + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535 + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64I: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]] + ; RV64I: $x10 = COPY [[AND]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i16_zeroext + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64IM: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535 + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64IM: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]] + ; RV64IM: $x10 = COPY [[AND]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s64) + %4:_(s16) = G_ADD %0, %1 + %5:_(s64) = G_ZEXT %4(s16) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: add_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64) + ; RV64I: [[ADDW:%[0-9]+]]:gpr(s64) = ADDW [[COPY2]](s64), [[COPY3]](s64) + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADDW]](s64) + ; RV64I: $x10 = COPY [[COPY4]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[ADDW:%[0-9]+]]:gpr(s64) = ADDW [[COPY2]](s64), [[COPY3]](s64) + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADDW]](s64) + ; RV64IM: $x10 = COPY [[COPY4]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_ADD %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: sub_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: sub_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64) + ; RV64I: [[SUBW:%[0-9]+]]:gpr(s64) = SUBW [[COPY2]](s64), [[COPY3]](s64) + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[SUBW]](s64) + ; RV64I: $x10 = COPY [[COPY4]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: sub_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[SUBW:%[0-9]+]]:gpr(s64) = SUBW [[COPY2]](s64), [[COPY3]](s64) + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[SUBW]](s64) + ; RV64IM: $x10 = COPY [[COPY4]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_SUB %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: shl_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: shl_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64) + ; RV64I: [[SLLW:%[0-9]+]]:gpr(s64) = SLLW [[COPY2]](s64), [[COPY3]](s64) + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[SLLW]](s64) + ; RV64I: $x10 = COPY [[COPY4]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: shl_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[SLLW:%[0-9]+]]:gpr(s64) = SLLW [[COPY2]](s64), [[COPY3]](s64) + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[SLLW]](s64) + ; RV64IM: $x10 = COPY [[COPY4]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_SHL %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: ashr_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: ashr_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64) + ; RV64I: [[SRAW:%[0-9]+]]:gpr(s64) = SRAW [[COPY2]](s64), [[COPY3]](s64) + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[SRAW]](s64) + ; RV64I: $x10 = COPY [[COPY4]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: ashr_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[SRAW:%[0-9]+]]:gpr(s64) = SRAW [[COPY2]](s64), [[COPY3]](s64) + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[SRAW]](s64) + ; RV64IM: $x10 = COPY [[COPY4]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_ASHR %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: lshr_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: lshr_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64) + ; RV64I: [[SRLW:%[0-9]+]]:gpr(s64) = SRLW [[COPY2]](s64), [[COPY3]](s64) + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[SRLW]](s64) + ; RV64I: $x10 = COPY [[COPY4]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: lshr_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[SRLW:%[0-9]+]]:gpr(s64) = SRLW [[COPY2]](s64), [[COPY3]](s64) + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[SRLW]](s64) + ; RV64IM: $x10 = COPY [[COPY4]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_LSHR %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: mul_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: mul_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: $x10 = COPY [[COPY2]](s64) + ; RV64I: $x11 = COPY [[COPY3]](s64) + ; RV64I: PseudoCALL target-flags(riscv-call) &__muldi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY5:%[0-9]+]]:_(s64) = COPY [[COPY4]](s64) + ; RV64I: $x10 = COPY [[COPY5]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: mul_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[MULW:%[0-9]+]]:gpr(s64) = MULW [[COPY2]](s64), [[COPY3]](s64) + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[MULW]](s64) + ; RV64IM: $x10 = COPY [[COPY4]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_MUL %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: sdiv_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: sdiv_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 + ; RV64I: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY2]], [[C]](s64) + ; RV64I: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 + ; RV64I: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[COPY3]], [[C1]](s64) + ; RV64I: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[C1]](s64) + ; RV64I: $x10 = COPY [[ASHR]](s64) + ; RV64I: $x11 = COPY [[ASHR1]](s64) + ; RV64I: PseudoCALL target-flags(riscv-call) &__divdi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY5:%[0-9]+]]:_(s64) = COPY [[COPY4]](s64) + ; RV64I: $x10 = COPY [[COPY5]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: sdiv_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[DIVW:%[0-9]+]]:gpr(s64) = DIVW [[COPY2]](s64), [[COPY3]](s64) + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[DIVW]](s64) + ; RV64IM: $x10 = COPY [[COPY4]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_SDIV %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: srem_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: srem_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 + ; RV64I: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY2]], [[C]](s64) + ; RV64I: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 + ; RV64I: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[COPY3]], [[C1]](s64) + ; RV64I: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[C1]](s64) + ; RV64I: $x10 = COPY [[ASHR]](s64) + ; RV64I: $x11 = COPY [[ASHR1]](s64) + ; RV64I: PseudoCALL target-flags(riscv-call) &__moddi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY5:%[0-9]+]]:_(s64) = COPY [[COPY4]](s64) + ; RV64I: $x10 = COPY [[COPY5]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: srem_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[REMW:%[0-9]+]]:gpr(s64) = REMW [[COPY2]](s64), [[COPY3]](s64) + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[REMW]](s64) + ; RV64IM: $x10 = COPY [[COPY4]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_SREM %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: udiv_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: udiv_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[C]] + ; RV64I: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY3]], [[C1]] + ; RV64I: $x10 = COPY [[AND]](s64) + ; RV64I: $x11 = COPY [[AND1]](s64) + ; RV64I: PseudoCALL target-flags(riscv-call) &__udivdi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY5:%[0-9]+]]:_(s64) = COPY [[COPY4]](s64) + ; RV64I: $x10 = COPY [[COPY5]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: udiv_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[DIVUW:%[0-9]+]]:gpr(s64) = DIVUW [[COPY2]](s64), [[COPY3]](s64) + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[DIVUW]](s64) + ; RV64IM: $x10 = COPY [[COPY4]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_UDIV %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: urem_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: urem_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[C]] + ; RV64I: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY3]], [[C1]] + ; RV64I: $x10 = COPY [[AND]](s64) + ; RV64I: $x11 = COPY [[AND1]](s64) + ; RV64I: PseudoCALL target-flags(riscv-call) &__umoddi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY5:%[0-9]+]]:_(s64) = COPY [[COPY4]](s64) + ; RV64I: $x10 = COPY [[COPY5]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: urem_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[REMUW:%[0-9]+]]:gpr(s64) = REMUW [[COPY2]](s64), [[COPY3]](s64) + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[REMUW]](s64) + ; RV64IM: $x10 = COPY [[COPY4]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_UREM %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: add_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64I: $x10 = COPY [[ADD]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64IM: $x10 = COPY [[ADD]](s64) + ; RV64IM: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_ADD %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... +--- +name: sub_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: sub_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY1]] + ; RV64I: $x10 = COPY [[SUB]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: sub_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY1]] + ; RV64IM: $x10 = COPY [[SUB]](s64) + ; RV64IM: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_SUB %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... 
+--- +name: shl_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: shl_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[COPY1]](s64) + ; RV64I: $x10 = COPY [[SHL]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: shl_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[COPY1]](s64) + ; RV64IM: $x10 = COPY [[SHL]](s64) + ; RV64IM: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_SHL %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... +--- +name: ashr_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: ashr_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[COPY1]](s64) + ; RV64I: $x10 = COPY [[ASHR]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: ashr_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[COPY1]](s64) + ; RV64IM: $x10 = COPY [[ASHR]](s64) + ; RV64IM: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_ASHR %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... 
+---
+# lshr_i64: {s64, s64} G_LSHR is legal on RV64; expect no legalization change.
+name: lshr_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: lshr_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY]], [[COPY1]](s64)
+    ; RV64I: $x10 = COPY [[LSHR]](s64)
+    ; RV64I: PseudoRET implicit $x10
+    ; RV64IM-LABEL: name: lshr_i64
+    ; RV64IM: liveins: $x10, $x11
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY]], [[COPY1]](s64)
+    ; RV64IM: $x10 = COPY [[LSHR]](s64)
+    ; RV64IM: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_LSHR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+# and_i64: G_AND is legal at XLen (s64 on RV64); expect no legalization change.
+name: and_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: and_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[COPY1]]
+    ; RV64I: $x10 = COPY [[AND]](s64)
+    ; RV64I: PseudoRET implicit $x10
+    ; RV64IM-LABEL: name: and_i64
+    ; RV64IM: liveins: $x10, $x11
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[COPY1]]
+    ; RV64IM: $x10 = COPY [[AND]](s64)
+    ; RV64IM: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_AND %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+# or_i64: G_OR is legal at XLen (s64 on RV64); expect no legalization change.
+name: or_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: or_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[COPY1]]
+    ; RV64I: $x10 = COPY [[OR]](s64)
+    ; RV64I: PseudoRET implicit $x10
+    ; RV64IM-LABEL: name: or_i64
+    ; RV64IM: liveins: $x10, $x11
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[COPY1]]
+    ; RV64IM: $x10 = COPY [[OR]](s64)
+    ; RV64IM: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_OR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+# xor_i64: G_XOR is legal at XLen (s64 on RV64); expect no legalization change.
+name: xor_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: xor_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[COPY1]]
+    ; RV64I: $x10 = COPY [[XOR]](s64)
+    ; RV64I: PseudoRET implicit $x10
+    ; RV64IM-LABEL: name: xor_i64
+    ; RV64IM: liveins: $x10, $x11
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[COPY1]]
+    ; RV64IM: $x10 = COPY [[XOR]](s64)
+    ; RV64IM: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_XOR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+# mul_i64: without M, s64 G_MUL lowers to a __muldi3 libcall (exercises the new
+# G_MUL libcall support in LegalizerHelper); with M it stays a legal G_MUL.
+name: mul_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: mul_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: $x10 = COPY [[COPY]](s64)
+    ; RV64I: $x11 = COPY [[COPY1]](s64)
+    ; RV64I: PseudoCALL target-flags(riscv-call) &__muldi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+    ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: $x10 = COPY [[COPY2]](s64)
+    ; RV64I: PseudoRET implicit $x10
+    ; RV64IM-LABEL: name: mul_i64
+    ; RV64IM: liveins: $x10, $x11
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
+    ; RV64IM: $x10 = COPY [[MUL]](s64)
+    ; RV64IM: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_MUL %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+# sdiv_i64: without M, s64 G_SDIV lowers to a __divdi3 libcall; with M it is legal.
+name: sdiv_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sdiv_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: $x10 = COPY [[COPY]](s64)
+    ; RV64I: $x11 = COPY [[COPY1]](s64)
+    ; RV64I: PseudoCALL target-flags(riscv-call) &__divdi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+    ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: $x10 = COPY [[COPY2]](s64)
+    ; RV64I: PseudoRET implicit $x10
+    ; RV64IM-LABEL: name: sdiv_i64
+    ; RV64IM: liveins: $x10, $x11
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[SDIV:%[0-9]+]]:_(s64) = G_SDIV [[COPY]], [[COPY1]]
+    ; RV64IM: $x10 = COPY [[SDIV]](s64)
+    ; RV64IM: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_SDIV %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+# srem_i64: without M, s64 G_SREM lowers to a __moddi3 libcall; with M it is legal.
+name: srem_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: srem_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: $x10 = COPY [[COPY]](s64)
+    ; RV64I: $x11 = COPY [[COPY1]](s64)
+    ; RV64I: PseudoCALL target-flags(riscv-call) &__moddi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+    ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: $x10 = COPY [[COPY2]](s64)
+    ; RV64I: PseudoRET implicit $x10
+    ; RV64IM-LABEL: name: srem_i64
+    ; RV64IM: liveins: $x10, $x11
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[SREM:%[0-9]+]]:_(s64) = G_SREM [[COPY]], [[COPY1]]
+    ; RV64IM: $x10 = COPY [[SREM]](s64)
+    ; RV64IM: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_SREM %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+# udiv_i64: without M, s64 G_UDIV lowers to a __udivdi3 libcall; with M it is legal.
+name: udiv_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: udiv_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: $x10 = COPY [[COPY]](s64)
+    ; RV64I: $x11 = COPY [[COPY1]](s64)
+    ; RV64I: PseudoCALL target-flags(riscv-call) &__udivdi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+    ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: $x10 = COPY [[COPY2]](s64)
+    ; RV64I: PseudoRET implicit $x10
+    ; RV64IM-LABEL: name: udiv_i64
+    ; RV64IM: liveins: $x10, $x11
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[UDIV:%[0-9]+]]:_(s64) = G_UDIV [[COPY]], [[COPY1]]
+    ; RV64IM: $x10 = COPY [[UDIV]](s64)
+    ; RV64IM: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_UDIV %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+# urem_i64: without M, s64 G_UREM lowers to a __umoddi3 libcall; with M it is legal.
+name: urem_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: urem_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: $x10 = COPY [[COPY]](s64)
+    ; RV64I: $x11 = COPY [[COPY1]](s64)
+    ; RV64I: PseudoCALL target-flags(riscv-call) &__umoddi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+    ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: $x10 = COPY [[COPY2]](s64)
+    ; RV64I: PseudoRET implicit $x10
+    ; RV64IM-LABEL: name: urem_i64
+    ; RV64IM: liveins: $x10, $x11
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[UREM:%[0-9]+]]:_(s64) = G_UREM [[COPY]], [[COPY1]]
+    ; RV64IM: $x10 = COPY [[UREM]](s64)
+    ; RV64IM: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_UREM %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+# add_i128: s128 is narrowed to two s64 adds; the carry is computed by comparing
+# the low-part sum against one addend (ult => unsigned overflow occurred).
+name: add_i128
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV64I-LABEL: name: add_i128
+    ; RV64I: liveins: $x10, $x11, $x12, $x13
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+    ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+    ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY2]]
+    ; RV64I: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), [[COPY2]]
+    ; RV64I: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[COPY3]]
+    ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ICMP]](s64)
+    ; RV64I: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]]
+    ; RV64I: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[AND]]
+    ; RV64I: $x10 = COPY [[ADD]](s64)
+    ; RV64I: $x11 = COPY [[ADD2]](s64)
+    ; RV64I: PseudoRET implicit $x10, implicit $x11
+    ; RV64IM-LABEL: name: add_i128
+    ; RV64IM: liveins: $x10, $x11, $x12, $x13
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+    ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+    ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY2]]
+    ; RV64IM: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), [[COPY2]]
+    ; RV64IM: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[COPY3]]
+    ; RV64IM: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ICMP]](s64)
+    ; RV64IM: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]]
+    ; RV64IM: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[AND]]
+    ; RV64IM: $x10 = COPY [[ADD]](s64)
+    ; RV64IM: $x11 = COPY [[ADD2]](s64)
+    ; RV64IM: PseudoRET implicit $x10, implicit $x11
+    %2:_(s64) = COPY $x10
+    %3:_(s64) = COPY $x11
+    %4:_(s64) = COPY $x12
+    %5:_(s64) = COPY $x13
+    %1:_(s128) = G_MERGE_VALUES %4(s64), %5(s64)
+    %0:_(s128) = G_MERGE_VALUES %2(s64), %3(s64)
+    %6:_(s128) = G_ADD %0, %1
+    %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %6(s128)
+    $x10 = COPY %7(s64)
+    $x11 = COPY %8(s64)
+    PseudoRET implicit $x10, implicit $x11
+
+...
+---
+# sub_i128: s128 is narrowed to two s64 subs; the borrow is computed by
+# comparing the original low operands (ult => unsigned underflow occurred).
+name: sub_i128
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV64I-LABEL: name: sub_i128
+    ; RV64I: liveins: $x10, $x11, $x12, $x13
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+    ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+    ; RV64I: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY2]]
+    ; RV64I: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY2]]
+    ; RV64I: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[COPY1]], [[COPY3]]
+    ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ICMP]](s64)
+    ; RV64I: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]]
+    ; RV64I: [[SUB2:%[0-9]+]]:_(s64) = G_SUB [[SUB1]], [[AND]]
+    ; RV64I: $x10 = COPY [[SUB]](s64)
+    ; RV64I: $x11 = COPY [[SUB2]](s64)
+    ; RV64I: PseudoRET implicit $x10, implicit $x11
+    ; RV64IM-LABEL: name: sub_i128
+    ; RV64IM: liveins: $x10, $x11, $x12, $x13
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+    ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+    ; RV64IM: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY2]]
+    ; RV64IM: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY2]]
+    ; RV64IM: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[COPY1]], [[COPY3]]
+    ; RV64IM: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ICMP]](s64)
+    ; RV64IM: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]]
+    ; RV64IM: [[SUB2:%[0-9]+]]:_(s64) = G_SUB [[SUB1]], [[AND]]
+    ; RV64IM: $x10 = COPY [[SUB]](s64)
+    ; RV64IM: $x11 = COPY [[SUB2]](s64)
+    ; RV64IM: PseudoRET implicit $x10, implicit $x11
+    %2:_(s64) = COPY $x10
+    %3:_(s64) = COPY $x11
+    %4:_(s64) = COPY $x12
+    %5:_(s64) = COPY $x13
+    %1:_(s128) = G_MERGE_VALUES %4(s64), %5(s64)
+    %0:_(s128) = G_MERGE_VALUES %2(s64), %3(s64)
+    %6:_(s128) = G_SUB %0, %1
+    %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %6(s128)
+    $x10 = COPY %7(s64)
+    $x11 = COPY %8(s64)
+    PseudoRET implicit $x10, implicit $x11
+
+...
+---
+# mul_i128: without M, s128 G_MUL lowers to a __multi3 libcall (result returned
+# in $x10/$x11); with M it is narrowed to MUL/UMULH plus cross-product adds.
+name: mul_i128
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV64I-LABEL: name: mul_i128
+    ; RV64I: liveins: $x10, $x11, $x12, $x13
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+    ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+    ; RV64I: $x10 = COPY [[COPY]](s64)
+    ; RV64I: $x11 = COPY [[COPY1]](s64)
+    ; RV64I: $x12 = COPY [[COPY2]](s64)
+    ; RV64I: $x13 = COPY [[COPY3]](s64)
+    ; RV64I: PseudoCALL target-flags(riscv-call) &__multi3, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+    ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY5:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: $x10 = COPY [[COPY4]](s64)
+    ; RV64I: $x11 = COPY [[COPY5]](s64)
+    ; RV64I: PseudoRET implicit $x10, implicit $x11
+    ; RV64IM-LABEL: name: mul_i128
+    ; RV64IM: liveins: $x10, $x11, $x12, $x13
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+    ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+    ; RV64IM: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY2]]
+    ; RV64IM: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[COPY1]], [[COPY2]]
+    ; RV64IM: [[MUL2:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY3]]
+    ; RV64IM: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[COPY]], [[COPY2]]
+    ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MUL1]], [[MUL2]]
+    ; RV64IM: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ADD]], [[UMULH]]
+    ; RV64IM: $x10 = COPY [[MUL]](s64)
+    ; RV64IM: $x11 = COPY [[ADD1]](s64)
+    ; RV64IM: PseudoRET implicit $x10, implicit $x11
+    %2:_(s64) = COPY $x10
+    %3:_(s64) = COPY $x11
+    %4:_(s64) = COPY $x12
+    %5:_(s64) = COPY $x13
+    %1:_(s128) = G_MERGE_VALUES %4(s64), %5(s64)
+    %0:_(s128) = G_MERGE_VALUES %2(s64), %3(s64)
+    %6:_(s128) = G_MUL %0, %1
+    %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %6(s128)
+    $x10 = COPY %7(s64)
+    $x11 = COPY %8(s64)
+    PseudoRET implicit $x10, implicit $x11
+
+...