diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -403,6 +403,8 @@
   assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
   switch (Opcode) {
+  case TargetOpcode::G_MUL:
+    RTLIBCASE(MUL_I);
   case TargetOpcode::G_SDIV:
     RTLIBCASE(SDIV_I);
   case TargetOpcode::G_UDIV:
@@ -639,6 +641,7 @@
   switch (MI.getOpcode()) {
   default:
     return UnableToLegalize;
+  case TargetOpcode::G_MUL:
   case TargetOpcode::G_SDIV:
   case TargetOpcode::G_UDIV:
   case TargetOpcode::G_SREM:
diff --git a/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp
@@ -11,6 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "RISCVLegalizerInfo.h"
+#include "RISCVSubtarget.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/CodeGen/ValueTypes.h"
 #include "llvm/IR/DerivedTypes.h"
@@ -19,5 +20,89 @@
 using namespace llvm;
 
 RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
+  const LLT s1 = LLT::scalar(1);
+  const LLT s32 = LLT::scalar(32);
+  const LLT s64 = LLT::scalar(64);
+  const LLT s128 = LLT::scalar(128);
+
+  bool IsRV64 = ST.is64Bit();
+  const LLT &XLenLLT = IsRV64 ? s64 : s32;
+
+  using namespace TargetOpcode;
+
+  if (IsRV64) {
+    // Account for availability of single word instructions on RV64.
+
+    getActionDefinitionsBuilder({G_ADD, G_SUB})
+        .legalFor({s32, s64})
+        .clampScalar(0, s64, s64);
+
+    if (ST.hasStdExtM()) {
+      getActionDefinitionsBuilder({G_MUL, G_SDIV, G_SREM, G_UDIV, G_UREM})
+          .legalFor({s32, s64})
+          .clampScalar(0, s64, s64);
+    } else {
+      getActionDefinitionsBuilder({G_MUL, G_SDIV, G_SREM, G_UDIV, G_UREM})
+          .libcallFor({s64, s128})
+          .clampScalar(0, s64, s128);
+    }
+  } else {
+    getActionDefinitionsBuilder({G_ADD, G_SUB})
+        .legalFor({s32})
+        .clampScalar(0, s32, s32);
+
+    if (ST.hasStdExtM()) {
+      getActionDefinitionsBuilder({G_MUL, G_SDIV, G_SREM, G_UDIV, G_UREM})
+          .legalFor({s32})
+          .libcallFor({s128})
+          .clampScalar(0, s32, s32);
+    } else {
+      getActionDefinitionsBuilder({G_MUL, G_SDIV, G_SREM, G_UDIV, G_UREM})
+          .libcallFor({s32, s64, s128})
+          .clampScalar(0, s32, s128);
+    }
+  }
+
+  // Split operations on double XLen types.
+  getActionDefinitionsBuilder({G_UADDO, G_UADDE, G_USUBO, G_USUBE})
+      .lowerFor({{XLenLLT, s1}});
+  if (ST.hasStdExtM()) {
+    getActionDefinitionsBuilder(G_UMULO)
+        .lowerFor({{XLenLLT, s1}});
+
+    getActionDefinitionsBuilder(G_UMULH)
+        .legalFor({XLenLLT})
+        .clampScalar(0, XLenLLT, XLenLLT);
+  }
+
+  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
+      .legalFor({XLenLLT})
+      .clampScalar(0, XLenLLT, XLenLLT);
+
+  getActionDefinitionsBuilder(G_ICMP)
+      .legalFor({{XLenLLT, XLenLLT}})
+      .clampScalar(0, XLenLLT, XLenLLT)
+      .clampScalar(1, XLenLLT, XLenLLT);
+
+  // G_ZEXT -> G_AND
+  // G_SEXT -> G_SEXT_INREG
+  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
+      .legalFor({XLenLLT})
+      .clampScalar(0, XLenLLT, XLenLLT)
+      .maxScalar(1, XLenLLT);
+
+  getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL})
+      .legalFor({{XLenLLT, XLenLLT}})
+      .clampScalar(0, XLenLLT, XLenLLT)
+      .clampScalar(1, XLenLLT, XLenLLT);
+
+  getActionDefinitionsBuilder(G_CONSTANT)
+      .legalFor({XLenLLT})
+      .clampScalar(0, XLenLLT, XLenLLT);
+
+  // G_SEXT_INREG -> G_SHL + G_ASHR
+  // TODO: We have better patterns for this depending on the operand.
+ getActionDefinitionsBuilder(G_SEXT_INREG).lower(); + computeTables(); } diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu32.mir new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu32.mir @@ -0,0 +1,727 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -march=riscv32 -run-pass=legalizer -simplify-mir -verify-machineinstrs %s -o - \ +# RUN: | FileCheck -check-prefix=RV32I %s +# RUN: llc -march=riscv32 -mattr=+m -run-pass=legalizer -simplify-mir -verify-machineinstrs %s -o - \ +# RUN: | FileCheck -check-prefix=RV32IM %s + +--- | + + ; Types < 32 bits exhaustively tested for add only. + define void @add_i8() { entry: ret void } + define void @add_i8_signext() { entry: ret void } + define void @add_i8_zeroext() { entry: ret void } + + define void @add_i16() { entry: ret void } + define void @add_i16_signext() { entry: ret void } + define void @add_i16_zeroext() { entry: ret void } + + ; Operations on XLen types. + define void @add_i32() { entry: ret void } + define void @sub_i32() { entry: ret void } + define void @mul_i32() { entry: ret void } + define void @sdiv_i32() { entry: ret void } + define void @srem_i32() { entry: ret void } + define void @udiv_i32() { entry: ret void } + define void @urem_i32() { entry: ret void } + define void @and_i32() { entry: ret void } + define void @or_i32() { entry: ret void } + define void @xor_i32() { entry: ret void } + + ; Operations on double XLen types which lower to split operations. + define void @add_i64() { entry: ret void } + define void @sub_i64() { entry: ret void } + define void @mul_i64() { entry: ret void } +... +--- +name: add_i8 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i8 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I: $x10 = COPY [[COPY4]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i8 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32IM: $x10 = COPY [[COPY4]](s32) + ; RV32IM: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s32) + %4:_(s8) = G_ADD %0, %1 + %5:_(s32) = G_ANYEXT %4(s8) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... 
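+# Sub-word values have no native operations: clampScalar widens the s8 G_ADD
+# to s32, and only the extension of the result differs between the following
+# anyext/signext/zeroext variants.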
+--- +name: add_i8_signext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i8_signext + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 + ; RV32I: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32) + ; RV32I: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32) + ; RV32I: $x10 = COPY [[ASHR]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i8_signext + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32IM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 + ; RV32IM: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32) + ; RV32IM: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32) + ; RV32IM: $x10 = COPY [[ASHR]](s32) + ; RV32IM: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s32) + %4:_(s8) = G_ADD %0, %1 + %5:_(s32) = G_SEXT %4(s8) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... +--- +name: add_i8_zeroext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i8_zeroext + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32I: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32I: $x10 = COPY [[AND]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i8_zeroext + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32IM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32IM: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32IM: $x10 = COPY [[AND]](s32) + ; RV32IM: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s32) + %4:_(s8) = G_ADD %0, %1 + %5:_(s32) = G_ZEXT %4(s8) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... 
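+# The i16 tests follow the same pattern as i8, with a shift amount of 16 for
+# the sign extension and a mask of 65535 for the zero extension.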
+--- +name: add_i16 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i16 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I: $x10 = COPY [[COPY4]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i16 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32IM: $x10 = COPY [[COPY4]](s32) + ; RV32IM: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s32) + %4:_(s16) = G_ADD %0, %1 + %5:_(s32) = G_ANYEXT %4(s16) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... +--- +name: add_i16_signext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i16_signext + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; RV32I: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32) + ; RV32I: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32) + ; RV32I: $x10 = COPY [[ASHR]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i16_signext + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32IM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; RV32IM: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32) + ; RV32IM: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32) + ; RV32IM: $x10 = COPY [[ASHR]](s32) + ; RV32IM: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s32) + %4:_(s16) = G_ADD %0, %1 + %5:_(s32) = G_SEXT %4(s16) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... 
+--- +name: add_i16_zeroext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + + ; RV32I-LABEL: name: add_i16_zeroext + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32I: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32I: $x10 = COPY [[AND]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i16_zeroext + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]] + ; RV32IM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32IM: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32IM: $x10 = COPY [[AND]](s32) + ; RV32IM: PseudoRET implicit $x10 + %2:_(s32) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s32) + %4:_(s16) = G_ADD %0, %1 + %5:_(s32) = G_ZEXT %4(s16) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... +--- +name: add_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32I: $x10 = COPY [[ADD]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: add_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[ADD]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_ADD %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: sub_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: sub_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY1]] + ; RV32I: $x10 = COPY [[SUB]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: sub_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[SUB]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_SUB %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
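+# Without the M extension, s32 multiplication and division become libcalls
+# (__mulsi3, __divsi3, __modsi3, __udivsi3, __umodsi3); with +m they remain
+# legal G_MUL/G_SDIV/G_SREM/G_UDIV/G_UREM instructions.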
+--- +name: mul_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: mul_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: $x10 = COPY [[COPY]](s32) + ; RV32I: $x11 = COPY [[COPY1]](s32) + ; RV32I: PseudoCALL &__mulsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: $x10 = COPY [[COPY2]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: mul_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[MUL]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_MUL %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: sdiv_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: sdiv_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: $x10 = COPY [[COPY]](s32) + ; RV32I: $x11 = COPY [[COPY1]](s32) + ; RV32I: PseudoCALL &__divsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: $x10 = COPY [[COPY2]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: sdiv_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[SDIV]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_SDIV %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: srem_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: srem_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: $x10 = COPY [[COPY]](s32) + ; RV32I: $x11 = COPY [[COPY1]](s32) + ; RV32I: PseudoCALL &__modsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: $x10 = COPY [[COPY2]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: srem_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[SREM:%[0-9]+]]:_(s32) = G_SREM [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[SREM]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_SREM %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: udiv_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: udiv_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: $x10 = COPY [[COPY]](s32) + ; RV32I: $x11 = COPY [[COPY1]](s32) + ; RV32I: PseudoCALL &__udivsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: $x10 = COPY [[COPY2]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: udiv_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[UDIV:%[0-9]+]]:_(s32) = G_UDIV [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[UDIV]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_UDIV %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: urem_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: urem_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: $x10 = COPY [[COPY]](s32) + ; RV32I: $x11 = COPY [[COPY1]](s32) + ; RV32I: PseudoCALL &__umodsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: $x10 = COPY [[COPY2]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: urem_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[UREM:%[0-9]+]]:_(s32) = G_UREM [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[UREM]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_UREM %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: and_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: and_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[COPY1]] + ; RV32I: $x10 = COPY [[AND]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: and_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[AND]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_AND %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
+--- +name: or_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: or_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]] + ; RV32I: $x10 = COPY [[OR]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: or_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[OR]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_OR %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: xor_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: xor_i32 + ; RV32I: liveins: $x10, $x11 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[COPY1]] + ; RV32I: $x10 = COPY [[XOR]](s32) + ; RV32I: PseudoRET implicit $x10 + ; RV32IM-LABEL: name: xor_i32 + ; RV32IM: liveins: $x10, $x11 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[COPY1]] + ; RV32IM: $x10 = COPY [[XOR]](s32) + ; RV32IM: PseudoRET implicit $x10 + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_XOR %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: add_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11, $x12, $x13 + + ; RV32I-LABEL: name: add_i64 + ; RV32I: liveins: $x10, $x11, $x12, $x13 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY2]] + ; RV32I: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]] + ; RV32I: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]] + ; RV32I: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32) + ; RV32I: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32I: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[AND]] + ; RV32I: $x10 = COPY [[ADD]](s32) + ; RV32I: $x11 = COPY [[ADD2]](s32) + ; RV32I: PseudoRET implicit $x10, implicit $x11 + ; RV32IM-LABEL: name: add_i64 + ; RV32IM: liveins: $x10, $x11, $x12, $x13 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY2]] + ; RV32IM: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]] + ; RV32IM: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]] + ; RV32IM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32) + ; RV32IM: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32IM: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[AND]] + ; RV32IM: $x10 = COPY [[ADD]](s32) + ; RV32IM: $x11 = COPY [[ADD2]](s32) + ; RV32IM: PseudoRET implicit $x10, implicit $x11 + %2:_(s32) = COPY $x10 + %3:_(s32) = COPY $x11 + %4:_(s32) = COPY $x12 + %5:_(s32) = COPY $x13 + %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32) + %0:_(s64) = G_MERGE_VALUES 
%2(s32), %3(s32) + %6:_(s64) = G_ADD %0, %1 + %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64) + $x10 = COPY %7(s32) + $x11 = COPY %8(s32) + PseudoRET implicit $x10, implicit $x11 + +... +--- +name: sub_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11, $x12, $x13 + + ; RV32I-LABEL: name: sub_i64 + ; RV32I: liveins: $x10, $x11, $x12, $x13 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32I: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY2]] + ; RV32I: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY2]] + ; RV32I: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[COPY3]] + ; RV32I: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32) + ; RV32I: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32I: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[AND]] + ; RV32I: $x10 = COPY [[SUB]](s32) + ; RV32I: $x11 = COPY [[SUB2]](s32) + ; RV32I: PseudoRET implicit $x10, implicit $x11 + ; RV32IM-LABEL: name: sub_i64 + ; RV32IM: liveins: $x10, $x11, $x12, $x13 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32IM: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY2]] + ; RV32IM: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY2]] + ; RV32IM: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[COPY3]] + ; RV32IM: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; RV32IM: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32) + ; RV32IM: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]] + ; RV32IM: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[AND]] + ; RV32IM: $x10 = COPY [[SUB]](s32) + ; RV32IM: $x11 = COPY [[SUB2]](s32) + ; RV32IM: PseudoRET implicit $x10, implicit $x11 + %2:_(s32) = COPY $x10 + %3:_(s32) = COPY $x11 + %4:_(s32) = COPY $x12 + %5:_(s32) = COPY $x13 + %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32) + %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32) + %6:_(s64) = G_SUB %0, %1 + %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64) + $x10 = COPY %7(s32) + $x11 = COPY %8(s32) + PseudoRET implicit $x10, implicit $x11 + +... 
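+# Narrowing a double-XLen G_MUL yields the usual split-multiply sequence:
+# Lo = LoA*LoB and Hi = HiA*LoB + LoA*HiB + umulh(LoA, LoB), relying on
+# G_UMULH being legal with +m. Without M it is a single __muldi3 libcall.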
+--- +name: mul_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11, $x12, $x13 + + ; RV32I-LABEL: name: mul_i64 + ; RV32I: liveins: $x10, $x11, $x12, $x13 + ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32I: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32I: $x10 = COPY [[COPY]](s32) + ; RV32I: $x11 = COPY [[COPY1]](s32) + ; RV32I: $x12 = COPY [[COPY2]](s32) + ; RV32I: $x13 = COPY [[COPY3]](s32) + ; RV32I: PseudoCALL &__muldi3, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11 + ; RV32I: [[COPY4:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I: [[COPY5:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I: $x10 = COPY [[COPY4]](s32) + ; RV32I: $x11 = COPY [[COPY5]](s32) + ; RV32I: PseudoRET implicit $x10, implicit $x11 + ; RV32IM-LABEL: name: mul_i64 + ; RV32IM: liveins: $x10, $x11, $x12, $x13 + ; RV32IM: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32IM: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32IM: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 + ; RV32IM: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 + ; RV32IM: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY2]] + ; RV32IM: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[COPY1]], [[COPY2]] + ; RV32IM: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY3]] + ; RV32IM: [[UMULH:%[0-9]+]]:_(s32) = G_UMULH [[COPY]], [[COPY2]] + ; RV32IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL1]], [[MUL2]] + ; RV32IM: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[UMULH]] + ; RV32IM: $x10 = COPY [[MUL]](s32) + ; RV32IM: $x11 = COPY [[ADD1]](s32) + ; RV32IM: PseudoRET implicit $x10, implicit $x11 + %2:_(s32) = COPY $x10 + %3:_(s32) = COPY $x11 + %4:_(s32) = COPY $x12 + %5:_(s32) = COPY $x13 + %1:_(s64) = G_MERGE_VALUES %4(s32), %5(s32) + %0:_(s64) = G_MERGE_VALUES %2(s32), %3(s32) + %6:_(s64) = G_MUL %0, %1 + %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(s64) + $x10 = COPY %7(s32) + $x11 = COPY %8(s32) + PseudoRET implicit $x10, implicit $x11 + +... diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu64.mir new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu64.mir @@ -0,0 +1,1030 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -march=riscv64 -run-pass=legalizer -simplify-mir -verify-machineinstrs %s -o - \ +# RUN: | FileCheck -check-prefix=RV64I %s +# RUN: llc -march=riscv64 -mattr=+m -run-pass=legalizer -simplify-mir -verify-machineinstrs %s -o - \ +# RUN: | FileCheck -check-prefix=RV64IM %s + +--- | + + ; Types < 32 bits exhaustively tested for add only. + define void @add_i8() { entry: ret void } + define void @add_i8_signext() { entry: ret void } + define void @add_i8_zeroext() { entry: ret void } + + define void @add_i16() { entry: ret void } + define void @add_i16_signext() { entry: ret void } + define void @add_i16_zeroext() { entry: ret void } + + ; Operations with single word instructions. + define void @add_i32() { entry: ret void } + define void @sub_i32() { entry: ret void } + define void @mul_i32() { entry: ret void } + define void @sdiv_i32() { entry: ret void } + define void @srem_i32() { entry: ret void } + define void @udiv_i32() { entry: ret void } + define void @urem_i32() { entry: ret void } + + ; Operations on XLen types. 
+ define void @add_i64() { entry: ret void } + define void @sub_i64() { entry: ret void } + define void @mul_i64() { entry: ret void } + define void @sdiv_i64() { entry: ret void } + define void @srem_i64() { entry: ret void } + define void @udiv_i64() { entry: ret void } + define void @urem_i64() { entry: ret void } + define void @and_i64() { entry: ret void } + define void @or_i64() { entry: ret void } + define void @xor_i64() { entry: ret void } + + ; Operations on double XLen types which lower to split operations. + define void @add_i128() { entry: ret void } + define void @sub_i128() { entry: ret void } + define void @mul_i128() { entry: ret void } +... +--- +name: add_i8 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i8 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64I: $x10 = COPY [[COPY4]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i8 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64IM: $x10 = COPY [[COPY4]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s64) + %4:_(s8) = G_ADD %0, %1 + %5:_(s64) = G_ANYEXT %4(s8) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... +--- +name: add_i8_signext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i8_signext + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 56 + ; RV64I: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY4]], [[C]](s64) + ; RV64I: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64I: $x10 = COPY [[ASHR]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i8_signext + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64IM: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 56 + ; RV64IM: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY4]], [[C]](s64) + ; RV64IM: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64IM: $x10 = COPY [[ASHR]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s64) + %4:_(s8) = G_ADD %0, %1 + %5:_(s64) = G_SEXT %4(s8) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
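+# On RV64 the narrow-type tests mirror RV32 widened to s64: the sign-extension
+# shift amounts become 56 (i8) and 48 (i16) in the tests that follow, while
+# the zero-extension masks are unchanged.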
+--- +name: add_i8_zeroext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i8_zeroext + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 255 + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64I: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]] + ; RV64I: $x10 = COPY [[AND]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i8_zeroext + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64IM: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 255 + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64IM: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]] + ; RV64IM: $x10 = COPY [[AND]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s64) + %4:_(s8) = G_ADD %0, %1 + %5:_(s64) = G_ZEXT %4(s8) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... +--- +name: add_i16 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i16 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64I: $x10 = COPY [[COPY4]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i16 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64IM: $x10 = COPY [[COPY4]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s64) + %4:_(s16) = G_ADD %0, %1 + %5:_(s64) = G_ANYEXT %4(s16) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: add_i16_signext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i16_signext + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 + ; RV64I: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY4]], [[C]](s64) + ; RV64I: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64I: $x10 = COPY [[ASHR]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i16_signext + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64IM: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 48 + ; RV64IM: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY4]], [[C]](s64) + ; RV64IM: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64IM: $x10 = COPY [[ASHR]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s64) + %4:_(s16) = G_ADD %0, %1 + %5:_(s64) = G_SEXT %4(s16) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... +--- +name: add_i16_zeroext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + + ; RV64I-LABEL: name: add_i16_zeroext + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535 + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64I: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]] + ; RV64I: $x10 = COPY [[AND]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i16_zeroext + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY2]], [[COPY3]] + ; RV64IM: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535 + ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ADD]](s64) + ; RV64IM: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]] + ; RV64IM: $x10 = COPY [[AND]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s64) + %4:_(s16) = G_ADD %0, %1 + %5:_(s64) = G_ZEXT %4(s16) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
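+# s32 G_ADD and G_SUB stay legal on RV64 to account for the single word
+# instructions, so legalization only inserts G_TRUNC/G_ANYEXT around them.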
+--- +name: add_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) + ; RV64I: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC]], [[TRUNC1]] + ; RV64I: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD]](s32) + ; RV64I: $x10 = COPY [[ANYEXT]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) + ; RV64IM: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC]], [[TRUNC1]] + ; RV64IM: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD]](s32) + ; RV64IM: $x10 = COPY [[ANYEXT]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_ADD %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... +--- +name: sub_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: sub_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) + ; RV64I: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[TRUNC1]] + ; RV64I: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SUB]](s32) + ; RV64I: $x10 = COPY [[ANYEXT]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: sub_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) + ; RV64IM: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[TRUNC1]] + ; RV64IM: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SUB]](s32) + ; RV64IM: $x10 = COPY [[ANYEXT]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_SUB %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
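+# G_MUL can be widened with an any-extension, since the low 32 bits of a
+# product do not depend on the upper operand bits; without M the s32 multiply
+# is therefore passed to __muldi3 with plain copies, unlike the divisions
+# below which must extend their operands.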
+--- +name: mul_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: mul_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: $x10 = COPY [[COPY2]](s64) + ; RV64I: $x11 = COPY [[COPY3]](s64) + ; RV64I: PseudoCALL &__muldi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY5:%[0-9]+]]:_(s64) = COPY [[COPY4]](s64) + ; RV64I: $x10 = COPY [[COPY5]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: mul_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) + ; RV64IM: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[TRUNC]], [[TRUNC1]] + ; RV64IM: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[MUL]](s32) + ; RV64IM: $x10 = COPY [[ANYEXT]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_MUL %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... +--- +name: sdiv_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: sdiv_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 + ; RV64I: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY2]], [[C]](s64) + ; RV64I: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 + ; RV64I: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[COPY3]], [[C1]](s64) + ; RV64I: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[C1]](s64) + ; RV64I: $x10 = COPY [[ASHR]](s64) + ; RV64I: $x11 = COPY [[ASHR1]](s64) + ; RV64I: PseudoCALL &__divdi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY5:%[0-9]+]]:_(s64) = COPY [[COPY4]](s64) + ; RV64I: $x10 = COPY [[COPY5]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: sdiv_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) + ; RV64IM: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[TRUNC]], [[TRUNC1]] + ; RV64IM: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SDIV]](s32) + ; RV64IM: $x10 = COPY [[ANYEXT]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_SDIV %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
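+# As in sdiv above, the remaining s32 division tests widen to s64 before the
+# libcall without M: signed operands use a shl/ashr-by-32 pair (__divdi3,
+# __moddi3), unsigned operands an AND with 0xffffffff (__udivdi3, __umoddi3).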
+--- +name: srem_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: srem_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 + ; RV64I: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY2]], [[C]](s64) + ; RV64I: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64) + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32 + ; RV64I: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[COPY3]], [[C1]](s64) + ; RV64I: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[C1]](s64) + ; RV64I: $x10 = COPY [[ASHR]](s64) + ; RV64I: $x11 = COPY [[ASHR1]](s64) + ; RV64I: PseudoCALL &__moddi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY5:%[0-9]+]]:_(s64) = COPY [[COPY4]](s64) + ; RV64I: $x10 = COPY [[COPY5]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: srem_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) + ; RV64IM: [[SREM:%[0-9]+]]:_(s32) = G_SREM [[TRUNC]], [[TRUNC1]] + ; RV64IM: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SREM]](s32) + ; RV64IM: $x10 = COPY [[ANYEXT]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_SREM %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... +--- +name: udiv_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: udiv_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[C]] + ; RV64I: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY3]], [[C1]] + ; RV64I: $x10 = COPY [[AND]](s64) + ; RV64I: $x11 = COPY [[AND1]](s64) + ; RV64I: PseudoCALL &__udivdi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY5:%[0-9]+]]:_(s64) = COPY [[COPY4]](s64) + ; RV64I: $x10 = COPY [[COPY5]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: udiv_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) + ; RV64IM: [[UDIV:%[0-9]+]]:_(s32) = G_UDIV [[TRUNC]], [[TRUNC1]] + ; RV64IM: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UDIV]](s32) + ; RV64IM: $x10 = COPY [[ANYEXT]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_UDIV %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... 
+--- +name: urem_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: urem_i32 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64) + ; RV64I: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[C]] + ; RV64I: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295 + ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64) + ; RV64I: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY3]], [[C1]] + ; RV64I: $x10 = COPY [[AND]](s64) + ; RV64I: $x11 = COPY [[AND1]](s64) + ; RV64I: PseudoCALL &__umoddi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY5:%[0-9]+]]:_(s64) = COPY [[COPY4]](s64) + ; RV64I: $x10 = COPY [[COPY5]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: urem_i32 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) + ; RV64IM: [[UREM:%[0-9]+]]:_(s32) = G_UREM [[TRUNC]], [[TRUNC1]] + ; RV64IM: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UREM]](s32) + ; RV64IM: $x10 = COPY [[ANYEXT]](s64) + ; RV64IM: PseudoRET implicit $x10 + %2:_(s64) = COPY $x10 + %0:_(s32) = G_TRUNC %2(s64) + %3:_(s64) = COPY $x11 + %1:_(s32) = G_TRUNC %3(s64) + %4:_(s32) = G_UREM %0, %1 + %5:_(s64) = G_ANYEXT %4(s32) + $x10 = COPY %5(s64) + PseudoRET implicit $x10 + +... +--- +name: add_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: add_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64I: $x10 = COPY [[ADD]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: add_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]] + ; RV64IM: $x10 = COPY [[ADD]](s64) + ; RV64IM: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_ADD %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... +--- +name: sub_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: sub_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY1]] + ; RV64I: $x10 = COPY [[SUB]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: sub_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY1]] + ; RV64IM: $x10 = COPY [[SUB]](s64) + ; RV64IM: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_SUB %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... 
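+# s64 is XLen on RV64, so multiplication and division stay legal with +m and
+# become the double-word libcalls (__muldi3, __divdi3, ...) without it.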
+--- +name: mul_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: mul_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: $x10 = COPY [[COPY]](s64) + ; RV64I: $x11 = COPY [[COPY1]](s64) + ; RV64I: PseudoCALL &__muldi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: $x10 = COPY [[COPY2]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: mul_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]] + ; RV64IM: $x10 = COPY [[MUL]](s64) + ; RV64IM: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_MUL %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... +--- +name: sdiv_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: sdiv_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: $x10 = COPY [[COPY]](s64) + ; RV64I: $x11 = COPY [[COPY1]](s64) + ; RV64I: PseudoCALL &__divdi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: $x10 = COPY [[COPY2]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: sdiv_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[SDIV:%[0-9]+]]:_(s64) = G_SDIV [[COPY]], [[COPY1]] + ; RV64IM: $x10 = COPY [[SDIV]](s64) + ; RV64IM: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_SDIV %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... +--- +name: srem_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: srem_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: $x10 = COPY [[COPY]](s64) + ; RV64I: $x11 = COPY [[COPY1]](s64) + ; RV64I: PseudoCALL &__moddi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: $x10 = COPY [[COPY2]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: srem_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[SREM:%[0-9]+]]:_(s64) = G_SREM [[COPY]], [[COPY1]] + ; RV64IM: $x10 = COPY [[SREM]](s64) + ; RV64IM: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_SREM %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... 
+--- +name: udiv_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: udiv_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: $x10 = COPY [[COPY]](s64) + ; RV64I: $x11 = COPY [[COPY1]](s64) + ; RV64I: PseudoCALL &__udivdi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: $x10 = COPY [[COPY2]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: udiv_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[UDIV:%[0-9]+]]:_(s64) = G_UDIV [[COPY]], [[COPY1]] + ; RV64IM: $x10 = COPY [[UDIV]](s64) + ; RV64IM: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_UDIV %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... +--- +name: urem_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: urem_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: $x10 = COPY [[COPY]](s64) + ; RV64I: $x11 = COPY [[COPY1]](s64) + ; RV64I: PseudoCALL &__umoddi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 + ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: $x10 = COPY [[COPY2]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: urem_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[UREM:%[0-9]+]]:_(s64) = G_UREM [[COPY]], [[COPY1]] + ; RV64IM: $x10 = COPY [[UREM]](s64) + ; RV64IM: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_UREM %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... +--- +name: and_i64 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV64I-LABEL: name: and_i64 + ; RV64I: liveins: $x10, $x11 + ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64I: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[COPY1]] + ; RV64I: $x10 = COPY [[AND]](s64) + ; RV64I: PseudoRET implicit $x10 + ; RV64IM-LABEL: name: and_i64 + ; RV64IM: liveins: $x10, $x11 + ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 + ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 + ; RV64IM: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[COPY1]] + ; RV64IM: $x10 = COPY [[AND]](s64) + ; RV64IM: PseudoRET implicit $x10 + %0:_(s64) = COPY $x10 + %1:_(s64) = COPY $x11 + %2:_(s64) = G_AND %0, %1 + $x10 = COPY %2(s64) + PseudoRET implicit $x10 + +... 
+---
+name: or_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: or_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[COPY1]]
+    ; RV64I: $x10 = COPY [[OR]](s64)
+    ; RV64I: PseudoRET implicit $x10
+    ; RV64IM-LABEL: name: or_i64
+    ; RV64IM: liveins: $x10, $x11
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[COPY1]]
+    ; RV64IM: $x10 = COPY [[OR]](s64)
+    ; RV64IM: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_OR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: xor_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: xor_i64
+    ; RV64I: liveins: $x10, $x11
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[COPY1]]
+    ; RV64I: $x10 = COPY [[XOR]](s64)
+    ; RV64I: PseudoRET implicit $x10
+    ; RV64IM-LABEL: name: xor_i64
+    ; RV64IM: liveins: $x10, $x11
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[COPY1]]
+    ; RV64IM: $x10 = COPY [[XOR]](s64)
+    ; RV64IM: PseudoRET implicit $x10
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_XOR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i128
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV64I-LABEL: name: add_i128
+    ; RV64I: liveins: $x10, $x11, $x12, $x13
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+    ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+    ; RV64I: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY2]]
+    ; RV64I: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), [[COPY2]]
+    ; RV64I: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[COPY3]]
+    ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ICMP]](s64)
+    ; RV64I: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]]
+    ; RV64I: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[AND]]
+    ; RV64I: $x10 = COPY [[ADD]](s64)
+    ; RV64I: $x11 = COPY [[ADD2]](s64)
+    ; RV64I: PseudoRET implicit $x10, implicit $x11
+    ; RV64IM-LABEL: name: add_i128
+    ; RV64IM: liveins: $x10, $x11, $x12, $x13
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+    ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+    ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY2]]
+    ; RV64IM: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), [[COPY2]]
+    ; RV64IM: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[COPY3]]
+    ; RV64IM: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ICMP]](s64)
+    ; RV64IM: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]]
+    ; RV64IM: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[AND]]
+    ; RV64IM: $x10 = COPY [[ADD]](s64)
+    ; RV64IM: $x11 = COPY [[ADD2]](s64)
+    ; RV64IM: PseudoRET implicit $x10, implicit $x11
+    %2:_(s64) = COPY $x10
+    %3:_(s64) = COPY $x11
+    %4:_(s64) = COPY $x12
+    %5:_(s64) = COPY $x13
+    %1:_(s128) = G_MERGE_VALUES %4(s64), %5(s64)
+    %0:_(s128) = G_MERGE_VALUES %2(s64), %3(s64)
+    %6:_(s128) = G_ADD %0, %1
+    %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %6(s128)
+    $x10 = COPY %7(s64)
+    $x11 = COPY %8(s64)
+    PseudoRET implicit $x10, implicit $x11
+
+...
+---
+name: sub_i128
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV64I-LABEL: name: sub_i128
+    ; RV64I: liveins: $x10, $x11, $x12, $x13
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+    ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+    ; RV64I: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY2]]
+    ; RV64I: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY2]]
+    ; RV64I: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[COPY1]], [[COPY3]]
+    ; RV64I: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ICMP]](s64)
+    ; RV64I: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]]
+    ; RV64I: [[SUB2:%[0-9]+]]:_(s64) = G_SUB [[SUB1]], [[AND]]
+    ; RV64I: $x10 = COPY [[SUB]](s64)
+    ; RV64I: $x11 = COPY [[SUB2]](s64)
+    ; RV64I: PseudoRET implicit $x10, implicit $x11
+    ; RV64IM-LABEL: name: sub_i128
+    ; RV64IM: liveins: $x10, $x11, $x12, $x13
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+    ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+    ; RV64IM: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY2]]
+    ; RV64IM: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY2]]
+    ; RV64IM: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[COPY1]], [[COPY3]]
+    ; RV64IM: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; RV64IM: [[COPY4:%[0-9]+]]:_(s64) = COPY [[ICMP]](s64)
+    ; RV64IM: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]]
+    ; RV64IM: [[SUB2:%[0-9]+]]:_(s64) = G_SUB [[SUB1]], [[AND]]
+    ; RV64IM: $x10 = COPY [[SUB]](s64)
+    ; RV64IM: $x11 = COPY [[SUB2]](s64)
+    ; RV64IM: PseudoRET implicit $x10, implicit $x11
+    %2:_(s64) = COPY $x10
+    %3:_(s64) = COPY $x11
+    %4:_(s64) = COPY $x12
+    %5:_(s64) = COPY $x13
+    %1:_(s128) = G_MERGE_VALUES %4(s64), %5(s64)
+    %0:_(s128) = G_MERGE_VALUES %2(s64), %3(s64)
+    %6:_(s128) = G_SUB %0, %1
+    %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %6(s128)
+    $x10 = COPY %7(s64)
+    $x11 = COPY %8(s64)
+    PseudoRET implicit $x10, implicit $x11
+
+...
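+# With M, the 128-bit multiply below is narrowed to XLen pieces:
+#   lo = lo(a) * lo(b)
+#   hi = hi(a) * lo(b) + lo(a) * hi(b) + umulh(lo(a), lo(b))
+# Without M it is emitted as a single __multi3 libcall instead.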
+---
+name: mul_i128
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11, $x12, $x13
+
+    ; RV64I-LABEL: name: mul_i128
+    ; RV64I: liveins: $x10, $x11, $x12, $x13
+    ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+    ; RV64I: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+    ; RV64I: $x10 = COPY [[COPY]](s64)
+    ; RV64I: $x11 = COPY [[COPY1]](s64)
+    ; RV64I: $x12 = COPY [[COPY2]](s64)
+    ; RV64I: $x13 = COPY [[COPY3]](s64)
+    ; RV64I: PseudoCALL &__multi3, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+    ; RV64I: [[COPY4:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I: [[COPY5:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I: $x10 = COPY [[COPY4]](s64)
+    ; RV64I: $x11 = COPY [[COPY5]](s64)
+    ; RV64I: PseudoRET implicit $x10, implicit $x11
+    ; RV64IM-LABEL: name: mul_i128
+    ; RV64IM: liveins: $x10, $x11, $x12, $x13
+    ; RV64IM: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+    ; RV64IM: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+    ; RV64IM: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY2]]
+    ; RV64IM: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[COPY1]], [[COPY2]]
+    ; RV64IM: [[MUL2:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY3]]
+    ; RV64IM: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[COPY]], [[COPY2]]
+    ; RV64IM: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MUL1]], [[MUL2]]
+    ; RV64IM: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ADD]], [[UMULH]]
+    ; RV64IM: $x10 = COPY [[MUL]](s64)
+    ; RV64IM: $x11 = COPY [[ADD1]](s64)
+    ; RV64IM: PseudoRET implicit $x10, implicit $x11
+    %2:_(s64) = COPY $x10
+    %3:_(s64) = COPY $x11
+    %4:_(s64) = COPY $x12
+    %5:_(s64) = COPY $x13
+    %1:_(s128) = G_MERGE_VALUES %4(s64), %5(s64)
+    %0:_(s128) = G_MERGE_VALUES %2(s64), %3(s64)
+    %6:_(s128) = G_MUL %0, %1
+    %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %6(s128)
+    $x10 = COPY %7(s64)
+    $x11 = COPY %8(s64)
+    PseudoRET implicit $x10, implicit $x11
+
+...