diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -403,6 +403,8 @@
   assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size");
   switch (Opcode) {
+  case TargetOpcode::G_MUL:
+    RTLIBCASE(MUL_I);
   case TargetOpcode::G_SDIV:
     RTLIBCASE(SDIV_I);
   case TargetOpcode::G_UDIV:
     RTLIBCASE(UDIV_I);
@@ -639,6 +641,7 @@
   switch (MI.getOpcode()) {
   default:
     return UnableToLegalize;
+  case TargetOpcode::G_MUL:
   case TargetOpcode::G_SDIV:
   case TargetOpcode::G_UDIV:
   case TargetOpcode::G_SREM:
diff --git a/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp
@@ -11,6 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "RISCVLegalizerInfo.h"
+#include "RISCVSubtarget.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/CodeGen/ValueTypes.h"
 #include "llvm/IR/DerivedTypes.h"
@@ -19,5 +20,48 @@
 
 using namespace llvm;
 
 RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
+  const LLT s32 = LLT::scalar(32);
+  const LLT s64 = LLT::scalar(64);
+
+  bool IsRV64 = ST.is64Bit();
+  const LLT XLenLLT = IsRV64 ? s64 : s32;
+
+  using namespace TargetOpcode;
+
+  getActionDefinitionsBuilder({G_ADD, G_SUB})
+      .legalFor({XLenLLT})
+      .clampScalar(0, XLenLLT, XLenLLT);
+
+  // Without the M extension, mul/div/rem are legalized to libcalls.
+  auto &MulDivRemActions =
+      getActionDefinitionsBuilder({G_MUL, G_SDIV, G_SREM, G_UDIV, G_UREM});
+  if (ST.hasStdExtM())
+    MulDivRemActions.legalFor({XLenLLT});
+  else
+    MulDivRemActions.libcallFor({XLenLLT});
+  MulDivRemActions.clampScalar(0, XLenLLT, XLenLLT);
+
+  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
+      .legalFor({XLenLLT})
+      .clampScalar(0, XLenLLT, XLenLLT);
+
+  // G_ZEXT is implemented with G_AND (mask of all ones at the source width);
+  // G_SEXT is implemented with G_SEXT_INREG, which is lowered below.
+  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
+      .clampScalar(0, XLenLLT, XLenLLT);
+
+  getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL})
+      .legalFor({{XLenLLT, XLenLLT}})
+      .clampScalar(0, XLenLLT, XLenLLT)
+      .clampScalar(1, XLenLLT, XLenLLT);
+
+  getActionDefinitionsBuilder(G_CONSTANT)
+      .legalFor({XLenLLT})
+      .clampScalar(0, XLenLLT, XLenLLT);
+
+  // G_SEXT_INREG -> G_SHL + G_ASHR
+  // TODO: We have better patterns for this depending on the operand.
+  getActionDefinitionsBuilder(G_SEXT_INREG).lower();
+  computeTables();
 }
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu32.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu32.mir
@@ -0,0 +1,617 @@
+# RUN: llc -march=riscv32 -x mir -run-pass=legalizer -simplify-mir -verify-machineinstrs < %s \
+# RUN:   | FileCheck -check-prefix=RV32I %s
+# RUN: llc -march=riscv32 -mattr=+m -x mir -run-pass=legalizer -simplify-mir -verify-machineinstrs < %s \
+# RUN:   | FileCheck -check-prefix=RV32IM %s
+
+--- |
+
+  ; Extensions are only tested exhaustively for add, to avoid excessive tests.
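+  ; Narrow (s8/s16) operations are widened to the 32-bit register width:
+  ; a zero-extended result becomes a G_AND with a mask (255/65535), and a
+  ; sign-extended result becomes a G_SHL/G_ASHR pair (shift by 24/16).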
+  define void @add_i8() { entry: ret void }
+  define void @add_i8_signext() { entry: ret void }
+  define void @add_i8_zeroext() { entry: ret void }
+  define void @add_i16() { entry: ret void }
+  define void @add_i16_signext() { entry: ret void }
+  define void @add_i16_zeroext() { entry: ret void }
+  define void @add_i32() { entry: ret void }
+
+  define void @sub_i32() { entry: ret void }
+  define void @mul_i32() { entry: ret void }
+  define void @sdiv_i32() { entry: ret void }
+  define void @srem_i32() { entry: ret void }
+  define void @udiv_i32() { entry: ret void }
+  define void @urem_i32() { entry: ret void }
+  define void @and_i32() { entry: ret void }
+  define void @or_i32() { entry: ret void }
+  define void @xor_i32() { entry: ret void }
+...
+---
+name: add_i8
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i8
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32)
+    ; RV32I-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32)
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV32I-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+    ; RV32I-NEXT: $x10 = COPY [[VREG5]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: add_i8
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32)
+    ; RV32IM-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32)
+    ; RV32IM-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV32IM-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+    ; RV32IM-NEXT: $x10 = COPY [[VREG5]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %2:_(s32) = COPY $x10
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $x11
+    %1:_(s8) = G_TRUNC %3(s32)
+    %4:_(s8) = G_ADD %0, %1
+    %5:_(s32) = G_ANYEXT %4(s8)
+    $x10 = COPY %5(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i8_signext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i8_signext
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32)
+    ; RV32I-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32)
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV32I-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+    ; RV32I-NEXT: [[SHAMT:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; RV32I-NEXT: [[VREG6:%[0-9]+]]:_(s32) = G_SHL [[VREG5]], [[SHAMT]](s32)
+    ; RV32I-NEXT: [[VREG7:%[0-9]+]]:_(s32) = G_ASHR [[VREG6]], [[SHAMT]](s32)
+    ; RV32I-NEXT: $x10 = COPY [[VREG7]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: add_i8_signext
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32)
+    ; RV32IM-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32)
+    ; RV32IM-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV32IM-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+    ; RV32IM-NEXT: [[SHAMT:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; RV32IM-NEXT: [[VREG6:%[0-9]+]]:_(s32) = G_SHL [[VREG5]], [[SHAMT]](s32)
+    ; RV32IM-NEXT: [[VREG7:%[0-9]+]]:_(s32) = G_ASHR [[VREG6]], [[SHAMT]](s32)
+    ; RV32IM-NEXT: $x10 = COPY [[VREG7]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %2:_(s32) = COPY $x10
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $x11
+    %1:_(s8) = G_TRUNC %3(s32)
+    %4:_(s8) = G_ADD %0, %1
+    %5:_(s32) = G_SEXT %4(s8)
+    $x10 = COPY %5(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i8_zeroext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i8_zeroext
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32)
+    ; RV32I-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32)
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV32I-NEXT: [[MASK:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; RV32I-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+    ; RV32I-NEXT: [[VREG6:%[0-9]+]]:_(s32) = G_AND [[VREG5]], [[MASK]]
+    ; RV32I-NEXT: $x10 = COPY [[VREG6]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: add_i8_zeroext
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32)
+    ; RV32IM-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32)
+    ; RV32IM-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV32IM-NEXT: [[MASK:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; RV32IM-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+    ; RV32IM-NEXT: [[VREG6:%[0-9]+]]:_(s32) = G_AND [[VREG5]], [[MASK]]
+    ; RV32IM-NEXT: $x10 = COPY [[VREG6]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %2:_(s32) = COPY $x10
+    %0:_(s8) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $x11
+    %1:_(s8) = G_TRUNC %3(s32)
+    %4:_(s8) = G_ADD %0, %1
+    %5:_(s32) = G_ZEXT %4(s8)
+    $x10 = COPY %5(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i16
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i16
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32)
+    ; RV32I-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32)
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV32I-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+    ; RV32I-NEXT: $x10 = COPY [[VREG5]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: add_i16
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32)
+    ; RV32IM-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32)
+    ; RV32IM-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV32IM-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+    ; RV32IM-NEXT: $x10 = COPY [[VREG5]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %2:_(s32) = COPY $x10
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $x11
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_ADD %0, %1
+    %5:_(s32) = G_ANYEXT %4(s16)
+    $x10 = COPY %5(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i16_signext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i16_signext
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32)
+    ; RV32I-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32)
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV32I-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+    ; RV32I-NEXT: [[SHAMT:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; RV32I-NEXT: [[VREG6:%[0-9]+]]:_(s32) = G_SHL [[VREG5]], [[SHAMT]](s32)
+    ; RV32I-NEXT: [[VREG7:%[0-9]+]]:_(s32) = G_ASHR [[VREG6]], [[SHAMT]](s32)
+    ; RV32I-NEXT: $x10 = COPY [[VREG7]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: add_i16_signext
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32)
+    ; RV32IM-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32)
+    ; RV32IM-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV32IM-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+    ; RV32IM-NEXT: [[SHAMT:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; RV32IM-NEXT: [[VREG6:%[0-9]+]]:_(s32) = G_SHL [[VREG5]], [[SHAMT]](s32)
+    ; RV32IM-NEXT: [[VREG7:%[0-9]+]]:_(s32) = G_ASHR [[VREG6]], [[SHAMT]](s32)
+    ; RV32IM-NEXT: $x10 = COPY [[VREG7]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %2:_(s32) = COPY $x10
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $x11
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_ADD %0, %1
+    %5:_(s32) = G_SEXT %4(s16)
+    $x10 = COPY %5(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i16_zeroext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i16_zeroext
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32)
+    ; RV32I-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32)
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV32I-NEXT: [[MASK:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; RV32I-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+    ; RV32I-NEXT: [[VREG6:%[0-9]+]]:_(s32) = G_AND [[VREG5]], [[MASK]]
+    ; RV32I-NEXT: $x10 = COPY [[VREG6]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: add_i16_zeroext
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32)
+    ; RV32IM-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32)
+    ; RV32IM-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV32IM-NEXT: [[MASK:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; RV32IM-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+    ; RV32IM-NEXT: [[VREG6:%[0-9]+]]:_(s32) = G_AND [[VREG5]], [[MASK]]
+    ; RV32IM-NEXT: $x10 = COPY [[VREG6]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %2:_(s32) = COPY $x10
+    %0:_(s16) = G_TRUNC %2(s32)
+    %3:_(s32) = COPY $x11
+    %1:_(s16) = G_TRUNC %3(s32)
+    %4:_(s16) = G_ADD %0, %1
+    %5:_(s32) = G_ZEXT %4(s16)
+    $x10 = COPY %5(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i32
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: add_i32
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG1]], [[VREG2]]
+    ; RV32I-NEXT: $x10 = COPY [[ADD]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: add_i32
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG1]], [[VREG2]]
+    ; RV32IM-NEXT: $x10 = COPY [[ADD]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %0:_(s32) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s32) = G_ADD %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name: sub_i32
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sub_i32
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[VREG1]], [[VREG2]]
+    ; RV32I-NEXT: $x10 = COPY [[SUB]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: sub_i32
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[VREG1]], [[VREG2]]
+    ; RV32IM-NEXT: $x10 = COPY [[SUB]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %0:_(s32) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s32) = G_SUB %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
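+# Without the M extension, G_MUL, G_SDIV, G_SREM, G_UDIV and G_UREM are
+# legalized to libcalls (__mulsi3, __divsi3, __modsi3, __udivsi3, __umodsi3);
+# with +m they remain legal, as the RV32IM checks below show.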
+---
+name: mul_i32
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: mul_i32
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: $x10 = COPY [[VREG1]](s32)
+    ; RV32I-NEXT: $x11 = COPY [[VREG2]](s32)
+    ; RV32I-NEXT: PseudoCALL &__mulsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+    ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: $x10 = COPY [[MUL]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: mul_i32
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[VREG1]], [[VREG2]]
+    ; RV32IM-NEXT: $x10 = COPY [[MUL]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %0:_(s32) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s32) = G_MUL %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name: sdiv_i32
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: sdiv_i32
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: $x10 = COPY [[VREG1]](s32)
+    ; RV32I-NEXT: $x11 = COPY [[VREG2]](s32)
+    ; RV32I-NEXT: PseudoCALL &__divsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+    ; RV32I-NEXT: [[SDIV:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: $x10 = COPY [[SDIV]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: sdiv_i32
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[VREG1]], [[VREG2]]
+    ; RV32IM-NEXT: $x10 = COPY [[SDIV]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %0:_(s32) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s32) = G_SDIV %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name: srem_i32
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: srem_i32
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: $x10 = COPY [[VREG1]](s32)
+    ; RV32I-NEXT: $x11 = COPY [[VREG2]](s32)
+    ; RV32I-NEXT: PseudoCALL &__modsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+    ; RV32I-NEXT: [[SREM:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: $x10 = COPY [[SREM]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: srem_i32
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[SREM:%[0-9]+]]:_(s32) = G_SREM [[VREG1]], [[VREG2]]
+    ; RV32IM-NEXT: $x10 = COPY [[SREM]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %0:_(s32) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s32) = G_SREM %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name: udiv_i32
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: udiv_i32
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: $x10 = COPY [[VREG1]](s32)
+    ; RV32I-NEXT: $x11 = COPY [[VREG2]](s32)
+    ; RV32I-NEXT: PseudoCALL &__udivsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+    ; RV32I-NEXT: [[UDIV:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: $x10 = COPY [[UDIV]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: udiv_i32
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[UDIV:%[0-9]+]]:_(s32) = G_UDIV [[VREG1]], [[VREG2]]
+    ; RV32IM-NEXT: $x10 = COPY [[UDIV]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %0:_(s32) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s32) = G_UDIV %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name: urem_i32
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: urem_i32
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: $x10 = COPY [[VREG1]](s32)
+    ; RV32I-NEXT: $x11 = COPY [[VREG2]](s32)
+    ; RV32I-NEXT: PseudoCALL &__umodsi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+    ; RV32I-NEXT: [[UREM:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: $x10 = COPY [[UREM]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: urem_i32
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[UREM:%[0-9]+]]:_(s32) = G_UREM [[VREG1]], [[VREG2]]
+    ; RV32IM-NEXT: $x10 = COPY [[UREM]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %0:_(s32) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s32) = G_UREM %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name: and_i32
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: and_i32
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[VREG1]], [[VREG2]]
+    ; RV32I-NEXT: $x10 = COPY [[AND]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: and_i32
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[VREG1]], [[VREG2]]
+    ; RV32IM-NEXT: $x10 = COPY [[AND]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %0:_(s32) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s32) = G_AND %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name: or_i32
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: or_i32
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[VREG1]], [[VREG2]]
+    ; RV32I-NEXT: $x10 = COPY [[OR]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: or_i32
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[VREG1]], [[VREG2]]
+    ; RV32IM-NEXT: $x10 = COPY [[OR]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %0:_(s32) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s32) = G_OR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
+---
+name: xor_i32
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV32I-LABEL: name: xor_i32
+    ; RV32I: bb.0.entry:
+    ; RV32I-NEXT: liveins: $x10, $x11
+    ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32I-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[VREG1]], [[VREG2]]
+    ; RV32I-NEXT: $x10 = COPY [[XOR]](s32)
+    ; RV32I-NEXT: PseudoRET
+
+    ; RV32IM-LABEL: name: xor_i32
+    ; RV32IM: bb.0.entry:
+    ; RV32IM-NEXT: liveins: $x10, $x11
+    ; RV32IM: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10
+    ; RV32IM-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11
+    ; RV32IM-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[VREG1]], [[VREG2]]
+    ; RV32IM-NEXT: $x10 = COPY [[XOR]](s32)
+    ; RV32IM-NEXT: PseudoRET
+    %0:_(s32) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s32) = G_XOR %0, %1
+    $x10 = COPY %2(s32)
+    PseudoRET implicit $x10
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu64.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu64.mir
@@ -0,0 +1,751 @@
+# RUN: llc -march=riscv64 -x mir -run-pass=legalizer -simplify-mir -verify-machineinstrs < %s \
+# RUN:   | FileCheck -check-prefix=RV64I %s
+# RUN: llc -march=riscv64 -mattr=+m -x mir -run-pass=legalizer -simplify-mir -verify-machineinstrs < %s \
+# RUN:   | FileCheck -check-prefix=RV64IM %s
+
+--- |
+
+  ; Extensions are only tested exhaustively for add, to avoid excessive tests.
+  define void @add_i8() { entry: ret void }
+  define void @add_i8_signext() { entry: ret void }
+  define void @add_i8_zeroext() { entry: ret void }
+  define void @add_i16() { entry: ret void }
+  define void @add_i16_signext() { entry: ret void }
+  define void @add_i16_zeroext() { entry: ret void }
+  define void @add_i32() { entry: ret void }
+  define void @add_i32_signext() { entry: ret void }
+  define void @add_i32_zeroext() { entry: ret void }
+  define void @add_i64() { entry: ret void }
+
+  define void @sub_i64() { entry: ret void }
+  define void @mul_i64() { entry: ret void }
+  define void @sdiv_i64() { entry: ret void }
+  define void @srem_i64() { entry: ret void }
+  define void @udiv_i64() { entry: ret void }
+  define void @urem_i64() { entry: ret void }
+  define void @and_i64() { entry: ret void }
+  define void @or_i64() { entry: ret void }
+  define void @xor_i64() { entry: ret void }
+
+...
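+# RV64 follows the same widening strategy at the 64-bit register width:
+# zext uses masks (255/65535/4294967295) and sext uses shift pairs
+# (56/48/32) for s8/s16/s32 respectively.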
+---
+name: add_i8
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i8
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: $x10 = COPY [[VREG5]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: add_i8
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64IM-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64IM-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64IM-NEXT: $x10 = COPY [[VREG5]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s8) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s8) = G_TRUNC %3(s64)
+    %4:_(s8) = G_ADD %0, %1
+    %5:_(s64) = G_ANYEXT %4(s8)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i8_signext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i8_signext
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: [[SHAMT:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+    ; RV64I-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_SHL [[VREG5]], [[SHAMT]](s64)
+    ; RV64I-NEXT: [[VREG7:%[0-9]+]]:_(s64) = G_ASHR [[VREG6]], [[SHAMT]](s64)
+    ; RV64I-NEXT: $x10 = COPY [[VREG7]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: add_i8_signext
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64IM-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64IM-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64IM-NEXT: [[SHAMT:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+    ; RV64IM-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_SHL [[VREG5]], [[SHAMT]](s64)
+    ; RV64IM-NEXT: [[VREG7:%[0-9]+]]:_(s64) = G_ASHR [[VREG6]], [[SHAMT]](s64)
+    ; RV64IM-NEXT: $x10 = COPY [[VREG7]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s8) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s8) = G_TRUNC %3(s64)
+    %4:_(s8) = G_ADD %0, %1
+    %5:_(s64) = G_SEXT %4(s8)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i8_zeroext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i8_zeroext
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[MASK:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_AND [[VREG5]], [[MASK]]
+    ; RV64I-NEXT: $x10 = COPY [[VREG6]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: add_i8_zeroext
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64IM-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64IM-NEXT: [[MASK:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
+    ; RV64IM-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64IM-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_AND [[VREG5]], [[MASK]]
+    ; RV64IM-NEXT: $x10 = COPY [[VREG6]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s8) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s8) = G_TRUNC %3(s64)
+    %4:_(s8) = G_ADD %0, %1
+    %5:_(s64) = G_ZEXT %4(s8)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i16
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i16
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: $x10 = COPY [[VREG5]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: add_i16
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64IM-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64IM-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64IM-NEXT: $x10 = COPY [[VREG5]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s16) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s16) = G_TRUNC %3(s64)
+    %4:_(s16) = G_ADD %0, %1
+    %5:_(s64) = G_ANYEXT %4(s16)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i16_signext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i16_signext
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: [[SHAMT:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+    ; RV64I-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_SHL [[VREG5]], [[SHAMT]](s64)
+    ; RV64I-NEXT: [[VREG7:%[0-9]+]]:_(s64) = G_ASHR [[VREG6]], [[SHAMT]](s64)
+    ; RV64I-NEXT: $x10 = COPY [[VREG7]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: add_i16_signext
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64IM-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64IM-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64IM-NEXT: [[SHAMT:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+    ; RV64IM-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_SHL [[VREG5]], [[SHAMT]](s64)
+    ; RV64IM-NEXT: [[VREG7:%[0-9]+]]:_(s64) = G_ASHR [[VREG6]], [[SHAMT]](s64)
+    ; RV64IM-NEXT: $x10 = COPY [[VREG7]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s16) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s16) = G_TRUNC %3(s64)
+    %4:_(s16) = G_ADD %0, %1
+    %5:_(s64) = G_SEXT %4(s16)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i16_zeroext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i16_zeroext
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[MASK:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_AND [[VREG5]], [[MASK]]
+    ; RV64I-NEXT: $x10 = COPY [[VREG6]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: add_i16_zeroext
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64IM-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64IM-NEXT: [[MASK:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
+    ; RV64IM-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64IM-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_AND [[VREG5]], [[MASK]]
+    ; RV64IM-NEXT: $x10 = COPY [[VREG6]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s16) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s16) = G_TRUNC %3(s64)
+    %4:_(s16) = G_ADD %0, %1
+    %5:_(s64) = G_ZEXT %4(s16)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i32
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i32
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: $x10 = COPY [[VREG5]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: add_i32
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64IM-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64IM-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64IM-NEXT: $x10 = COPY [[VREG5]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s32) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s32) = G_TRUNC %3(s64)
+    %4:_(s32) = G_ADD %0, %1
+    %5:_(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i32_signext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i32_signext
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: [[SHAMT:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; RV64I-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_SHL [[VREG5]], [[SHAMT]](s64)
+    ; RV64I-NEXT: [[VREG7:%[0-9]+]]:_(s64) = G_ASHR [[VREG6]], [[SHAMT]](s64)
+    ; RV64I-NEXT: $x10 = COPY [[VREG7]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: add_i32_signext
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64IM-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64IM-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64IM-NEXT: [[SHAMT:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; RV64IM-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_SHL [[VREG5]], [[SHAMT]](s64)
+    ; RV64IM-NEXT: [[VREG7:%[0-9]+]]:_(s64) = G_ASHR [[VREG6]], [[SHAMT]](s64)
+    ; RV64IM-NEXT: $x10 = COPY [[VREG7]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s32) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s32) = G_TRUNC %3(s64)
+    %4:_(s32) = G_ADD %0, %1
+    %5:_(s64) = G_SEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i32_zeroext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i32_zeroext
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[MASK:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_AND [[VREG5]], [[MASK]]
+    ; RV64I-NEXT: $x10 = COPY [[VREG6]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: add_i32_zeroext
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64IM-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64IM-NEXT: [[MASK:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
+    ; RV64IM-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64IM-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_AND [[VREG5]], [[MASK]]
+    ; RV64IM-NEXT: $x10 = COPY [[VREG6]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s32) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s32) = G_TRUNC %3(s64)
+    %4:_(s32) = G_ADD %0, %1
+    %5:_(s64) = G_ZEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG1]], [[VREG2]]
+    ; RV64I-NEXT: $x10 = COPY [[ADD]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: add_i64
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG1]], [[VREG2]]
+    ; RV64IM-NEXT: $x10 = COPY [[ADD]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_ADD %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: sub_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[VREG1]], [[VREG2]]
+    ; RV64I-NEXT: $x10 = COPY [[SUB]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: sub_i64
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[VREG1]], [[VREG2]]
+    ; RV64IM-NEXT: $x10 = COPY [[SUB]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_SUB %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
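+# Without the M extension, RV64 mul/div/rem legalize to the 64-bit libcalls
+# (__muldi3, __divdi3, __moddi3, __udivdi3, __umoddi3).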
+---
+name: mul_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: mul_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: $x10 = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: $x11 = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: PseudoCALL &__muldi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: $x10 = COPY [[MUL]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: mul_i64
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[VREG1]], [[VREG2]]
+    ; RV64IM-NEXT: $x10 = COPY [[MUL]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_MUL %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: sdiv_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sdiv_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: $x10 = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: $x11 = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: PseudoCALL &__divdi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+    ; RV64I-NEXT: [[SDIV:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: $x10 = COPY [[SDIV]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: sdiv_i64
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[SDIV:%[0-9]+]]:_(s64) = G_SDIV [[VREG1]], [[VREG2]]
+    ; RV64IM-NEXT: $x10 = COPY [[SDIV]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_SDIV %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: srem_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: srem_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: $x10 = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: $x11 = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: PseudoCALL &__moddi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+    ; RV64I-NEXT: [[SREM:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: $x10 = COPY [[SREM]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: srem_i64
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[SREM:%[0-9]+]]:_(s64) = G_SREM [[VREG1]], [[VREG2]]
+    ; RV64IM-NEXT: $x10 = COPY [[SREM]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_SREM %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: udiv_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: udiv_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: $x10 = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: $x11 = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: PseudoCALL &__udivdi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+    ; RV64I-NEXT: [[UDIV:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: $x10 = COPY [[UDIV]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: udiv_i64
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[UDIV:%[0-9]+]]:_(s64) = G_UDIV [[VREG1]], [[VREG2]]
+    ; RV64IM-NEXT: $x10 = COPY [[UDIV]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_UDIV %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: urem_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: urem_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: $x10 = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: $x11 = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: PseudoCALL &__umoddi3, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+    ; RV64I-NEXT: [[UREM:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: $x10 = COPY [[UREM]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: urem_i64
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[UREM:%[0-9]+]]:_(s64) = G_UREM [[VREG1]], [[VREG2]]
+    ; RV64IM-NEXT: $x10 = COPY [[UREM]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_UREM %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: and_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: and_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[VREG1]], [[VREG2]]
+    ; RV64I-NEXT: $x10 = COPY [[AND]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: and_i64
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[VREG1]], [[VREG2]]
+    ; RV64IM-NEXT: $x10 = COPY [[AND]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_AND %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: or_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: or_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[VREG1]], [[VREG2]]
+    ; RV64I-NEXT: $x10 = COPY [[OR]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: or_i64
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[VREG1]], [[VREG2]]
+    ; RV64IM-NEXT: $x10 = COPY [[OR]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_OR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: xor_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: xor_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[VREG1]], [[VREG2]]
+    ; RV64I-NEXT: $x10 = COPY [[XOR]](s64)
+    ; RV64I-NEXT: PseudoRET
+
+    ; RV64IM-LABEL: name: xor_i64
+    ; RV64IM: bb.0.entry:
+    ; RV64IM-NEXT: liveins: $x10, $x11
+    ; RV64IM: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64IM-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64IM-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[VREG1]], [[VREG2]]
+    ; RV64IM-NEXT: $x10 = COPY [[XOR]](s64)
+    ; RV64IM-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_XOR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...