diff --git a/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVLegalizerInfo.cpp
@@ -11,6 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "RISCVLegalizerInfo.h"
+#include "RISCVSubtarget.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/CodeGen/ValueTypes.h"
 #include "llvm/IR/DerivedTypes.h"
@@ -19,5 +20,42 @@
 using namespace llvm;
 
 RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST) {
+  // Scalar operations are only legal at the native register width (XLen);
+  // clampScalar widens narrower types and narrows wider ones. A single
+  // XLen type avoids duplicating every rule for RV32 and RV64.
+  const LLT sXLen = LLT::scalar(ST.getXLen());
+
+  using namespace TargetOpcode;
+
+  getActionDefinitionsBuilder(
+      {G_ADD, G_SUB, G_MUL, G_SDIV, G_SREM, G_UDIV, G_UREM})
+      .legalFor({sXLen})
+      .clampScalar(0, sXLen, sXLen);
+
+  getActionDefinitionsBuilder(G_AND)
+      .legalFor({sXLen})
+      .clampScalar(0, sXLen, sXLen);
+
+  // Extensions are never legal themselves; clamping to XLen leaves
+  // artifacts that the legalizer combines away or rewrites:
+  //   G_ZEXT -> G_AND
+  //   G_SEXT -> G_SEXT_INREG
+  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
+      .legalIf([](const LegalityQuery &Query) { return false; })
+      .clampScalar(0, sXLen, sXLen);
+
+  getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL})
+      .legalFor({{sXLen, sXLen}})
+      .clampScalar(0, sXLen, sXLen)
+      .clampScalar(1, sXLen, sXLen);
+
+  getActionDefinitionsBuilder(G_CONSTANT)
+      .legalFor({sXLen})
+      .clampScalar(0, sXLen, sXLen);
+
+  // G_SEXT_INREG -> G_SHL + G_ASHR
+  // TODO: We have better patterns for this depending on the operand.
+  getActionDefinitionsBuilder(G_SEXT_INREG).lower();
+
   computeTables();
 }
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu32.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu32.mir
@@ -0,0 +1,370 @@
+# RUN: llc -march=riscv32 -x mir -run-pass=legalizer -simplify-mir -verify-machineinstrs < %s \
+# RUN: | FileCheck -check-prefix=RV32I %s
+
+--- |
+
+  ; Extends are only tested exhaustively for add, to avoid excessive tests.
+  define void @add_i8() { entry: ret void }
+  define void @add_i8_signext() { entry: ret void }
+  define void @add_i8_zeroext() { entry: ret void }
+  define void @add_i16() { entry: ret void }
+  define void @add_i16_signext() { entry: ret void }
+  define void @add_i16_zeroext() { entry: ret void }
+  define void @add_i32() { entry: ret void }
+
+  define void @sub_i32() { entry: ret void }
+  define void @mul_i32() { entry: ret void }
+  define void @sdiv_i32() { entry: ret void }
+  define void @srem_i32() { entry: ret void }
+  define void @udiv_i32() { entry: ret void }
+  define void @urem_i32() { entry: ret void }
+...
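+
+# A rough sketch (not itself checked by FileCheck) of how the widening plays
+# out: an s8 or s16 G_ADD is clamped up to s32, and the G_TRUNC/G_ANYEXT
+# artifacts feeding it are combined into plain COPYs, e.g. (hypothetical
+# vreg numbers):
+#   %0:_(s8) = G_TRUNC %2(s32)   ; before legalization
+#   %4:_(s8) = G_ADD %0, %1
+# becomes
+#   %5:_(s32) = COPY %2(s32)     ; after: the add runs at register width
+#   %7:_(s32) = G_ADD %5, %6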
+--- +name: add_i8 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i8 + ; RV32I: bb.0.entry: + ; RV32I-NEXT: liveins: $x10, $x11 + ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32) + ; RV32I-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32) + ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]] + ; RV32I-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I-NEXT: $x10 = COPY [[VREG5]](s32) + ; RV32I-NEXT: PseudoRET + %2:_(s32) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s32) + %4:_(s8) = G_ADD %0, %1 + %5:_(s32) = G_ANYEXT %4(s8) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... +--- +name: add_i8_signext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i8_signext + ; RV32I: bb.0.entry: + ; RV32I-NEXT: liveins: $x10, $x11 + ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32) + ; RV32I-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32) + ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]] + ; RV32I-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I-NEXT: [[SHAMT:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 + ; RV32I-NEXT: [[VREG6:%[0-9]+]]:_(s32) = G_SHL [[VREG5]], [[SHAMT]](s32) + ; RV32I-NEXT: [[VREG7:%[0-9]+]]:_(s32) = G_ASHR [[VREG6]], [[SHAMT]](s32) + ; RV32I-NEXT: $x10 = COPY [[VREG7]](s32) + ; RV32I-NEXT: PseudoRET + %2:_(s32) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s32) + %4:_(s8) = G_ADD %0, %1 + %5:_(s32) = G_SEXT %4(s8) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... +--- +name: add_i8_zeroext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i8_zeroext + ; RV32I: bb.0.entry: + ; RV32I-NEXT: liveins: $x10, $x11 + ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32) + ; RV32I-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32) + ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]] + ; RV32I-NEXT: [[MASK:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; RV32I-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I-NEXT: [[VREG6:%[0-9]+]]:_(s32) = G_AND [[VREG5]], [[MASK]] + ; RV32I-NEXT: $x10 = COPY [[VREG6]](s32) + ; RV32I-NEXT: PseudoRET + %2:_(s32) = COPY $x10 + %0:_(s8) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s8) = G_TRUNC %3(s32) + %4:_(s8) = G_ADD %0, %1 + %5:_(s32) = G_ZEXT %4(s8) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... 
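+
+# In the zeroext tests, the G_ZEXT from sN is rewritten to a G_AND with the
+# mask 2^N - 1 (255 for s8, 65535 for s16). This is a summary of the
+# lowering rules above, not additional checked output.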
+--- +name: add_i16 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i16 + ; RV32I: bb.0.entry: + ; RV32I-NEXT: liveins: $x10, $x11 + ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32) + ; RV32I-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32) + ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]] + ; RV32I-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I-NEXT: $x10 = COPY [[VREG5]](s32) + ; RV32I-NEXT: PseudoRET + %2:_(s32) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s32) + %4:_(s16) = G_ADD %0, %1 + %5:_(s32) = G_ANYEXT %4(s16) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... +--- +name: add_i16_signext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i16_signext + ; RV32I: bb.0.entry: + ; RV32I-NEXT: liveins: $x10, $x11 + ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32) + ; RV32I-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32) + ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]] + ; RV32I-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I-NEXT: [[SHAMT:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; RV32I-NEXT: [[VREG6:%[0-9]+]]:_(s32) = G_SHL [[VREG5]], [[SHAMT]](s32) + ; RV32I-NEXT: [[VREG7:%[0-9]+]]:_(s32) = G_ASHR [[VREG6]], [[SHAMT]](s32) + ; RV32I-NEXT: $x10 = COPY [[VREG7]](s32) + ; RV32I-NEXT: PseudoRET + %2:_(s32) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s32) + %4:_(s16) = G_ADD %0, %1 + %5:_(s32) = G_SEXT %4(s16) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... +--- +name: add_i16_zeroext +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i16_zeroext + ; RV32I: bb.0.entry: + ; RV32I-NEXT: liveins: $x10, $x11 + ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[VREG3:%[0-9]+]]:_(s32) = COPY [[VREG1]](s32) + ; RV32I-NEXT: [[VREG4:%[0-9]+]]:_(s32) = COPY [[VREG2]](s32) + ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG3]], [[VREG4]] + ; RV32I-NEXT: [[MASK:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 + ; RV32I-NEXT: [[VREG5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32) + ; RV32I-NEXT: [[VREG6:%[0-9]+]]:_(s32) = G_AND [[VREG5]], [[MASK]] + ; RV32I-NEXT: $x10 = COPY [[VREG6]](s32) + ; RV32I-NEXT: PseudoRET + %2:_(s32) = COPY $x10 + %0:_(s16) = G_TRUNC %2(s32) + %3:_(s32) = COPY $x11 + %1:_(s16) = G_TRUNC %3(s32) + %4:_(s16) = G_ADD %0, %1 + %5:_(s32) = G_ZEXT %4(s16) + $x10 = COPY %5(s32) + PseudoRET implicit $x10 + +... +--- +name: add_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: add_i32 + ; RV32I: bb.0.entry: + ; RV32I-NEXT: liveins: $x10, $x11 + ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VREG1]], [[VREG2]] + ; RV32I-NEXT: $x10 = COPY [[ADD]](s32) + ; RV32I-NEXT: PseudoRET + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_ADD %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
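+
+# s32 is already XLen on RV32, so add_i32 above and the remaining i32 tests
+# only check that the operations pass through as already legal.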
+--- +name: sub_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: sub_i32 + ; RV32I: bb.0.entry: + ; RV32I-NEXT: liveins: $x10, $x11 + ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[VREG1]], [[VREG2]] + ; RV32I-NEXT: $x10 = COPY [[SUB]](s32) + ; RV32I-NEXT: PseudoRET + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_SUB %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: mul_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: mul_i32 + ; RV32I: bb.0.entry: + ; RV32I-NEXT: liveins: $x10, $x11 + ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[VREG1]], [[VREG2]] + ; RV32I-NEXT: $x10 = COPY [[MUL]](s32) + ; RV32I-NEXT: PseudoRET + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_MUL %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: sdiv_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: sdiv_i32 + ; RV32I: bb.0.entry: + ; RV32I-NEXT: liveins: $x10, $x11 + ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[VREG1]], [[VREG2]] + ; RV32I-NEXT: $x10 = COPY [[SDIV]](s32) + ; RV32I-NEXT: PseudoRET + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_SDIV %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: srem_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: srem_i32 + ; RV32I: bb.0.entry: + ; RV32I-NEXT: liveins: $x10, $x11 + ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[SREM:%[0-9]+]]:_(s32) = G_SREM [[VREG1]], [[VREG2]] + ; RV32I-NEXT: $x10 = COPY [[SREM]](s32) + ; RV32I-NEXT: PseudoRET + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_SREM %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: udiv_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: udiv_i32 + ; RV32I: bb.0.entry: + ; RV32I-NEXT: liveins: $x10, $x11 + ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[UDIV:%[0-9]+]]:_(s32) = G_UDIV [[VREG1]], [[VREG2]] + ; RV32I-NEXT: $x10 = COPY [[UDIV]](s32) + ; RV32I-NEXT: PseudoRET + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_UDIV %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... +--- +name: urem_i32 +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x10, $x11 + + ; RV32I-LABEL: name: urem_i32 + ; RV32I: bb.0.entry: + ; RV32I-NEXT: liveins: $x10, $x11 + ; RV32I: [[VREG1:%[0-9]+]]:_(s32) = COPY $x10 + ; RV32I-NEXT: [[VREG2:%[0-9]+]]:_(s32) = COPY $x11 + ; RV32I-NEXT: [[UREM:%[0-9]+]]:_(s32) = G_UREM [[VREG1]], [[VREG2]] + ; RV32I-NEXT: $x10 = COPY [[UREM]](s32) + ; RV32I-NEXT: PseudoRET + %0:_(s32) = COPY $x10 + %1:_(s32) = COPY $x11 + %2:_(s32) = G_UREM %0, %1 + $x10 = COPY %2(s32) + PseudoRET implicit $x10 + +... 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu64.mir
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/alu64.mir
@@ -0,0 +1,449 @@
+# RUN: llc -march=riscv64 -x mir -run-pass=legalizer -simplify-mir -verify-machineinstrs < %s \
+# RUN: | FileCheck -check-prefix=RV64I %s
+
+--- |
+
+  ; Extends are only tested exhaustively for add, to avoid excessive tests.
+  define void @add_i8() { entry: ret void }
+  define void @add_i8_signext() { entry: ret void }
+  define void @add_i8_zeroext() { entry: ret void }
+  define void @add_i16() { entry: ret void }
+  define void @add_i16_signext() { entry: ret void }
+  define void @add_i16_zeroext() { entry: ret void }
+  define void @add_i32() { entry: ret void }
+  define void @add_i32_signext() { entry: ret void }
+  define void @add_i32_zeroext() { entry: ret void }
+  define void @add_i64() { entry: ret void }
+
+  define void @sub_i64() { entry: ret void }
+  define void @mul_i64() { entry: ret void }
+  define void @sdiv_i64() { entry: ret void }
+  define void @srem_i64() { entry: ret void }
+  define void @udiv_i64() { entry: ret void }
+  define void @urem_i64() { entry: ret void }
+
+...
+---
+name: add_i8
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i8
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: $x10 = COPY [[VREG5]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s8) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s8) = G_TRUNC %3(s64)
+    %4:_(s8) = G_ADD %0, %1
+    %5:_(s64) = G_ANYEXT %4(s8)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i8_signext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i8_signext
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: [[SHAMT:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
+    ; RV64I-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_SHL [[VREG5]], [[SHAMT]](s64)
+    ; RV64I-NEXT: [[VREG7:%[0-9]+]]:_(s64) = G_ASHR [[VREG6]], [[SHAMT]](s64)
+    ; RV64I-NEXT: $x10 = COPY [[VREG7]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s8) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s8) = G_TRUNC %3(s64)
+    %4:_(s8) = G_ADD %0, %1
+    %5:_(s64) = G_SEXT %4(s8)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
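+
+# G_SEXT from sN is lowered via G_SEXT_INREG to a G_SHL/G_ASHR pair whose
+# shift amount is XLen - N: 56 for s8, 48 for s16, and 32 for s32 on RV64.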
+---
+name: add_i8_zeroext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i8_zeroext
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[MASK:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_AND [[VREG5]], [[MASK]]
+    ; RV64I-NEXT: $x10 = COPY [[VREG6]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s8) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s8) = G_TRUNC %3(s64)
+    %4:_(s8) = G_ADD %0, %1
+    %5:_(s64) = G_ZEXT %4(s8)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i16
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i16
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: $x10 = COPY [[VREG5]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s16) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s16) = G_TRUNC %3(s64)
+    %4:_(s16) = G_ADD %0, %1
+    %5:_(s64) = G_ANYEXT %4(s16)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i16_signext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i16_signext
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: [[SHAMT:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+    ; RV64I-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_SHL [[VREG5]], [[SHAMT]](s64)
+    ; RV64I-NEXT: [[VREG7:%[0-9]+]]:_(s64) = G_ASHR [[VREG6]], [[SHAMT]](s64)
+    ; RV64I-NEXT: $x10 = COPY [[VREG7]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s16) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s16) = G_TRUNC %3(s64)
+    %4:_(s16) = G_ADD %0, %1
+    %5:_(s64) = G_SEXT %4(s16)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i16_zeroext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i16_zeroext
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[MASK:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_AND [[VREG5]], [[MASK]]
+    ; RV64I-NEXT: $x10 = COPY [[VREG6]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s16) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s16) = G_TRUNC %3(s64)
+    %4:_(s16) = G_ADD %0, %1
+    %5:_(s64) = G_ZEXT %4(s16)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i32
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i32
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: $x10 = COPY [[VREG5]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s32) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s32) = G_TRUNC %3(s64)
+    %4:_(s32) = G_ADD %0, %1
+    %5:_(s64) = G_ANYEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i32_signext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i32_signext
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: [[SHAMT:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; RV64I-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_SHL [[VREG5]], [[SHAMT]](s64)
+    ; RV64I-NEXT: [[VREG7:%[0-9]+]]:_(s64) = G_ASHR [[VREG6]], [[SHAMT]](s64)
+    ; RV64I-NEXT: $x10 = COPY [[VREG7]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s32) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s32) = G_TRUNC %3(s64)
+    %4:_(s32) = G_ADD %0, %1
+    %5:_(s64) = G_SEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i32_zeroext
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i32_zeroext
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[VREG3:%[0-9]+]]:_(s64) = COPY [[VREG1]](s64)
+    ; RV64I-NEXT: [[VREG4:%[0-9]+]]:_(s64) = COPY [[VREG2]](s64)
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG3]], [[VREG4]]
+    ; RV64I-NEXT: [[MASK:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
+    ; RV64I-NEXT: [[VREG5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; RV64I-NEXT: [[VREG6:%[0-9]+]]:_(s64) = G_AND [[VREG5]], [[MASK]]
+    ; RV64I-NEXT: $x10 = COPY [[VREG6]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %2:_(s64) = COPY $x10
+    %0:_(s32) = G_TRUNC %2(s64)
+    %3:_(s64) = COPY $x11
+    %1:_(s32) = G_TRUNC %3(s64)
+    %4:_(s32) = G_ADD %0, %1
+    %5:_(s64) = G_ZEXT %4(s32)
+    $x10 = COPY %5(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: add_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: add_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[VREG1]], [[VREG2]]
+    ; RV64I-NEXT: $x10 = COPY [[ADD]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_ADD %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: sub_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sub_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[VREG1]], [[VREG2]]
+    ; RV64I-NEXT: $x10 = COPY [[SUB]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_SUB %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: mul_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: mul_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[VREG1]], [[VREG2]]
+    ; RV64I-NEXT: $x10 = COPY [[MUL]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_MUL %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: sdiv_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: sdiv_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[SDIV:%[0-9]+]]:_(s64) = G_SDIV [[VREG1]], [[VREG2]]
+    ; RV64I-NEXT: $x10 = COPY [[SDIV]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_SDIV %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: srem_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: srem_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[SREM:%[0-9]+]]:_(s64) = G_SREM [[VREG1]], [[VREG2]]
+    ; RV64I-NEXT: $x10 = COPY [[SREM]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_SREM %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: udiv_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: udiv_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[UDIV:%[0-9]+]]:_(s64) = G_UDIV [[VREG1]], [[VREG2]]
+    ; RV64I-NEXT: $x10 = COPY [[UDIV]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_UDIV %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...
+---
+name: urem_i64
+tracksRegLiveness: true
+body: |
+  bb.0.entry:
+    liveins: $x10, $x11
+
+    ; RV64I-LABEL: name: urem_i64
+    ; RV64I: bb.0.entry:
+    ; RV64I-NEXT: liveins: $x10, $x11
+    ; RV64I: [[VREG1:%[0-9]+]]:_(s64) = COPY $x10
+    ; RV64I-NEXT: [[VREG2:%[0-9]+]]:_(s64) = COPY $x11
+    ; RV64I-NEXT: [[UREM:%[0-9]+]]:_(s64) = G_UREM [[VREG1]], [[VREG2]]
+    ; RV64I-NEXT: $x10 = COPY [[UREM]](s64)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(s64) = COPY $x10
+    %1:_(s64) = COPY $x11
+    %2:_(s64) = G_UREM %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+
+...