diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1340,7 +1340,7 @@
   MVT XLenVT = Subtarget.getXLenVT();
   SDValue VL = VecVT.isFixedLengthVector()
                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
-                   : DAG.getRegister(RISCV::X0, XLenVT);
+                   : DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
   return {Mask, VL};
@@ -3292,7 +3292,7 @@
   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
-                     DAG.getRegister(RISCV::X0, MVT::i64));
+                     DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i64));
 }
 
 // Custom-lower extensions from mask vectors by using a vselect either with 1
@@ -4450,7 +4450,7 @@
     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
   } else
-    VL = DAG.getRegister(RISCV::X0, XLenVT);
+    VL = DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
 
   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
@@ -4486,7 +4486,7 @@
     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
   } else
-    VL = DAG.getRegister(RISCV::X0, XLenVT);
+    VL = DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
 
   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
   return DAG.getMemIntrinsicNode(
@@ -4743,7 +4743,7 @@
     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
   } else
-    VL = DAG.getRegister(RISCV::X0, XLenVT);
+    VL = DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
 
   unsigned IntID =
       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
@@ -4824,7 +4824,7 @@
     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
   } else
-    VL = DAG.getRegister(RISCV::X0, XLenVT);
+    VL = DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
 
   unsigned IntID =
       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
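Note on the lowering changes above: every path that previously materialized a use of the X0 register as the VL operand now emits the VLMaxSentinel immediate (-1) instead, so requesting VLMAX no longer creates an artificial physical-register use during instruction selection. The sketch below is a minimal, self-contained model of that recurring decision; computeAVL and the use of std::optional are illustrative stand-ins, not SelectionDAG APIs.

    #include <cstdint>
    #include <iostream>
    #include <optional>

    namespace RISCV {
    // Matches the sentinel defined in RISCVInstrInfo.h below.
    static constexpr int64_t VLMaxSentinel = -1LL;
    } // namespace RISCV

    // Illustrative stand-in for the recurring decision in the hunks above: a
    // fixed-length vector contributes its known element count as the AVL,
    // while a scalable vector requests VLMAX via the sentinel immediate.
    int64_t computeAVL(std::optional<unsigned> FixedNumElts) {
      return FixedNumElts ? static_cast<int64_t>(*FixedNumElts)
                          : RISCV::VLMaxSentinel;
    }

    int main() {
      std::cout << computeAVL(4u) << '\n';           // fixed 4-element vector: AVL = 4
      std::cout << computeAVL(std::nullopt) << '\n'; // scalable vector: AVL = -1 (VLMAX)
    }

The real code tests VecVT.isFixedLengthVector() and builds SDValue constants, but the choice is the same: a known element count for fixed-length vectors, the sentinel for scalable ones.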
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -430,10 +430,16 @@
 
   if (RISCVII::hasVLOp(TSFlags)) {
     const MachineOperand &VLOp = MI.getOperand(NumOperands - 2);
-    if (VLOp.isImm())
-      InstrInfo.setAVLImm(VLOp.getImm());
-    else
+    if (VLOp.isImm()) {
+      int64_t Imm = VLOp.getImm();
+      // Convert the VLMax sentinel to X0 register.
+      if (Imm == RISCV::VLMaxSentinel)
+        InstrInfo.setAVLReg(RISCV::X0);
+      else
+        InstrInfo.setAVLImm(Imm);
+    } else {
       InstrInfo.setAVLReg(VLOp.getReg());
+    }
   } else
     InstrInfo.setAVLReg(RISCV::NoRegister);
   InstrInfo.setVTYPE(VLMul, SEW, /*TailAgnostic*/ TailAgnostic,
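The hunk above is where the sentinel is consumed again: when the pass reads an immediate AVL equal to VLMaxSentinel, it converts it back to the X0 register, which is how vsetvli encodes a VLMAX request. A self-contained sketch of that branch follows; VSETVLIInfo and VLOperand here are simplified stand-ins for the pass's real data structures, not the actual LLVM classes.

    #include <cassert>
    #include <cstdint>

    namespace RISCV {
    static constexpr int64_t VLMaxSentinel = -1LL; // matches RISCVInstrInfo.h below
    constexpr unsigned X0 = 0;                     // stand-in encoding for x0
    } // namespace RISCV

    // Simplified model of the AVL half of the pass's per-instruction state:
    // the AVL is either a register or an immediate.
    struct VSETVLIInfo {
      bool AVLIsReg = false;
      unsigned AVLReg = 0;
      int64_t AVLImm = 0;
      void setAVLReg(unsigned Reg) { AVLIsReg = true; AVLReg = Reg; }
      void setAVLImm(int64_t Imm) { AVLIsReg = false; AVLImm = Imm; }
    };

    // Simplified model of the VL machine operand.
    struct VLOperand {
      bool IsImm = false;
      int64_t Imm = 0;
      unsigned Reg = 0;
    };

    // Mirrors the branch added in the hunk above: a sentinel immediate becomes
    // the X0 register; any other immediate is kept as an immediate AVL.
    void computeAVLInfo(VSETVLIInfo &InstrInfo, const VLOperand &VLOp) {
      if (VLOp.IsImm) {
        int64_t Imm = VLOp.Imm;
        if (Imm == RISCV::VLMaxSentinel)
          InstrInfo.setAVLReg(RISCV::X0);
        else
          InstrInfo.setAVLImm(Imm);
      } else {
        InstrInfo.setAVLReg(VLOp.Reg);
      }
    }

    int main() {
      VSETVLIInfo Info;
      computeAVLInfo(Info, {/*IsImm=*/true, RISCV::VLMaxSentinel, 0});
      assert(Info.AVLIsReg && Info.AVLReg == RISCV::X0);
      computeAVLInfo(Info, {/*IsImm=*/true, 4, 0});
      assert(!Info.AVLIsReg && Info.AVLImm == 4);
      return 0;
    }

Compiled standalone, both asserts pass: the sentinel round-trips to an X0 AVL register, and ordinary immediates are preserved.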
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -181,6 +181,11 @@
   const RISCVSubtarget &STI;
 };
 
+namespace RISCV {
+// Special immediate for AVL operand of V pseudo instructions to indicate VLMax.
+static constexpr int64_t VLMaxSentinel = -1LL;
+}
+
 namespace RISCVVPseudosTable {
 
 struct PseudoInfo {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -23,7 +23,7 @@
 // Operand that is allowed to be a register or a 5 bit immediate.
 // This allows us to pick between VSETIVLI and VSETVLI opcodes using the same
 // pseudo instructions.
-def AVL : RegisterOperand<GPR> {
+def AVL : RegisterOperand<GPRNoX0> {
   let OperandNamespace = "RISCVOp";
   let OperandType = "OPERAND_AVL";
 }
@@ -140,7 +140,9 @@
 def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT AVL:$vl)))>;
 
 // Output pattern for X0 used to represent VLMAX in the pseudo instructions.
-def VLMax : OutPatFrag<(ops), (XLenVT X0)>;
+// We can't use the X0 register because the AVL operands use GPRNoX0.
+// This must be kept in sync with RISCV::VLMaxSentinel.
+def VLMax : OutPatFrag<(ops), (XLenVT -1)>;
 
 // List of EEW.
 defvar EEWList = [8, 16, 32, 64];
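The new TableGen comment asks that VLMax stay in sync with RISCV::VLMaxSentinel, but nothing enforces that mechanically. A hypothetical guard, not part of this patch, could at least document the coupling next to the C++ definition:

    #include <cstdint>

    namespace RISCV {
    static constexpr int64_t VLMaxSentinel = -1LL;
    } // namespace RISCV

    // def VLMax : OutPatFrag<(ops), (XLenVT -1)>; hard-codes the same -1 on
    // the TableGen side; this assert would fail loudly if the sentinel were
    // ever changed without the output pattern being updated.
    static_assert(RISCV::VLMaxSentinel == -1LL,
                  "VLMax output pattern in RISCVInstrInfoVPseudos.td must match");

    int main() { return 0; }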
diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
--- a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir
@@ -53,7 +53,7 @@
     ; CHECK: $x1 = LD $x2, 2024 :: (load (s64) from %stack.3)
     ; CHECK: $x2 = frame-destroy ADDI $x2, 2032
     ; CHECK: PseudoRET
-    %1:gpr = COPY $x11
+    %1:gprnox0 = COPY $x11
     %0:gpr = COPY $x10
     %2:vr = PseudoVLE64_V_M1 %0, %1, 6 :: (load unknown-size from %ir.pa, align 8)
     %3:gpr = ADDI %stack.2, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir b/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir
--- a/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir
@@ -29,7 +29,7 @@
     ; CHECK: [[COPY:%[0-9]+]]:vr = COPY $v0
     ; CHECK: [[COPY1:%[0-9]+]]:vrnov0 = COPY $v1
     ; CHECK: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v2
-    ; CHECK: [[PseudoVNMSUB_VV_M1_:%[0-9]+]]:vr = PseudoVNMSUB_VV_M1 [[PseudoVNMSUB_VV_M1_]], [[COPY1]], [[COPY2]], $x0, 6, 1, implicit $vl, implicit $vtype
+    ; CHECK: [[PseudoVNMSUB_VV_M1_:%[0-9]+]]:vr = PseudoVNMSUB_VV_M1 [[PseudoVNMSUB_VV_M1_]], [[COPY1]], [[COPY2]], -1, 6, 1, implicit $vl, implicit $vtype
     ; CHECK: [[COPY2:%[0-9]+]]:vr = COPY [[PseudoVNMSUB_VV_M1_]]
     ; CHECK: dead [[COPY2]]:vr = PseudoVSLL_VI_M1 [[COPY2]], 11, $noreg, 6, implicit $vl, implicit $vtype
     ; CHECK: $v0 = COPY [[PseudoVNMSUB_VV_M1_]]
@@ -37,7 +37,7 @@
     %0:vr = COPY $v0
     %1:vrnov0 = COPY $v1
     %2:vrnov0 = COPY $v2
-    %0:vr = PseudoVNMSUB_VV_M1 %0, %1, killed %2, $x0, 6, 1, implicit $vl, implicit $vtype
+    %0:vr = PseudoVNMSUB_VV_M1 %0, %1, killed %2, -1, 6, 1, implicit $vl, implicit $vtype
     %3:vr = COPY %0
     %3:vr = PseudoVSLL_VI_M1 %3, 11, $noreg, 6, implicit $vl, implicit $vtype
     $v0 = COPY %0
diff --git a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
--- a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir
@@ -52,7 +52,7 @@
     ; CHECK: $v0 = COPY [[COPY]]
     ; CHECK: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
     ; CHECK: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]]
-    ; CHECK: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, $x0, 6 :: (load (s512) from %ir.a, align 8)
+    ; CHECK: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 6 :: (load (s512) from %ir.a, align 8)
     ; CHECK: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]]
     ; CHECK: PseudoRET implicit $v8m8
     %1:vr = COPY $v0
@@ -60,7 +60,7 @@
     $v0 = COPY %1
     %3:vrm8 = IMPLICIT_DEF
     %4:vrm8nov0 = COPY %3
-    %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, $x0, 6 :: (load (s512) from %ir.a, align 8)
+    %2:vrm8nov0 = PseudoVLE64_V_M8_MASK %4, %0, $v0, -1, 6 :: (load (s512) from %ir.a, align 8)
     $v8m8 = COPY %2
     PseudoRET implicit $v8m8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -131,7 +131,7 @@
   - { id: 4, class: gpr }
   - { id: 5, class: gpr }
   - { id: 6, class: vr }
-  - { id: 7, class: gpr }
+  - { id: 7, class: gprnox0 }
   - { id: 8, class: gpr }
 liveins:
   - { reg: '$x10', virtual-reg: '%4' }
@@ -170,7 +170,7 @@
     successors: %bb.2(0x30000000), %bb.1(0x50000000)
     liveins: $x10, $x11, $v8, $x12
 
-    %7:gpr = COPY $x12
+    %7:gprnox0 = COPY $x12
     %6:vr = COPY $v8
     %5:gpr = COPY $x11
     %4:gpr = COPY $x10
@@ -204,7 +204,7 @@
   - { id: 4, class: gpr }
   - { id: 5, class: gpr }
   - { id: 6, class: gpr }
-  - { id: 7, class: gpr }
+  - { id: 7, class: gprnox0 }
   - { id: 8, class: gpr }
 liveins:
   - { reg: '$x10', virtual-reg: '%4' }
@@ -245,7 +245,7 @@
     successors: %bb.2(0x30000000), %bb.1(0x50000000)
     liveins: $x10, $x11, $x12, $x13
 
-    %7:gpr = COPY $x13
+    %7:gprnox0 = COPY $x13
     %6:gpr = COPY $x12
     %5:gpr = COPY $x11
     %4:gpr = COPY $x10
@@ -278,7 +278,7 @@
   - { id: 3, class: gpr }
   - { id: 4, class: vr }
   - { id: 5, class: vr }
-  - { id: 6, class: gpr }
+  - { id: 6, class: gprnox0 }
   - { id: 7, class: gpr }
   - { id: 8, class: gpr }
 liveins:
@@ -319,7 +319,7 @@
    successors: %bb.2(0x30000000), %bb.1(0x50000000)
    liveins: $x10, $v8, $v9, $x11
 
-    %6:gpr = COPY $x11
+    %6:gprnox0 = COPY $x11
     %5:vr = COPY $v9
     %4:vr = COPY $v8
     %3:gpr = COPY $x10
@@ -346,7 +346,7 @@
 alignment: 4
 tracksRegLiveness: true
 registers:
-  - { id: 0, class: gpr }
+  - { id: 0, class: gprnox0 }
   - { id: 1, class: vr }
   - { id: 2, class: vr }
   - { id: 3, class: vr }
@@ -372,7 +372,7 @@
     ; CHECK: [[COPY1:%[0-9]+]]:vr = COPY $v9
     ; CHECK: [[COPY2:%[0-9]+]]:vr = COPY $v8
     ; CHECK: [[COPY3:%[0-9]+]]:gpr = COPY $x10
-    ; CHECK: [[PseudoVSETVLI:%[0-9]+]]:gpr = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
+    ; CHECK: [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
     ; CHECK: [[COPY4:%[0-9]+]]:gpr = COPY $x0
     ; CHECK: BEQ [[COPY3]], [[COPY4]], %bb.2
     ; CHECK: PseudoBR %bb.1
@@ -395,7 +395,7 @@
     %6:vr = COPY $v9
     %5:vr = COPY $v8
     %4:gpr = COPY $x10
-    %0:gpr = PseudoVSETVLI %7, 88, implicit-def dead $vl, implicit-def dead $vtype
+    %0:gprnox0 = PseudoVSETVLI %7, 88, implicit-def dead $vl, implicit-def dead $vtype
     %8:gpr = COPY $x0
     BEQ %4, %8, %bb.2
     PseudoBR %bb.1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -98,7 +98,7 @@
 registers:
   - { id: 0, class: vr }
   - { id: 1, class: vr }
-  - { id: 2, class: gpr }
+  - { id: 2, class: gprnox0 }
   - { id: 3, class: vr }
 liveins:
   - { reg: '$v8', virtual-reg: '%0' }
@@ -120,7 +120,7 @@
    ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
    ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK: PseudoRET implicit $v8
-    %2:gpr = COPY $x10
+    %2:gprnox0 = COPY $x10
    %1:vr = COPY $v9
    %0:vr = COPY $v8
    %3:vr = PseudoVADD_VV_M1 %0, %1, %2, 6
@@ -135,7 +135,7 @@
 registers:
   - { id: 0, class: gpr }
   - { id: 1, class: vr }
-  - { id: 2, class: gpr }
+  - { id: 2, class: gprnox0 }
   - { id: 3, class: vr }
   - { id: 4, class: vr }
 liveins:
@@ -159,7 +159,7 @@
    ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
    ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK: PseudoRET implicit $v8
-    %2:gpr = COPY $x11
+    %2:gprnox0 = COPY $x11
    %1:vr = COPY $v8
    %0:gpr = COPY $x10
    %3:vr = PseudoVLE64_V_M1 %0, %2, 6
@@ -174,7 +174,7 @@
 tracksRegLiveness: true
 registers:
   - { id: 0, class: gpr }
-  - { id: 1, class: gpr }
+  - { id: 1, class: gprnox0 }
   - { id: 2, class: vr }
   - { id: 3, class: vr }
 liveins:
@@ -197,7 +197,7 @@
    ; CHECK: early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed [[PseudoVLE32_V_MF2_]], $noreg, 6, implicit $vl, implicit $vtype
    ; CHECK: $v8 = COPY %3
    ; CHECK: PseudoRET implicit $v8
-    %1:gpr = COPY $x11
+    %1:gprnox0 = COPY $x11
    %0:gpr = COPY $x10
    %2:vr = PseudoVLE32_V_MF2 %0, %1, 5
    early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed %2, %1, 6
@@ -299,7 +299,7 @@
    ; CHECK: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype
    ; CHECK: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
    ; CHECK: dead %6:gpr = PseudoVSETVLIX0 $x0, 88, implicit-def $vl, implicit-def $vtype
-    ; CHECK: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, $noreg, 6, implicit $vl, implicit $vtype
+    ; CHECK: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, -1, 6, implicit $vl, implicit $vtype
    ; CHECK: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
    ; CHECK: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype
    ; CHECK: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6, implicit $vl, implicit $vtype
@@ -308,7 +308,7 @@
    ; CHECK: PseudoRET implicit $x10
    %0:gpr = COPY $x10
    %1:vr = PseudoVLE64_V_M1 %0, 2, 6 :: (load (s128) from %ir.x)
-    %2:vr = PseudoVMV_V_I_M1 0, $x0, 6
+    %2:vr = PseudoVMV_V_I_M1 0, -1, 6
    %4:vr = IMPLICIT_DEF
    %3:vr = PseudoVREDSUM_VS_M1 %4, killed %1, killed %2, 2, 6
    %5:gpr = PseudoVMV_X_S_M1 killed %3, 6
@@ -324,7 +324,7 @@
   - { id: 0, class: vr }
   - { id: 1, class: vr }
   - { id: 2, class: gprnox0 }
-  - { id: 3, class: gpr }
+  - { id: 3, class: gprnox0 }
   - { id: 4, class: vr }
 liveins:
   - { reg: '$v8', virtual-reg: '%0' }
@@ -342,14 +342,14 @@
    ; CHECK: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
    ; CHECK: [[COPY1:%[0-9]+]]:vr = COPY $v9
    ; CHECK: [[COPY2:%[0-9]+]]:vr = COPY $v8
-    ; CHECK: [[PseudoVSETVLI:%[0-9]+]]:gpr = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
+    ; CHECK: [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype
    ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
    ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK: PseudoRET implicit $v8
    %2:gprnox0 = COPY $x10
    %1:vr = COPY $v9
    %0:vr = COPY $v8
-    %3:gpr = PseudoVSETVLI %2, 88, implicit-def dead $vl, implicit-def dead $vtype
+    %3:gprnox0 = PseudoVSETVLI %2, 88, implicit-def dead $vl, implicit-def dead $vtype
    %4:vr = PseudoVADD_VV_M1 %0, %1, killed %3, 6
    $v8 = COPY %4
    PseudoRET implicit $v8
@@ -362,7 +362,7 @@
 registers:
   - { id: 0, class: gpr }
   - { id: 1, class: vr }
-  - { id: 2, class: gpr }
+  - { id: 2, class: gprnox0 }
   - { id: 3, class: vr }
   - { id: 4, class: vr }
 liveins:
@@ -388,7 +388,7 @@
    ; CHECK: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype
    ; CHECK: $v8 = COPY [[PseudoVADD_VV_M1_]]
    ; CHECK: PseudoRET implicit $v8
-    %2:gpr = COPY $x11
+    %2:gprnox0 = COPY $x11
    %1:vr = COPY $v8
    %0:gpr = COPY $x10
    %3:vr = PseudoVLE64_V_M1 %0, %2, 6
diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
@@ -40,7 +40,7 @@
    ; CHECK: $x2 = frame-destroy ADDI $x2, 16
    ; CHECK: PseudoRET
    %0:gpr = COPY $x10
-    %1:gpr = COPY $x11
+    %1:gprnox0 = COPY $x11
    $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 %0, %1, 6
    PseudoVSPILL7_M1 killed renamable $v0_v1_v2_v3_v4_v5_v6, %stack.0, $x0
    renamable $v7_v8_v9_v10_v11_v12_v13 = PseudoVRELOAD7_M1 %stack.0, $x0