diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h @@ -164,6 +164,12 @@ MachineInstr *convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override; + // MIR printer helper function to annotate Operands with a comment. + std::string + createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, + unsigned OpIdx, + const TargetRegisterInfo *TRI) const override; + Register getVLENFactoredAmount( MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, int64_t Amount, diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -1328,6 +1328,53 @@ return It; } +// MIR printer helper function to annotate Operands with a comment. +std::string RISCVInstrInfo::createMIROperandComment( + const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, + const TargetRegisterInfo *TRI) const { + // Print a generic comment for this operand if there is one. + std::string GenericComment = + TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI); + if (!GenericComment.empty()) + return GenericComment; + + // If not, we must have an immediate operand. + if (Op.getType() != MachineOperand::MO_Immediate) + return std::string(); + + std::string Comment; + raw_string_ostream OS(Comment); + + uint64_t TSFlags = MI.getDesc().TSFlags; + + // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW + // operand of vector codegen pseudos. + if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI || + MI.getOpcode() == RISCV::PseudoVSETVLI || + MI.getOpcode() == RISCV::PseudoVSETIVLI || + MI.getOpcode() == RISCV::PseudoVSETVLIX0) && + OpIdx == 2) { + unsigned Imm = MI.getOperand(OpIdx).getImm(); + RISCVVType::printVType(Imm, OS); + } else if (RISCVII::hasSEWOp(TSFlags)) { + unsigned NumOperands = MI.getNumExplicitOperands(); + bool HasPolicy = RISCVII::hasVecPolicyOp(TSFlags); + + // The SEW operand is before any policy operand. + if (OpIdx != NumOperands - HasPolicy - 1) + return std::string(); + + unsigned Log2SEW = MI.getOperand(OpIdx).getImm(); + unsigned SEW = Log2SEW ? 
1 << Log2SEW : 8; + assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW"); + + OS << "e" << SEW; + } + + OS.flush(); + return Comment; +} + // clang-format off #define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL) \ RISCV::PseudoV##OP##_##TYPE##_##LMUL diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir --- a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir +++ b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir @@ -39,8 +39,8 @@ ; CHECK-NEXT: $x2 = frame-setup ADDI $x2, -240 ; CHECK-NEXT: $x12 = frame-setup PseudoReadVLENB ; CHECK-NEXT: $x2 = frame-setup SUB $x2, killed $x12 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x11, 88, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: renamable $v8 = PseudoVLE64_V_M1 killed renamable $x10, $noreg, 6, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8) + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x11, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: renamable $v8 = PseudoVLE64_V_M1 killed renamable $x10, $noreg, 6 /* e64 */, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8) ; CHECK-NEXT: $x11 = PseudoReadVLENB ; CHECK-NEXT: $x10 = LUI 1048575 ; CHECK-NEXT: $x10 = ADDIW killed $x10, 1824 diff --git a/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir b/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir --- a/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir +++ b/llvm/test/CodeGen/RISCV/rvv/commuted-op-indices-regression.mir @@ -30,9 +30,9 @@ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v0 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vrnov0 = COPY $v1 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrnov0 = COPY $v2 - ; CHECK-NEXT: [[PseudoVNMSUB_VV_M1_:%[0-9]+]]:vr = PseudoVNMSUB_VV_M1 [[PseudoVNMSUB_VV_M1_]], [[COPY1]], [[COPY2]], -1, 6, 1, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVNMSUB_VV_M1_:%[0-9]+]]:vr = PseudoVNMSUB_VV_M1 [[PseudoVNMSUB_VV_M1_]], [[COPY1]], [[COPY2]], -1, 6 /* e64 */, 1, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY [[PseudoVNMSUB_VV_M1_]] - ; CHECK-NEXT: dead [[COPY2]]:vr = PseudoVSLL_VI_M1 [[COPY2]], 11, $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead [[COPY2]]:vr = PseudoVSLL_VI_M1 [[COPY2]], 11, $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v0 = COPY [[PseudoVNMSUB_VV_M1_]] ; CHECK-NEXT: PseudoRET implicit $v0 %0:vr = COPY $v0 diff --git a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir --- a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir +++ b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir @@ -87,8 +87,8 @@ ; CHECK-NEXT: $x10 = frame-setup MUL killed $x10, killed $x11 ; CHECK-NEXT: $x2 = frame-setup SUB $x2, killed $x10 ; CHECK-NEXT: $x2 = frame-setup ANDI $x2, -128 - ; CHECK-NEXT: dead renamable $x15 = PseudoVSETIVLI 1, 72, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: renamable $v25 = PseudoVMV_V_X_M1 killed renamable $x12, $noreg, 4, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead renamable $x15 = PseudoVSETIVLI 1, 72 /* e16, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: renamable $v25 = PseudoVMV_V_X_M1 killed renamable $x12, $noreg, 4 /* e16 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x11 = PseudoReadVLENB ; CHECK-NEXT: $x10 = ADDI $x0, 50 ; CHECK-NEXT: $x11 = MUL killed $x11, killed $x10 @@ -129,7 +129,7 @@ ; CHECK-NEXT: renamable $x13 = ADD renamable $x26, killed renamable 
$x13 ; CHECK-NEXT: renamable $x13 = LD killed renamable $x13, 0 :: (load (s64)) ; CHECK-NEXT: renamable $x16 = SUB killed renamable $x13, renamable $x13 - ; CHECK-NEXT: dead renamable $x13 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead renamable $x13 = PseudoVSETIVLI 1, 64 /* e8, m1, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: renamable $x13 = nsw ADDI renamable $x16, -2 ; CHECK-NEXT: $x5 = PseudoReadVLENB ; CHECK-NEXT: $x1 = ADDI $x0, 50 @@ -141,8 +141,8 @@ ; CHECK-NEXT: $x5 = LD $x2, 0 :: (load (s64) from %stack.16) ; CHECK-NEXT: renamable $v0 = PseudoVRELOAD_M1 killed $x1 :: (load unknown-size from %stack.1, align 8) ; CHECK-NEXT: $x1 = LD $x2, 8 :: (load (s64) from %stack.15) - ; CHECK-NEXT: renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3, implicit $vl, implicit $vtype - ; CHECK-NEXT: renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 3, implicit $vl, implicit $vtype + ; CHECK-NEXT: renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3 /* e8 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 3 /* e8 */, implicit $vl, implicit $vtype ; CHECK-NEXT: BLT killed renamable $x16, renamable $x27, %bb.2 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: diff --git a/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir b/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir --- a/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir +++ b/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir @@ -16,11 +16,11 @@ ; CHECK-LABEL: name: mask_reg_alloc ; CHECK: liveins: $v0, $v1, $v2, $v3 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: renamable $v8 = PseudoVMERGE_VIM_M1 killed renamable $v2, 1, killed renamable $v0, 1, 3, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 64 /* e8, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: renamable $v8 = PseudoVMERGE_VIM_M1 killed renamable $v2, 1, killed renamable $v0, 1, 3 /* e8 */, implicit $vl, implicit $vtype ; CHECK-NEXT: renamable $v0 = COPY killed renamable $v1 - ; CHECK-NEXT: renamable $v9 = PseudoVMERGE_VIM_M1 killed renamable $v3, 1, killed renamable $v0, 1, 3, implicit $vl, implicit $vtype - ; CHECK-NEXT: renamable $v0 = PseudoVADD_VV_M1 killed renamable $v8, killed renamable $v9, 1, 3, implicit $vl, implicit $vtype + ; CHECK-NEXT: renamable $v9 = PseudoVMERGE_VIM_M1 killed renamable $v3, 1, killed renamable $v0, 1, 3 /* e8 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: renamable $v0 = PseudoVADD_VV_M1 killed renamable $v8, killed renamable $v9, 1, 3 /* e8 */, implicit $vl, implicit $vtype ; CHECK-NEXT: PseudoRET implicit $v0 %0:vr = COPY $v0 %1:vr = COPY $v1 diff --git a/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir b/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir --- a/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir +++ b/llvm/test/CodeGen/RISCV/rvv/reg-coalescing.mir @@ -10,12 +10,12 @@ ; CHECK-LABEL: name: test_earlyclobber ; CHECK: liveins: $x10 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: undef %0.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 $x10, 1, 5 - ; CHECK-NEXT: %0.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 $x10, 1, 5 - ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 $x10, 1, 5 - ; CHECK-NEXT: undef early-clobber %2.sub_vrm2_0:vrn2m2 = PseudoVRGATHER_VI_M2 %0.sub_vrm2_0, 0, 1, 5, implicit $vl, implicit $vtype + ; 
CHECK-NEXT: undef %0.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 $x10, 1, 5 /* e32 */ + ; CHECK-NEXT: %0.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 $x10, 1, 5 /* e32 */ + ; CHECK-NEXT: [[PseudoVLE32_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32_V_M2 $x10, 1, 5 /* e32 */ + ; CHECK-NEXT: undef early-clobber %2.sub_vrm2_0:vrn2m2 = PseudoVRGATHER_VI_M2 %0.sub_vrm2_0, 0, 1, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: %2.sub_vrm2_1:vrn2m2 = COPY %0.sub_vrm2_1 - ; CHECK-NEXT: PseudoVSUXSEG2EI32_V_M2_M2 %2, $x10, [[PseudoVLE32_V_M2_]], 1, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSUXSEG2EI32_V_M2_M2 %2, $x10, [[PseudoVLE32_V_M2_]], 1, 5 /* e32 */, implicit $vl, implicit $vtype undef %0.sub_vrm2_0:vrn2m2 = PseudoVLE32_V_M2 $x10, 1, 5 %0.sub_vrm2_1:vrn2m2 = PseudoVLE32_V_M2 $x10, 1, 5 %1:vrm2 = PseudoVLE32_V_M2 $x10, 1, 5 diff --git a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir --- a/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir +++ b/llvm/test/CodeGen/RISCV/rvv/tail-agnostic-impdef-copy.mir @@ -53,7 +53,7 @@ ; CHECK-NEXT: $v0 = COPY [[COPY]] ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8nov0 = COPY [[DEF]] - ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 6, 1 :: (load (s512) from %ir.a, align 8) + ; CHECK-NEXT: [[PseudoVLE64_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64_V_M8_MASK [[COPY2]], [[COPY1]], $v0, -1, 6 /* e64 */, 1 :: (load (s512) from %ir.a, align 8) ; CHECK-NEXT: $v8m8 = COPY [[PseudoVLE64_V_M8_MASK]] ; CHECK-NEXT: PseudoRET implicit $v8m8 %1:vr = COPY $v0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir --- a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir @@ -12,8 +12,8 @@ ; CHECK-LABEL: name: copy_different_lmul ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v12m2 = PseudoVMV2R_V $v28m2 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype @@ -29,9 +29,9 @@ ; CHECK-LABEL: name: copy_convert_to_vmv_v_v ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v12m4 = PseudoVMV_V_V_M4 $v28m4, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v12m4 = PseudoVMV_V_V_M4 $v28m4, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype $v12m4 = COPY $v28m4 @@ -46,9 +46,9 @@ ; CHECK-LABEL: name: copy_convert_to_vmv_v_i ; CHECK: liveins: $x14 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: 
$x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v12m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v12m4 = PseudoVMV_V_I_M4 0, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype $v12m4 = COPY $v28m4 @@ -63,7 +63,7 @@ ; CHECK-LABEL: name: copy_from_whole_load_store ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v28m4 = VL4RE32_V $x16 ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype @@ -80,9 +80,9 @@ ; CHECK-LABEL: name: copy_with_vleff ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v4m4 = PseudoVLE32FF_V_M4 $x16, $noreg, 5, implicit-def $vl + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v4m4 = PseudoVLE32FF_V_M4 $x16, $noreg, 5 /* e32 */, implicit-def $vl ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype @@ -100,12 +100,12 @@ ; CHECK-LABEL: name: copy_with_vsetvl_x0_x0_1 ; CHECK: liveins: $x14, $x16, $x17, $x18 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype - ; CHECK-NEXT: $x15 = PseudoVSETVLI $x17, 73, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 $x18, $noreg, 4, implicit $vl, implicit $vtype - ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 82, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 killed $x18, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x17, 73 /* e16, m2, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 $x18, $noreg, 4 /* e16 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 killed $x18, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype @@ -126,13 +126,13 @@ ; CHECK-LABEL: name: copy_with_vsetvl_x0_x0_2 ; CHECK: 
liveins: $x14, $x16, $x17, $x18 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype - ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 73, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 $x18, $noreg, 4, implicit $vl, implicit $vtype - ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 82, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 killed $x18, $noreg, 5, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v12m4 = PseudoVMV_V_V_M4 $v28m4, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 73 /* e16, m2, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 $x18, $noreg, 4 /* e16 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 killed $x18, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v12m4 = PseudoVMV_V_V_M4 $v28m4, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype $x0 = PseudoVSETVLIX0 $x0, 73, implicit-def $vl, implicit-def $vtype @@ -152,10 +152,10 @@ ; CHECK-LABEL: name: copy_with_vsetvl_x0_x0_3 ; CHECK: liveins: $x14, $x16, $x17, $x18 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype - ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 73, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 $x18, $noreg, 4, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 73 /* e16, m2, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 $x18, $noreg, 4 /* e16 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype @@ -173,10 +173,10 @@ ; CHECK-LABEL: name: copy_subregister ; CHECK: liveins: $x16, $x17 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x15 = PseudoVSETIVLI 4, 73, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v26m2 = PseudoVLE16_V_M2 killed $x16, $noreg, 4, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v8m2 = PseudoVLE16_V_M2 killed $x17, $noreg, 4, implicit $vl, implicit $vtype - ; CHECK-NEXT: early-clobber $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2, $noreg, 4, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x15 = PseudoVSETIVLI 4, 73 /* e16, m2, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v26m2 = PseudoVLE16_V_M2 killed $x16, $noreg, 4 /* e16 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v8m2 = PseudoVLE16_V_M2 killed $x17, $noreg, 4 /* e16 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: 
early-clobber $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2, $noreg, 4 /* e16 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v12m2 = PseudoVMV2R_V $v28m2 $x15 = PseudoVSETIVLI 4, 73, implicit-def $vl, implicit-def $vtype $v26m2 = PseudoVLE16_V_M2 killed $x16, $noreg, 4, implicit $vl, implicit $vtype @@ -194,8 +194,8 @@ ; CHECK-LABEL: name: copy_from_reload ; CHECK: liveins: $x14, $x16, $x17 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 73, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v2m2 = PseudoVLE16_V_M2 killed $x16, $noreg, 4, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 73 /* e16, m2, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v2m2 = PseudoVLE16_V_M2 killed $x16, $noreg, 4 /* e16 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x12 = PseudoReadVLENB ; CHECK-NEXT: $x12 = SLLI $x12, 1 ; CHECK-NEXT: $v2m2_v4m2 = PseudoVRELOAD2_M2 killed $x17, killed $x12 @@ -218,9 +218,9 @@ ; CHECK-LABEL: name: copy_with_different_vlmax ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype - ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 74, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 74 /* e16, m4, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4 $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype @@ -236,10 +236,10 @@ ; CHECK-LABEL: name: copy_with_widening_reduction ; CHECK: liveins: $x10, $v8, $v26, $v27 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x11 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x11 = PseudoVSETIVLI 1, 64 /* e8, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3 /* e8 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v26 = PseudoVMV1R_V killed $v8 - ; CHECK-NEXT: $x10 = PseudoVSETVLI killed renamable $x10, 75, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $x10 = PseudoVSETVLI killed renamable $x10, 75 /* e16, m8, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v8m8 = PseudoVRELOAD_M8 killed $x10 $x11 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3, implicit $vl, implicit $vtype @@ -257,8 +257,8 @@ ; CHECK-LABEL: name: copy_zvlsseg_reg ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v10 = PseudoVMV1R_V $v8 $x15 = PseudoVSETVLI 
$x14, 80, implicit-def $vl, implicit-def $vtype $v8_v9 = PseudoVLSEG2E32_V_M1 killed $x16, $noreg, 5, implicit $vl, implicit $vtype @@ -274,10 +274,10 @@ ; CHECK-LABEL: name: copy_zvlsseg_reg_2 ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 killed $x16, $noreg, 5, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v10 = PseudoVMV_V_V_M1 $v8, $noreg, 5, implicit $vl, implicit $vtype - ; CHECK-NEXT: $v11 = PseudoVMV_V_V_M1 $v9, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v10 = PseudoVMV_V_V_M1 $v8, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v11 = PseudoVMV_V_V_M1 $v9, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype $x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype $v8_v9 = PseudoVLSEG2E32_V_M1 killed $x16, $noreg, 5, implicit $vl, implicit $vtype $v10_v11 = COPY $v8_v9 @@ -292,8 +292,8 @@ ; CHECK-LABEL: name: copy_fractional_lmul ; CHECK: liveins: $x14, $x16 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 87, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v28 = PseudoVLE32_V_MF2 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 87 /* e32, mf2, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28 = PseudoVLE32_V_MF2 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v12 = PseudoVMV1R_V $v28 $x15 = PseudoVSETVLI $x14, 87, implicit-def $vl, implicit-def $vtype $v28 = PseudoVLE32_V_MF2 killed $x16, $noreg, 5, implicit $vl, implicit $vtype @@ -309,10 +309,10 @@ ; CHECK-LABEL: name: copy_implicit_def ; CHECK: liveins: $x12, $x14, $x16 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: $x0 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 killed $x12, $noreg, 5, implicit $vl, implicit $vtype - ; CHECK-NEXT: $x0 = PseudoVSETIVLI 10, 80, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v15 = PseudoVLE32_V_M1 killed $x16, $noreg, 5, implicit $vl, implicit $vtype, implicit killed $v8_v9_v10_v11_v12_v13_v14_v15, implicit-def $v8_v9_v10_v11_v12_v13_v14_v15 + ; CHECK-NEXT: $x0 = PseudoVSETVLI $x14, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v8_v9_v10_v11_v12_v13_v14_v15 = PseudoVLSEG8E32_V_M1 killed $x12, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x0 = PseudoVSETIVLI 10, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v15 = PseudoVLE32_V_M1 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype, implicit killed $v8_v9_v10_v11_v12_v13_v14_v15, implicit-def $v8_v9_v10_v11_v12_v13_v14_v15 ; CHECK-NEXT: $v24 = PseudoVMV1R_V killed $v8 ; CHECK-NEXT: $v25 = PseudoVMV1R_V killed $v9 ; CHECK-NEXT: $v26 = PseudoVMV1R_V killed $v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir @@ -184,8 +184,8 @@ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x11 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = 
COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: BEQ [[COPY3]], [[COPY4]], %bb.2 ; CHECK-NEXT: PseudoBR %bb.1 @@ -193,13 +193,13 @@ ; CHECK-NEXT: bb.1.if.then: ; CHECK-NEXT: successors: %bb.3(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: PseudoBR %bb.3 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2.if.else: ; CHECK-NEXT: successors: %bb.3(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.3.if.end: ; CHECK-NEXT: [[PHI:%[0-9]+]]:vr = PHI [[PseudoVADD_VV_M1_]], %bb.1, [[PseudoVSUB_VV_M1_]], %bb.2 @@ -263,8 +263,8 @@ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x12 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x11 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 87, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 [[COPY2]], $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 87 /* e32, mf2, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 [[COPY2]], $noreg, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: BEQ [[COPY3]], [[COPY4]], %bb.2 ; CHECK-NEXT: PseudoBR %bb.1 @@ -272,19 +272,19 @@ ; CHECK-NEXT: bb.1.if.then: ; CHECK-NEXT: successors: %bb.3(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 88, implicit-def $vl, implicit-def $vtype, implicit $vl - ; CHECK-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[PseudoVLE32_V_MF2_]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: PseudoBR %bb.3 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2.if.else: ; CHECK-NEXT: successors: %bb.3(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 88, implicit-def $vl, implicit-def $vtype, implicit $vl - ; CHECK-NEXT: early-clobber %2:vr = PseudoVSEXT_VF2_M1 [[PseudoVLE32_V_MF2_]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: early-clobber %2:vr = PseudoVSEXT_VF2_M1 [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 
*/, implicit $vl, implicit $vtype ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.3.if.end: ; CHECK-NEXT: [[PHI:%[0-9]+]]:vr = PHI %1, %bb.1, %2, %bb.2 - ; CHECK-NEXT: PseudoVSE64_V_M1 [[PHI]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE64_V_M1 [[PHI]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: PseudoRET bb.0.entry: successors: %bb.2(0x30000000), %bb.1(0x50000000) @@ -351,19 +351,19 @@ ; CHECK-NEXT: bb.1.if.then: ; CHECK-NEXT: successors: %bb.3(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: PseudoBR %bb.3 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2.if.else: ; CHECK-NEXT: successors: %bb.3(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[COPY1]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[COPY1]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.3.if.end: ; CHECK-NEXT: [[PHI:%[0-9]+]]:vr = PHI [[PseudoVADD_VV_M1_]], %bb.1, [[PseudoVSUB_VV_M1_]], %bb.2 - ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 [[PHI]], 6, implicit $vtype + ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 [[PHI]], 6 /* e64 */, implicit $vtype ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]] ; CHECK-NEXT: PseudoRET implicit $x10 bb.0.entry: @@ -424,7 +424,7 @@ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: BEQ [[COPY3]], [[COPY4]], %bb.2 ; CHECK-NEXT: PseudoBR %bb.1 @@ -432,13 +432,13 @@ ; CHECK-NEXT: bb.1.if.then: ; CHECK-NEXT: successors: %bb.3(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: PseudoBR %bb.3 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2.if.else: ; CHECK-NEXT: successors: %bb.3(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.3.if.end: ; CHECK-NEXT: [[PHI:%[0-9]+]]:vr = PHI [[PseudoVADD_VV_M1_]], %bb.1, 
[[PseudoVSUB_VV_M1_]], %bb.2 @@ -495,20 +495,20 @@ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF - ; CHECK-NEXT: dead %12:gpr = PseudoVSETVLIX0 $x0, 95, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVID_V_MF2_:%[0-9]+]]:vr = PseudoVID_V_MF2 -1, 6, implicit $vl, implicit $vtype - ; CHECK-NEXT: dead %13:gpr = PseudoVSETVLIX0 $x0, 87, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vrnov0 = PseudoVMV_V_I_MF2 0, -1, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead %12:gpr = PseudoVSETVLIX0 $x0, 95 /* e64, mf2, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVID_V_MF2_:%[0-9]+]]:vr = PseudoVID_V_MF2 -1, 6 /* e64 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead %13:gpr = PseudoVSETVLIX0 $x0, 87 /* e32, mf2, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vrnov0 = PseudoVMV_V_I_MF2 0, -1, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: [[PseudoVMSEQ_VI_MF2_:%[0-9]+]]:vmv0 = PseudoVMSEQ_VI_MF2 killed [[PseudoVID_V_MF2_]], 0, -1, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVMSEQ_VI_MF2_:%[0-9]+]]:vmv0 = PseudoVMSEQ_VI_MF2 killed [[PseudoVID_V_MF2_]], 0, -1, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v0 = COPY [[PseudoVMSEQ_VI_MF2_]] - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 23, implicit-def $vl, implicit-def $vtype, implicit $vl - ; CHECK-NEXT: [[PseudoVLE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], killed [[COPY]], $v0, -1, 5, 0, implicit $vl, implicit $vtype - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 69, implicit-def $vl, implicit-def $vtype, implicit $vl - ; CHECK-NEXT: [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[PseudoVMSEQ_VI_MF2_]], -1, 0, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 23 /* e32, mf2, tu, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: [[PseudoVLE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], killed [[COPY]], $v0, -1, 5 /* e32 */, 0, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 69 /* e8, mf8, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[PseudoVMSEQ_VI_MF2_]], -1, 0 /* e8 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: BEQ killed [[PseudoVCPOP_M_B1_]], [[COPY2]], %bb.3 ; CHECK-NEXT: PseudoBR %bb.2 @@ -520,8 +520,8 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.3: ; CHECK-NEXT: [[PHI:%[0-9]+]]:gpr = PHI [[DEF]], %bb.1, [[LWU]], %bb.2 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 87, implicit-def $vl, implicit-def $vtype, implicit $vl - ; CHECK-NEXT: [[PseudoVADD_VX_MF2_:%[0-9]+]]:vr = nsw PseudoVADD_VX_MF2 [[PseudoVLE32_V_MF2_MASK]], [[PHI]], -1, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 87 /* e32, mf2, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: [[PseudoVADD_VX_MF2_:%[0-9]+]]:vr = nsw PseudoVADD_VX_MF2 [[PseudoVLE32_V_MF2_MASK]], [[PHI]], -1, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v0 = COPY [[PseudoVADD_VX_MF2_]] ; CHECK-NEXT: PseudoRET implicit 
$v0 bb.0: @@ -581,21 +581,21 @@ ; CHECK-NEXT: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11 - ; CHECK-NEXT: dead %11:gpr = PseudoVSETVLIX0 $x0, 88, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 -1, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead %11:gpr = PseudoVSETVLIX0 $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 -1, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, %10, %bb.1 - ; CHECK-NEXT: [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 [[PseudoVID_V_M1_]], [[PHI]], -1, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 [[PseudoVID_V_M1_]], [[PHI]], -1, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[PHI]], [[SRLI]] ; CHECK-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[MUL]] - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 87, implicit-def $vl, implicit-def $vtype, implicit $vl - ; CHECK-NEXT: PseudoVSE32_V_MF2 killed [[PseudoVADD_VX_M1_]], killed [[ADD]], -1, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 87 /* e32, mf2, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: PseudoVSE32_V_MF2 killed [[PseudoVADD_VX_M1_]], killed [[ADD]], -1, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[PHI]], 1 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 88, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl ; CHECK-NEXT: BLTU [[ADDI]], [[COPY1]], %bb.1 ; CHECK-NEXT: PseudoBR %bb.2 ; CHECK-NEXT: {{ $}} @@ -670,8 +670,8 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 4, 80, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, 4, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 4, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, 4, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY [[PseudoVMV_V_I_M1_]] ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY [[COPY2]] ; CHECK-NEXT: [[LUI:%[0-9]+]]:gpr = LUI 1 @@ -683,8 +683,8 @@ ; CHECK-NEXT: [[PHI:%[0-9]+]]:gpr = PHI [[COPY1]], %bb.0, %5, %bb.1 ; CHECK-NEXT: [[PHI1:%[0-9]+]]:gpr = PHI [[ADDIW]], %bb.0, %4, %bb.1 ; CHECK-NEXT: [[PHI2:%[0-9]+]]:vr = PHI [[COPY3]], %bb.0, %16, %bb.1 - ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 [[PHI]], 4, 5, implicit $vl, implicit $vtype :: (load (s128) from %ir.lsr.iv12, align 4) - ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE32_V_M1_]], [[PHI2]], 4, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 [[PHI]], 4, 5 /* e32 */, implicit $vl, implicit $vtype :: (load (s128) from %ir.lsr.iv12, align 
4) + ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE32_V_M1_]], [[PHI2]], 4, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = nsw ADDI [[PHI1]], -4 ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI [[PHI]], 16 ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr = COPY $x0 @@ -694,11 +694,11 @@ ; CHECK-NEXT: bb.2.middle.block: ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVMV_S_X_M1_:%[0-9]+]]:vr = PseudoVMV_S_X_M1 [[DEF]], [[COPY5]], 1, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVMV_S_X_M1_:%[0-9]+]]:vr = PseudoVMV_S_X_M1 [[DEF]], [[COPY5]], 1, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF - ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X_M1_]], 4, 5, implicit $vl, implicit $vtype - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 80, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: PseudoVSE32_V_M1 killed [[PseudoVREDSUM_VS_M1_]], [[COPY]], 1, 5, implicit $vl, implicit $vtype :: (store (s32) into %ir.res) + ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X_M1_]], 4, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: PseudoVSE32_V_M1 killed [[PseudoVREDSUM_VS_M1_]], [[COPY]], 1, 5 /* e32 */, implicit $vl, implicit $vtype :: (store (s32) into %ir.res) ; CHECK-NEXT: PseudoRET bb.0.entry: liveins: $x10, $x12 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir @@ -117,8 +117,8 @@ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]] ; CHECK-NEXT: PseudoRET implicit $v8 %2:gprnox0 = COPY $x10 @@ -156,9 +156,9 @@ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6, implicit $vl, implicit $vtype - ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, implicit 
$vl, implicit $vtype ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]] ; CHECK-NEXT: PseudoRET implicit $v8 %2:gprnox0 = COPY $x11 @@ -194,10 +194,10 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 87, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 [[COPY1]], $noreg, 5, implicit $vl, implicit $vtype - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 88, implicit-def $vl, implicit-def $vtype, implicit $vl - ; CHECK-NEXT: early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed [[PseudoVLE32_V_MF2_]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 87 /* e32, mf2, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 [[COPY1]], $noreg, 5 /* e32 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v8 = COPY %3 ; CHECK-NEXT: PseudoRET implicit $v8 %1:gprnox0 = COPY $x11 @@ -228,8 +228,8 @@ ; CHECK: liveins: $v8 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 88, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 [[COPY]], 6, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 [[COPY]], 6 /* e64 */, implicit $vtype ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]] ; CHECK-NEXT: PseudoRET implicit $x10 %0:vr = COPY $v8 @@ -263,11 +263,11 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY1]], 2, 6, implicit $vl, implicit $vtype :: (load (s128) from %ir.x) - ; CHECK-NEXT: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6, implicit $vl, implicit $vtype :: (load (s128) from %ir.y) - ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], killed [[PseudoVLE64_V_M1_1]], 2, 6, implicit $vl, implicit $vtype - ; CHECK-NEXT: PseudoVSE64_V_M1 killed [[PseudoVADD_VV_M1_]], [[COPY1]], 2, 6, implicit $vl, implicit $vtype :: (store (s128) into %ir.x) + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY1]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x) + ; CHECK-NEXT: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (load (s128) from %ir.y) + ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], killed [[PseudoVLE64_V_M1_1]], 2, 6 /* e64 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: PseudoVSE64_V_M1 killed [[PseudoVADD_VV_M1_]], [[COPY1]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (store (s128) into %ir.x) ; CHECK-NEXT: PseudoRET %1:gpr = COPY $x11 %0:gpr = 
COPY $x10 @@ -302,14 +302,14 @@ ; CHECK: liveins: $x10 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6, implicit $vl, implicit $vtype :: (load (s128) from %ir.x) - ; CHECK-NEXT: dead %6:gpr = PseudoVSETVLIX0 $x0, 88, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, -1, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x) + ; CHECK-NEXT: dead %6:gpr = PseudoVSETVLIX0 $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, -1, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 88, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6, implicit $vl, implicit $vtype - ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 killed [[PseudoVREDSUM_VS_M1_]], 6, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 killed [[PseudoVREDSUM_VS_M1_]], 6 /* e64 */, implicit $vtype ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]] ; CHECK-NEXT: PseudoRET implicit $x10 %0:gpr = COPY $x10 @@ -349,8 +349,8 @@ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]] ; CHECK-NEXT: PseudoRET implicit $v8 %2:gprnox0 = COPY $x10 @@ -389,11 +389,11 @@ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */ - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88, 
implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]] ; CHECK-NEXT: PseudoRET implicit $v8 %2:gprnox0 = COPY $x11 diff --git a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir --- a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir +++ b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir @@ -162,8 +162,8 @@ ; CHECK-NEXT: $x10 = PseudoReadVLENB ; CHECK-NEXT: $x10 = ADD $x2, killed $x10 ; CHECK-NEXT: SD killed renamable $x16, killed $x10, 48 :: (store (s64) into %fixed-stack.1, align 16) - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 69, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: renamable $v8 = PseudoVMV_V_I_MF8 0, 2, 3, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 69 /* e8, mf8, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: renamable $v8 = PseudoVMV_V_I_MF8 0, 2, 3 /* e8 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x10 = ADDI $x2, 24 ; CHECK-NEXT: PseudoVSPILL_M1 killed renamable $v8, killed $x10 :: (store unknown-size into %stack.1, align 8) ; CHECK-NEXT: {{ $}} @@ -178,10 +178,10 @@ ; CHECK-NEXT: successors: %bb.1(0x80000000) ; CHECK-NEXT: liveins: $x8, $x9 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 69, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 69 /* e8, mf8, ta, mu */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $x10 = ADDI $x2, 24 ; CHECK-NEXT: renamable $v8 = PseudoVRELOAD_M1 killed $x10 :: (load unknown-size from %stack.1, align 8) - ; CHECK-NEXT: PseudoVSE8_V_MF8 killed renamable $v8, renamable $x8, 2, 3, implicit $vl, implicit $vtype :: (store (s16) into %ir.0, align 1) + ; CHECK-NEXT: PseudoVSE8_V_MF8 killed renamable $v8, renamable $x8, 2, 3 /* e8 */, implicit $vl, implicit $vtype :: (store (s16) into %ir.0, align 1) ; CHECK-NEXT: $x10 = COPY renamable $x9 ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) @fprintf, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2, implicit-def dead $x10 ; CHECK-NEXT: PseudoBR %bb.1 diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir --- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir +++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir @@ -26,8 +26,8 @@ ; CHECK-NEXT: $x12 = frame-setup PseudoReadVLENB ; CHECK-NEXT: $x12 = frame-setup SLLI killed $x12, 3 ; CHECK-NEXT: $x2 = frame-setup SUB $x2, killed $x12 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x11, 88, implicit-def $vl, implicit-def $vtype - ; CHECK-NEXT: $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 renamable $x10, $noreg, 6, implicit $vl, implicit $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x11, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 renamable $x10, $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x11 = ADDI $x2, 16 ; CHECK-NEXT: $x12 = PseudoReadVLENB ; CHECK-NEXT: 
PseudoVSPILL7_M1 killed renamable $v0_v1_v2_v3_v4_v5_v6, killed $x11, killed $x12
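
For reference, here is a small standalone sketch of the decoding that the new `/* e64, m1, ta, mu */` and `/* e64 */` CHECK comments rely on. It is not the in-tree code (the patch delegates to `RISCVVType::printVType` and uses `RISCVII::hasSEWOp` to locate the SEW operand); the helper names below are illustrative only, and the vtype bit layout assumed is the standard RVV encoding (vlmul in bits [2:0], vsew in bits [5:3], vta in bit 6, vma in bit 7).

```cpp
// Standalone sketch: decode a vsetvli/vsetivli vtype immediate and a
// pseudo's SEW operand into the comment strings the updated tests expect.
// Assumes the standard vtype layout: vlmul[2:0], vsew[5:3], vta[6], vma[7].
#include <cassert>
#include <iostream>
#include <sstream>
#include <string>

// LMUL field (vtype bits [2:0]): 0-3 are m1..m8, 5-7 are mf8..mf2.
static std::string decodeLMUL(unsigned VLMul) {
  switch (VLMul) {
  case 0: return "m1";
  case 1: return "m2";
  case 2: return "m4";
  case 3: return "m8";
  case 5: return "mf8";
  case 6: return "mf4";
  case 7: return "mf2";
  default: return "reserved";
  }
}

// Build the comment body for a vtype immediate, e.g. 88 -> "e64, m1, ta, mu".
static std::string decodeVType(unsigned VType) {
  unsigned VLMul = VType & 0x7;       // bits [2:0]
  unsigned VSEW = (VType >> 3) & 0x7; // bits [5:3], encodes log2(SEW/8)
  bool TailAgnostic = (VType >> 6) & 1;
  bool MaskAgnostic = (VType >> 7) & 1;
  unsigned SEW = 8u << VSEW;
  std::ostringstream OS;
  OS << "e" << SEW << ", " << decodeLMUL(VLMul) << ", "
     << (TailAgnostic ? "ta" : "tu") << ", "
     << (MaskAgnostic ? "ma" : "mu");
  return OS.str();
}

// The SEW operand of vector pseudos stores log2(SEW); 0 prints as e8,
// mirroring `Log2SEW ? 1 << Log2SEW : 8` in the patch.
static std::string decodeSEWOperand(unsigned Log2SEW) {
  unsigned SEW = Log2SEW ? 1u << Log2SEW : 8;
  return "e" + std::to_string(SEW);
}

int main() {
  // Immediates taken from the tests above.
  assert(decodeVType(88) == "e64, m1, ta, mu");
  assert(decodeVType(82) == "e32, m4, ta, mu");
  assert(decodeSEWOperand(6) == "e64");
  assert(decodeSEWOperand(3) == "e8");
  std::cout << decodeVType(88) << "\n"; // e64, m1, ta, mu
  return 0;
}
```

Read this way, the pairs in the tests are self-consistent: a `PseudoVSETVLI ..., 88` (e64, m1) is followed by consuming pseudos whose SEW operand is `6 /* e64 */`, and the e8 cases pair `64` or `69` vtype immediates with a `3 /* e8 */` SEW operand.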