diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4395,7 +4395,6 @@
                                       /*MaskAgnostic*/ false));
 
   // Remove (now) redundant operands from pseudo
-  MI.getOperand(SEWIndex).setImm(-1);
   if (VLIndex >= 0) {
     MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
     MI.getOperand(VLIndex).setIsKill(false);
diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
--- a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
@@ -40,13 +40,13 @@
 # POST-INSERTER: %2:gpr = COPY $x11
 # POST-INSERTER: %3:gpr = COPY $x10
 # POST-INSERTER: dead %7:gpr = PseudoVSETVLI %0, 88, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
+# POST-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $noreg, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
 # POST-INSERTER: dead %8:gpr = PseudoVSETVLI %0, 88, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %1, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
+# POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %1, $noreg, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
 # POST-INSERTER: dead %9:gpr = PseudoVSETVLI %0, 88, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: %6:vr = PseudoVADD_VV_M1 killed %4, killed %5, $noreg, -1, implicit $vl, implicit $vtype
+# POST-INSERTER: %6:vr = PseudoVADD_VV_M1 killed %4, killed %5, $noreg, 64, implicit $vl, implicit $vtype
 # POST-INSERTER: dead %10:gpr = PseudoVSETVLI %0, 88, implicit-def $vl, implicit-def $vtype
-# POST-INSERTER: PseudoVSE64_V_M1 killed %6, %3, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+# POST-INSERTER: PseudoVSE64_V_M1 killed %6, %3, $noreg, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
 
 # CODEGEN: vsetvli a3, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vle64.v v25, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
--- a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
@@ -28,5 +28,5 @@
 ; POST-INSERTER: %3:vr = VL1RE64_V %1 :: (load unknown-size from %ir.pa, align 8)
 ; POST-INSERTER: %4:vr = VL1RE64_V %2 :: (load unknown-size from %ir.pb, align 8)
 ; POST-INSERTER: dead %6:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $noreg, -1, implicit $vl, implicit $vtype
+; POST-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $noreg, 64, implicit $vl, implicit $vtype
 ; POST-INSERTER: VS1R_V killed %5, %0 :: (store unknown-size into %ir.pc, align 8)
diff --git a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll
--- a/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll
@@ -17,7 +17,7 @@
   ; CHECK: [[COPY:%[0-9]+]]:vr = COPY $v8
   ; CHECK: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1
   ; CHECK: dead %3:gpr = PseudoVSETIVLI 1, 88, implicit-def $vl, implicit-def $vtype
-  ; CHECK: PseudoVSE64_V_M1 [[COPY]], %stack.0.a, $noreg, -1, implicit $vl, implicit $vtype
+  ; CHECK: PseudoVSE64_V_M1 [[COPY]], %stack.0.a, $noreg, 64, implicit $vl, implicit $vtype
   ; CHECK: [[LD:%[0-9]+]]:gpr = LD %stack.0.a, 0 :: (dereferenceable load 8 from %ir.a)
   ; CHECK: $x10 = COPY [[LD]]
   ; CHECK: PseudoRET implicit $x10
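
Note: with the MI.getOperand(SEWIndex).setImm(-1) call removed, vsetvli insertion no longer clobbers each pseudo's SEW operand with a -1 sentinel after emitting the PseudoVSETVLI, so the pseudos keep their real SEW. The test updates above simply replace -1 with 64 in the expected operands of PseudoVLE64_V_M1, PseudoVADD_VV_M1, and PseudoVSE64_V_M1.

As a cross-check that SEW 64 is consistent with the vtype immediate 88 carried by every PseudoVSETVLI/PseudoVSETIVLI in these tests, here is a minimal standalone sketch (not part of the patch; it assumes the ratified RVV vtype bit layout, with vlmul in bits [2:0], vsew in bits [5:3], vta in bit 6, and vma in bit 7) that decodes the immediate:

#include <cstdio>

int main() {
  unsigned VType = 88;                    // immediate from the PseudoVSETVLIs above
  unsigned VLMul = VType & 0x7;           // 0 -> LMUL = 1 (m1)
  unsigned VSEW = (VType >> 3) & 0x7;     // 3 -> SEW = 8 << 3 = 64
  bool TailAgnostic = (VType >> 6) & 0x1; // 1 -> tail agnostic (ta)
  bool MaskAgnostic = (VType >> 7) & 0x1; // 0 -> mask undisturbed (mu)
  std::printf("e%u,m%u,%s,%s\n", 8u << VSEW, 1u << VLMul,
              TailAgnostic ? "ta" : "tu", MaskAgnostic ? "ma" : "mu");
  return 0;
}

This prints "e64,m1,ta,mu", matching the CODEGEN vsetvli line in add-vsetvli-gpr.mir, so keeping the true SEW (64) on the pseudos agrees with the vtype that the inserted vsetvli establishes.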