diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1952,7 +1952,7 @@
   // For simplicity we reuse the vtype representation here.
   MIB.addImm(RISCVVType::encodeVTYPE(Multiplier, ElementWidth,
-                                     /*TailAgnostic*/ false,
+                                     /*TailAgnostic*/ true,
                                      /*MaskAgnostic*/ false));
 
   // Remove (now) redundant operands from pseudo
diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
--- a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
@@ -40,23 +40,23 @@
 # POST-INSERTER: %0:gpr = COPY $x13
 # POST-INSERTER: %4:vr = IMPLICIT_DEF
-# POST-INSERTER: dead %10:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: dead %10:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
 # POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %4, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
 # POST-INSERTER: %6:vr = IMPLICIT_DEF
-# POST-INSERTER: dead %11:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: dead %11:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
 # POST-INSERTER: %7:vr = PseudoVLE64_V_M1 %6, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
 # POST-INSERTER: %8:vr = IMPLICIT_DEF
-# POST-INSERTER: dead %12:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: dead %12:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
 # POST-INSERTER: %9:vr = PseudoVADD_VV_M1 %8, killed %5, killed %7, $noreg, $noreg, -1, implicit $vl, implicit $vtype
-# POST-INSERTER: dead %13:gpr = PseudoVSETVLI %0, 12, implicit-def $vl, implicit-def $vtype
+# POST-INSERTER: dead %13:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
 # POST-INSERTER: PseudoVSE64_V_M1 killed %9, %3, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
 
-# CODEGEN: vsetvli a4, a3, e64,m1,tu,mu
+# CODEGEN: vsetvli a4, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vle64.v v25, (a1)
-# CODEGEN-NEXT: vsetvli a1, a3, e64,m1,tu,mu
+# CODEGEN-NEXT: vsetvli a1, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vle64.v v26, (a2)
-# CODEGEN-NEXT: vsetvli a1, a3, e64,m1,tu,mu
+# CODEGEN-NEXT: vsetvli a1, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vadd.vv v25, v25, v26
-# CODEGEN-NEXT: vsetvli a1, a3, e64,m1,tu,mu
+# CODEGEN-NEXT: vsetvli a1, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vse64.v v25, (a0)
 # CODEGEN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
--- a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
@@ -29,13 +29,13 @@
 ; PRE-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $x0, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
 
 ; POST-INSERTER: %4:vr = IMPLICIT_DEF
-; POST-INSERTER: dead %9:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: dead %9:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
 ; POST-INSERTER: %3:vr = PseudoVLE64_V_M1 %4, %1, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
 ; POST-INSERTER: %6:vr = IMPLICIT_DEF
-; POST-INSERTER: dead %10:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: dead %10:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
 ; POST-INSERTER: %5:vr = PseudoVLE64_V_M1 %6, %2, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
 ; POST-INSERTER: %8:vr = IMPLICIT_DEF
-; POST-INSERTER: dead %11:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: dead %11:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
 ; POST-INSERTER: %7:vr = PseudoVADD_VV_M1 %8, killed %3, killed %5, $noreg, $noreg, -1, implicit $vl, implicit $vtype
-; POST-INSERTER: dead %12:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+; POST-INSERTER: dead %12:gpr = PseudoVSETVLI $x0, 76, implicit-def $vl, implicit-def $vtype
 ; POST-INSERTER: PseudoVSE64_V_M1 killed %7, %0, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
@@ -7,13 +7,13 @@
 define void @vadd_vint16m1(<vscale x 4 x i16> *%pc, <vscale x 4 x i16> *%pa, <vscale x 4 x i16> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint16m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e16,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i16>, <vscale x 4 x i16>* %pa
@@ -26,13 +26,13 @@
 define void @vadd_vint16m2(<vscale x 8 x i16> *%pc, <vscale x 8 x i16> *%pa, <vscale x 8 x i16> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint16m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e16,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i16>, <vscale x 8 x i16>* %pa
@@ -45,13 +45,13 @@
 define void @vadd_vint16m4(<vscale x 16 x i16> *%pc, <vscale x 16 x i16> *%pa, <vscale x 16 x i16> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint16m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e16,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 16 x i16>, <vscale x 16 x i16>* %pa
@@ -64,13 +64,13 @@
 define void @vadd_vint16m8(<vscale x 32 x i16> *%pc, <vscale x 32 x i16> *%pa, <vscale x 32 x i16> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint16m8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e16,m8,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 32 x i16>, <vscale x 32 x i16>* %pa
@@ -83,13 +83,13 @@
 define void @vadd_vint16mf2(<vscale x 2 x i16> *%pc, <vscale x 2 x i16> *%pa, <vscale x 2 x i16> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint16mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i16>, <vscale x 2 x i16>* %pa
@@ -102,13 +102,13 @@
 define void @vadd_vint16mf4(<vscale x 1 x i16> *%pc, <vscale x 1 x i16> *%pa, <vscale x 1 x i16> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint16mf4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i16>, <vscale x 1 x i16>* %pa
diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
@@ -7,13 +7,13 @@
 define void @vadd_vint32m1(<vscale x 2 x i32> *%pc, <vscale x 2 x i32> *%pa, <vscale x 2 x i32> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint32m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e32,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i32>, <vscale x 2 x i32>* %pa
@@ -26,13 +26,13 @@
 define void @vadd_vint32m2(<vscale x 4 x i32> *%pc, <vscale x 4 x i32> *%pa, <vscale x 4 x i32> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint32m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e32,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i32>, <vscale x 4 x i32>* %pa
@@ -45,13 +45,13 @@
 define void @vadd_vint32m4(<vscale x 8 x i32> *%pc, <vscale x 8 x i32> *%pa, <vscale x 8 x i32> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint32m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e32,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i32>, <vscale x 8 x i32>* %pa
@@ -64,13 +64,13 @@
 define void @vadd_vint32m8(<vscale x 16 x i32> *%pc, <vscale x 16 x i32> *%pa, <vscale x 16 x i32> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint32m8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e32,m8,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 16 x i32>, <vscale x 16 x i32>* %pa
@@ -83,13 +83,13 @@
 define void @vadd_vint32mf2(<vscale x 1 x i32> *%pc, <vscale x 1 x i32> *%pa, <vscale x 1 x i32> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint32mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i32>, <vscale x 1 x i32>* %pa
diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
@@ -7,13 +7,13 @@
 define void @vadd_vint64m1(<vscale x 1 x i64> *%pc, <vscale x 1 x i64> *%pa, <vscale x 1 x i64> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint64m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e64,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pa
@@ -26,13 +26,13 @@
 define void @vadd_vint64m2(<vscale x 2 x i64> *%pc, <vscale x 2 x i64> *%pa, <vscale x 2 x i64> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint64m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e64,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i64>, <vscale x 2 x i64>* %pa
@@ -45,13 +45,13 @@
 define void @vadd_vint64m4(<vscale x 4 x i64> *%pc, <vscale x 4 x i64> *%pa, <vscale x 4 x i64> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint64m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e64,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i64>, <vscale x 4 x i64>* %pa
@@ -64,13 +64,13 @@
 define void @vadd_vint64m8(<vscale x 8 x i64> *%pc, <vscale x 8 x i64> *%pa, <vscale x 8 x i64> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint64m8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e64,m8,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i64>, <vscale x 8 x i64>* %pa
diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
@@ -7,13 +7,13 @@
 define void @vadd_vint8m1(<vscale x 8 x i8> *%pc, <vscale x 8 x i8> *%pa, <vscale x 8 x i8> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint8m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,m1,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i8>, <vscale x 8 x i8>* %pa
@@ -26,13 +26,13 @@
 define void @vadd_vint8m2(<vscale x 16 x i8> *%pc, <vscale x 16 x i8> *%pa, <vscale x 16 x i8> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint8m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vle8.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e8,m2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vse8.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 16 x i8>, <vscale x 16 x i8>* %pa
@@ -45,13 +45,13 @@
 define void @vadd_vint8m4(<vscale x 32 x i8> *%pc, <vscale x 32 x i8> *%pa, <vscale x 32 x i8> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint8m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e8,m4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vse8.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 32 x i8>, <vscale x 32 x i8>* %pa
@@ -64,13 +64,13 @@
 define void @vadd_vint8m8(<vscale x 64 x i8> *%pc, <vscale x 64 x i8> *%pa, <vscale x 64 x i8> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint8m8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e8,m8,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 64 x i8>, <vscale x 64 x i8>* %pa
@@ -83,13 +83,13 @@
 define void @vadd_vint8mf2(<vscale x 4 x i8> *%pc, <vscale x 4 x i8> *%pa, <vscale x 4 x i8> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint8mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i8>, <vscale x 4 x i8>* %pa
@@ -102,13 +102,13 @@
 define void @vadd_vint8mf4(<vscale x 2 x i8> *%pc, <vscale x 2 x i8> *%pa, <vscale x 2 x i8> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint8mf4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i8>, <vscale x 2 x i8>* %pa
@@ -121,13 +121,13 @@
 define void @vadd_vint8mf8(<vscale x 1 x i8> *%pc, <vscale x 1 x i8> *%pa, <vscale x 1 x i8> *%pb) nounwind {
 ; CHECK-LABEL: vadd_vint8mf8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a3, zero, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli a3, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,tu,mu
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i8>, <vscale x 1 x i8>* %pa
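
Note on the immediates above: PseudoVSETVLI's second operand is the raw vtype
immediate. Under the vtype bit layout this code appears to target (the RVV
v0.9 draft: vlmul[1:0] in bits 1:0, vsew[2:0] in bits 4:2, vlmul[2] in bit 5,
vta in bit 6, vma in bit 7), flipping TailAgnostic from false to true sets
bit 6, which accounts for every 12 -> 76 change in the tests: 12 encodes
e64,m1,tu,mu and 76 = 12 | 0x40 encodes e64,m1,ta,mu. A minimal
self-contained sketch of that encoding, assuming this bit layout;
encode_vtype below is a hypothetical stand-in written for illustration, not
the in-tree RISCVVType::encodeVTYPE:

    #include <cassert>
    #include <cstdio>

    // Build a vtype immediate from its fields, using the v0.9 layout assumed
    // above. SewBits = log2(SEW) - 3 (e8=0, e16=1, e32=2, e64=3); LmulBits is
    // the 3-bit vlmul field (m1=0, m2=1, m4=2, m8=3; mf8=5, mf4=6, mf2=7).
    static unsigned encode_vtype(unsigned LmulBits, unsigned SewBits,
                                 bool TailAgnostic, bool MaskAgnostic) {
      assert(LmulBits < 8 && SewBits < 8 && "vlmul and vsew are 3-bit fields");
      unsigned VType = (LmulBits & 0x3) | (SewBits << 2);
      if (LmulBits & 0x4) // fractional-LMUL bit lands in bit 5
        VType |= 0x20;
      if (TailAgnostic)   // vta, bit 6
        VType |= 0x40;
      if (MaskAgnostic)   // vma, bit 7
        VType |= 0x80;
      return VType;
    }

    int main() {
      // e64 (SewBits = 3), m1 (LmulBits = 0):
      printf("%u\n", encode_vtype(0, 3, /*TailAgnostic=*/false, false)); // 12
      printf("%u\n", encode_vtype(0, 3, /*TailAgnostic=*/true, false));  // 76
    }

As for the policy change itself: tail agnostic only relaxes what the hardware
must do with elements past vl (they may be left undisturbed or overwritten
with all ones), so it can be cheaper on some implementations; and since these
pseudos merge into IMPLICIT_DEF operands, as visible in the MIR checks above,
the old tu setting was only preserving undefined values anyway.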