diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -58,12 +58,13 @@
   uint8_t TailAgnostic : 1;
   uint8_t MaskAgnostic : 1;
   uint8_t MaskRegOp : 1;
+  uint8_t StoreOp : 1;
   uint8_t SEWLMULRatioOnly : 1;

 public:
   VSETVLIInfo()
       : AVLImm(0), TailAgnostic(false), MaskAgnostic(false), MaskRegOp(false),
-        SEWLMULRatioOnly(false) {}
+        StoreOp(false), SEWLMULRatioOnly(false) {}

   static VSETVLIInfo getUnknown() {
     VSETVLIInfo Info;
@@ -118,7 +119,8 @@
     TailAgnostic = RISCVVType::isTailAgnostic(VType);
     MaskAgnostic = RISCVVType::isMaskAgnostic(VType);
   }
-  void setVTYPE(RISCVII::VLMUL L, unsigned S, bool TA, bool MA, bool MRO) {
+  void setVTYPE(RISCVII::VLMUL L, unsigned S, bool TA, bool MA, bool MRO,
+                bool IsStore) {
     assert(isValid() && !isUnknown() &&
            "Can't set VTYPE for uninitialized or unknown");
     VLMul = L;
@@ -126,6 +128,7 @@
     TailAgnostic = TA;
     MaskAgnostic = MA;
     MaskRegOp = MRO;
+    StoreOp = IsStore;
   }

   unsigned encodeVTYPE() const {
@@ -198,17 +201,28 @@
       return true;
     }

-    // VTypes must match unless the instruction is a mask reg operation, then it
-    // only care about VLMAX.
+    // The AVL must match.
+    if (!hasSameAVL(InstrInfo))
+      return false;
+
+    // Simple case, see if full VTYPE matches.
+    if (hasSameVTYPE(InstrInfo))
+      return true;
+
+    // If this is a mask reg operation, it only cares about VLMAX.
     // FIXME: Mask reg operations are probably ok if "this" VLMAX is larger
     // than "InstrInfo".
-    if (!hasSameVTYPE(InstrInfo) &&
-        !(InstrInfo.MaskRegOp && hasSameVLMAX(InstrInfo) &&
-          TailAgnostic == InstrInfo.TailAgnostic &&
-          MaskAgnostic == InstrInfo.MaskAgnostic))
-      return false;
+    if (InstrInfo.MaskRegOp && hasSameVLMAX(InstrInfo) &&
+        TailAgnostic == InstrInfo.TailAgnostic &&
+        MaskAgnostic == InstrInfo.MaskAgnostic)
+      return true;
+
+    // Store instructions don't use the policy fields.
+    if (InstrInfo.StoreOp && VLMul == InstrInfo.VLMul && SEW == InstrInfo.SEW)
+      return true;

-    return hasSameAVL(InstrInfo);
+    // Anything else is not compatible.
+    return false;
   }

   bool isCompatibleWithLoadStoreEEW(unsigned EEW,
@@ -225,10 +239,9 @@
     if (!hasSameAVL(InstrInfo))
       return false;

-    // TODO: This check isn't required for stores. But we should ignore for all
-    // stores not just unit-stride and strided so leaving it for now.
-    if (TailAgnostic != InstrInfo.TailAgnostic ||
-        MaskAgnostic != InstrInfo.MaskAgnostic)
+    // Stores can ignore the tail and mask policies.
+    if (!InstrInfo.StoreOp && (TailAgnostic != InstrInfo.TailAgnostic ||
+                               MaskAgnostic != InstrInfo.MaskAgnostic))
       return false;

     return getSEWLMULRatio() == getSEWLMULRatio(EEW, InstrInfo.VLMul);
@@ -428,6 +441,10 @@
   unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");

+  // If there are no explicit defs, this is a store instruction which can
+  // ignore the tail and mask policies.
+  bool StoreOp = MI.getNumExplicitDefs() == 0;
+
   if (RISCVII::hasVLOp(TSFlags)) {
     const MachineOperand &VLOp = MI.getOperand(NumOperands - 2);
     if (VLOp.isImm()) {
@@ -443,7 +460,7 @@
   } else
     InstrInfo.setAVLReg(RISCV::NoRegister);
   InstrInfo.setVTYPE(VLMul, SEW, /*TailAgnostic*/ TailAgnostic,
-                     /*MaskAgnostic*/ false, MaskRegOp);
+                     /*MaskAgnostic*/ false, MaskRegOp, StoreOp);

   return InstrInfo;
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
@@ -681,7 +681,6 @@
 ; LMULMAX2-RV64-NEXT:    or a1, a1, a2
 ; LMULMAX2-RV64-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX2-RV64-NEXT:    vmv.s.x v26, a1
-; LMULMAX2-RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX2-RV64-NEXT:    vse64.v v26, (a0)
 ; LMULMAX2-RV64-NEXT:    ret
 ;
@@ -813,7 +812,6 @@
 ; LMULMAX1-RV64-NEXT:    or a1, a1, a2
 ; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v26, a1
-; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vse64.v v26, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
@@ -2328,7 +2326,6 @@
 ; LMULMAX1-RV64-NEXT:    or a1, a1, a2
 ; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v27, a1
-; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vse64.v v27, (a0)
 ; LMULMAX1-RV64-NEXT:    vse64.v v26, (a6)
 ; LMULMAX1-RV64-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
@@ -3892,7 +3892,6 @@
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
 ; LMULMAX2-RV64-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX2-RV64-NEXT:    vmv.s.x v26, a1
-; LMULMAX2-RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX2-RV64-NEXT:    vse64.v v26, (a0)
 ; LMULMAX2-RV64-NEXT:    ret
 ;
@@ -4127,7 +4126,6 @@
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
 ; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v26, a1
-; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vse64.v v26, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
@@ -11962,7 +11960,6 @@
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
 ; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v27, a1
-; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vse64.v v27, (a0)
 ; LMULMAX1-RV64-NEXT:    vse64.v v26, (a6)
 ; LMULMAX1-RV64-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
@@ -2721,7 +2721,6 @@
 ; LMULMAX2-RV64-NEXT:    srli a1, a1, 56
 ; LMULMAX2-RV64-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX2-RV64-NEXT:    vmv.s.x v26, a1
-; LMULMAX2-RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX2-RV64-NEXT:    vse64.v v26, (a0)
 ; LMULMAX2-RV64-NEXT:    ret
 ;
@@ -2905,7 +2904,6 @@
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
 ; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v26, a1
-; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vse64.v v26, (a0)
 ; LMULMAX1-RV64-NEXT:    ret
   %a = load <2 x i64>, <2 x i64>* %x
@@ -8294,7 +8292,6 @@
 ; LMULMAX1-RV64-NEXT:    srli a1, a1, 56
 ; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
 ; LMULMAX1-RV64-NEXT:    vmv.s.x v26, a1
-; LMULMAX1-RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; LMULMAX1-RV64-NEXT:    vse64.v v26, (a0)
 ; LMULMAX1-RV64-NEXT:    vse64.v v27, (a6)
 ; LMULMAX1-RV64-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll
@@ -856,7 +856,6 @@
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v9, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v26, v25, 8
-; LMULMAX1-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vse8.v v26, (a0)
 ; LMULMAX1-NEXT:    ret
 ;
@@ -1176,7 +1175,6 @@
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v26, v25, 4
-; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vse8.v v26, (a0)
 ; LMULMAX1-NEXT:    ret
 ;
@@ -1206,7 +1204,6 @@
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v9, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v26, v25, 4
-; LMULMAX1-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vse16.v v26, (a0)
 ; LMULMAX1-NEXT:    ret
 ;
@@ -1312,7 +1309,6 @@
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v26, v25, 12
-; LMULMAX1-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vse8.v v26, (a0)
 ; LMULMAX1-NEXT:    ret
 ;
@@ -1352,7 +1348,6 @@
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v26, v25, 4
 ; LMULMAX1-NEXT:    addi a1, a0, 16
-; LMULMAX1-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vse16.v v26, (a1)
 ; LMULMAX1-NEXT:    vse16.v v27, (a0)
 ; LMULMAX1-NEXT:    ret
@@ -1527,7 +1522,6 @@
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v26, v25, 2
-; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; LMULMAX1-NEXT:    vse8.v v26, (a0)
 ; LMULMAX1-NEXT:    ret
 ;
@@ -1563,7 +1557,6 @@
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v26, v25, 2
-; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; LMULMAX1-NEXT:    vse16.v v26, (a0)
 ; LMULMAX1-NEXT:    ret
 ;
@@ -1593,7 +1586,6 @@
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v9, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v26, v25, 2
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vse32.v v26, (a0)
 ; LMULMAX1-NEXT:    ret
 ;
@@ -1645,7 +1637,6 @@
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v26, v25, 6
-; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vse8.v v26, (a0)
 ; LMULMAX1-NEXT:    ret
 ;
@@ -1693,7 +1684,6 @@
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v26, v25, 6
-; LMULMAX1-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vse16.v v26, (a0)
 ; LMULMAX1-NEXT:    ret
 ;
@@ -1733,7 +1723,6 @@
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v26, v25, 2
 ; LMULMAX1-NEXT:    addi a1, a0, 16
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vse32.v v26, (a1)
 ; LMULMAX1-NEXT:    vse32.v v27, (a0)
 ; LMULMAX1-NEXT:    ret
@@ -1818,7 +1807,6 @@
 ; LMULMAX1-NEXT:    vnsrl.wi v26, v26, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v25, v26, 14
-; LMULMAX1-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; LMULMAX1-NEXT:    vse8.v v25, (a0)
 ; LMULMAX1-NEXT:    ret
 ;
@@ -1842,7 +1830,6 @@
 ; LMULMAX4-NEXT:    vnsrl.wi v25, v25, 0
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e8, m1, tu, mu
 ; LMULMAX4-NEXT:    vslideup.vi v26, v25, 8
-; LMULMAX4-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; LMULMAX4-NEXT:    vse8.v v26, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <16 x i64> %x to <16 x i8>
@@ -1905,7 +1892,6 @@
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v26, v25, 6
 ; LMULMAX1-NEXT:    addi a1, a0, 16
-; LMULMAX1-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vse16.v v26, (a1)
 ; LMULMAX1-NEXT:    vse16.v v27, (a0)
 ; LMULMAX1-NEXT:    ret
@@ -1926,7 +1912,6 @@
 ; LMULMAX4-NEXT:    vslideup.vi v26, v30, 0
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e16, m2, tu, mu
 ; LMULMAX4-NEXT:    vslideup.vi v26, v28, 8
-; LMULMAX4-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; LMULMAX4-NEXT:    vse16.v v26, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <16 x i64> %x to <16 x i16>
@@ -1975,7 +1960,6 @@
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v26, v25, 2
 ; LMULMAX1-NEXT:    addi a1, a0, 48
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vse32.v v26, (a1)
 ; LMULMAX1-NEXT:    addi a1, a0, 32
 ; LMULMAX1-NEXT:    vse32.v v29, (a1)
@@ -1995,7 +1979,6 @@
 ; LMULMAX4-NEXT:    vslideup.vi v8, v12, 0
 ; LMULMAX4-NEXT:    vsetivli zero, 16, e32, m4, tu, mu
 ; LMULMAX4-NEXT:    vslideup.vi v8, v28, 8
-; LMULMAX4-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; LMULMAX4-NEXT:    vse32.v v8, (a0)
 ; LMULMAX4-NEXT:    ret
   %y = trunc <16 x i64> %x to <16 x i32>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -91,7 +91,6 @@
 ; CHECK-NEXT:    fmv.w.x ft0, zero
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v25, ft0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
   store <2 x float> , <2 x float>* %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll
@@ -171,7 +171,6 @@
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v27, v26
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v25, v27, 4
-; LMULMAX1-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vse16.v v25, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x float>, <8 x float>* %x
@@ -228,7 +227,6 @@
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v25, v26
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v29, v25, 6
-; LMULMAX1-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vse16.v v29, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -476,7 +476,6 @@
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v29, v25, 6
-; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vse8.v v29, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -543,7 +542,6 @@
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v29, v25, 6
-; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vse8.v v29, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
@@ -500,7 +500,6 @@
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v25, v26
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v29, v25, 6
-; LMULMAX1-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vse16.v v29, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
@@ -557,7 +556,6 @@
 ; LMULMAX1-NEXT:    vfncvt.f.f.w v25, v26
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e16, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v29, v25, 6
-; LMULMAX1-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; LMULMAX1-NEXT:    vse16.v v29, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -132,7 +132,6 @@
 ; CHECK-NEXT:    vle32.v v26, (a0)
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; CHECK-NEXT:    vslideup.vi v26, v25, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vse32.v v26, (a0)
 ; CHECK-NEXT:    ret
   %sv = load <2 x i32>, <2 x i32>* %svp
@@ -212,7 +211,6 @@
 ; LMULMAX1-NEXT:    vle32.v v26, (a0)
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v26, v25, 2
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vse32.v v26, (a0)
 ; LMULMAX1-NEXT:    ret
   %sv = load <2 x i32>, <2 x i32>* %svp
@@ -231,7 +229,6 @@
 ; LMULMAX2-NEXT:    vle32.v v28, (a0)
 ; LMULMAX2-NEXT:    vsetvli zero, zero, e32, m2, tu, mu
 ; LMULMAX2-NEXT:    vslideup.vi v28, v26, 6
-; LMULMAX2-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; LMULMAX2-NEXT:    vse32.v v28, (a0)
 ; LMULMAX2-NEXT:    ret
 ;
@@ -244,7 +241,6 @@
 ; LMULMAX1-NEXT:    vle32.v v26, (a0)
 ; LMULMAX1-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v26, v25, 2
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; LMULMAX1-NEXT:    vse32.v v26, (a0)
 ; LMULMAX1-NEXT:    ret
   %sv = load <2 x i32>, <2 x i32>* %svp
@@ -307,7 +303,6 @@
 ; CHECK-NEXT:    vle16.v v26, (a1)
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, tu, mu
 ; CHECK-NEXT:    vslideup.vi v25, v26, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %v = load <4 x i16>, <4 x i16>* %vp
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -16,7 +16,6 @@
 ; RV32-NEXT:    vslide1up.vx v28, v30, a1
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, tu, mu
 ; RV32-NEXT:    vslideup.vi v26, v28, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32-NEXT:    vse64.v v26, (a0)
 ; RV32-NEXT:    ret
 ;
@@ -27,7 +26,6 @@
 ; RV64-NEXT:    vmv.s.x v28, a1
 ; RV64-NEXT:    vsetvli zero, zero, e64, m2, tu, mu
 ; RV64-NEXT:    vslideup.vi v26, v28, 3
-; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64-NEXT:    vse64.v v26, (a0)
 ; RV64-NEXT:    ret
   %a = load <4 x i64>, <4 x i64>* %x
@@ -168,7 +166,6 @@
 ; CHECK-NEXT:    addi a1, zero, -1
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
 ; CHECK-NEXT:    vmv.s.x v28, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vse64.v v28, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
@@ -218,7 +215,6 @@
 ; CHECK-NEXT:    addi a1, zero, 6
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
 ; CHECK-NEXT:    vmv.s.x v28, a1
-; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vse64.v v28, (a0)
 ; CHECK-NEXT:    ret
   %a = load <8 x i64>, <8 x i64>* %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -388,7 +388,6 @@
 ; RV64-NEXT:    addi a1, a1, -910
 ; RV64-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
 ; RV64-NEXT:    vmv.s.x v25, a1
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64-NEXT:    vse64.v v25, (a0)
 ; RV64-NEXT:    ret
   store <2 x i64> , <2 x i64>* %x
@@ -575,7 +574,6 @@
 ; CHECK-NEXT:    vmv.v.i v26, 0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
 ; CHECK-NEXT:    vslideup.vi v26, v25, 3
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vse32.v v26, (a6)
 ; CHECK-NEXT:    ret
   store <4 x i32> , <4 x i32>* %z0
@@ -614,7 +612,6 @@
 ; CHECK-NEXT:    vmv.v.i v26, 3
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
 ; CHECK-NEXT:    vslideup.vi v26, v25, 3
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vse16.v v26, (a6)
 ; CHECK-NEXT:    ret
   store <4 x i16> , <4 x i16>* %z0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
@@ -223,7 +223,6 @@
 ; LMULMAX1-NEXT:    vnsrl.wi v25, v25, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, tu, mu
 ; LMULMAX1-NEXT:    vslideup.vi v27, v25, 4
-; LMULMAX1-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; LMULMAX1-NEXT:    vse8.v v27, (a1)
 ; LMULMAX1-NEXT:    ret
   %a = load <8 x i32>, <8 x i32>* %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
--- a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll
@@ -48,7 +48,6 @@
 ; RV64-1024-NEXT:    vslideup.vi v0, v25, 3
 ; RV64-1024-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
 ; RV64-1024-NEXT:    vrgather.vv v12, v28, v8, v0.t
-; RV64-1024-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV64-1024-NEXT:    vse16.v v12, (a0)
 ; RV64-1024-NEXT:    ret
 ;
@@ -97,7 +96,6 @@
 ; RV64-2048-NEXT:    vslideup.vi v0, v25, 3
 ; RV64-2048-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
 ; RV64-2048-NEXT:    vrgather.vv v30, v26, v28, v0.t
-; RV64-2048-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; RV64-2048-NEXT:    vse16.v v30, (a0)
 ; RV64-2048-NEXT:    ret
 entry:
@@ -225,7 +223,6 @@
 ; RV64-1024-NEXT:    addi a1, a1, 16
 ; RV64-1024-NEXT:    vl8re8.v v24, (a1) # Unknown-size Folded Reload
 ; RV64-1024-NEXT:    vrgather.vv v8, v24, v16, v0.t
-; RV64-1024-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
 ; RV64-1024-NEXT:    vse16.v v8, (a0)
 ; RV64-1024-NEXT:    csrr a0, vlenb
 ; RV64-1024-NEXT:    addi a1, zero, 40
@@ -287,7 +284,6 @@
 ; RV64-2048-NEXT:    vslideup.vi v0, v25, 7
 ; RV64-2048-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
 ; RV64-2048-NEXT:    vrgather.vv v12, v28, v8, v0.t
-; RV64-2048-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; RV64-2048-NEXT:    vse16.v v12, (a0)
 ; RV64-2048-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -457,7 +457,6 @@
 ; CHECK-NEXT:    add a2, a2, a1
 ; CHECK-NEXT:    vsetvli zero, a4, e32, m8, tu, mu
 ; CHECK-NEXT:    vfmacc.vf v16, ft0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vse32.v v16, (a3)
 ; CHECK-NEXT:    sub a0, a0, a4
 ; CHECK-NEXT:    vsetvli a4, a0, e32, m8, ta, mu
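
Note: every codegen delta above has the same shape: an element insert, slideup, or masked gather executed under a tail-undisturbed (tu) vtype, immediately followed by a unit-stride store, where the only instruction between them used to be a vsetvli that flipped the policy back to ta. A minimal IR sketch of that pattern follows; the function name and the choice of a <4 x i64> store are illustrative only (they are not copied verbatim from the tests in this patch). With this change, the store is treated as compatible with the preceding tu vtype whenever SEW, LMUL, and AVL match, so no extra vsetvli is emitted before the vse64.v.

; Illustrative sketch only, not part of the patch: an element insert followed
; by a store of the same fixed-length vector.
define void @insert_then_store(<4 x i64>* %p, i64 %v) {
  %a = load <4 x i64>, <4 x i64>* %p
  %b = insertelement <4 x i64> %a, i64 %v, i32 3
  store <4 x i64> %b, <4 x i64>* %p
  ret void
}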