diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -588,8 +588,8 @@
   Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
 }] in {
   class RVVVLEMaskBuiltin : RVVBuiltin<"m", "mPCUe", "c"> {
-    let Name = "vle1_v";
-    let IRName = "vle1";
+    let Name = "vlm_v";
+    let IRName = "vlm";
     let HasMask = false;
   }
 }
@@ -735,8 +735,8 @@
   IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
 }] in {
   class RVVVSEMaskBuiltin : RVVBuiltin<"m", "0PUem", "c"> {
-    let Name = "vse1_v";
-    let IRName = "vse1";
+    let Name = "vsm_v";
+    let IRName = "vsm";
     let HasMask = false;
   }
 }
@@ -1553,13 +1553,13 @@
 // 7. Vector Loads and Stores
 // 7.4. Vector Unit-Stride Instructions
-def vle1: RVVVLEMaskBuiltin;
+def vlm: RVVVLEMaskBuiltin;
 defm vle8: RVVVLEBuiltin<["c"]>;
 defm vle16: RVVVLEBuiltin<["s","x"]>;
 defm vle32: RVVVLEBuiltin<["i","f"]>;
 defm vle64: RVVVLEBuiltin<["l","d"]>;
-def vse1 : RVVVSEMaskBuiltin;
+def vsm : RVVVSEMaskBuiltin;
 defm vse8 : RVVVSEBuiltin<["c"]>;
 defm vse16: RVVVSEBuiltin<["s","x"]>;
 defm vse32: RVVVSEBuiltin<["i","f"]>;
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c
@@ -1187,72 +1187,72 @@
   return vse64(mask, base, value, vl);
 }

-// CHECK-RV64-LABEL: @test_vse1_v_b1(
+// CHECK-RV64-LABEL: @test_vsm_v_b1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i1>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv64i1.i64(<vscale x 64 x i1> [[VALUE:%.*]], <vscale x 64 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsm.nxv64i1.i64(<vscale x 64 x i1> [[VALUE:%.*]], <vscale x 64 x i1>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse1_v_b1(uint8_t *base, vbool1_t value, size_t vl) {
-  return vse1(base, value, vl);
+void test_vsm_v_b1(uint8_t *base, vbool1_t value, size_t vl) {
+  return vsm(base, value, vl);
 }

-// CHECK-RV64-LABEL: @test_vse1_v_b2(
+// CHECK-RV64-LABEL: @test_vsm_v_b2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i1>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv32i1.i64(<vscale x 32 x i1> [[VALUE:%.*]], <vscale x 32 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsm.nxv32i1.i64(<vscale x 32 x i1> [[VALUE:%.*]], <vscale x 32 x i1>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse1_v_b2(uint8_t *base, vbool2_t value, size_t vl) {
-  return vse1(base, value, vl);
+void test_vsm_v_b2(uint8_t *base, vbool2_t value, size_t vl) {
+  return vsm(base, value, vl);
 }

-// CHECK-RV64-LABEL: @test_vse1_v_b4(
+// CHECK-RV64-LABEL: @test_vsm_v_b4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i1>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv16i1.i64(<vscale x 16 x i1> [[VALUE:%.*]], <vscale x 16 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsm.nxv16i1.i64(<vscale x 16 x i1> [[VALUE:%.*]], <vscale x 16 x i1>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse1_v_b4(uint8_t *base, vbool4_t value, size_t vl) {
-  return vse1(base, value, vl);
+void test_vsm_v_b4(uint8_t *base, vbool4_t value, size_t vl) {
+  return vsm(base, value, vl);
 }

-// CHECK-RV64-LABEL: @test_vse1_v_b8(
+// CHECK-RV64-LABEL: @test_vsm_v_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i1>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv8i1.i64(
[[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsm.nxv8i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse1_v_b8(uint8_t *base, vbool8_t value, size_t vl) { - return vse1(base, value, vl); +void test_vsm_v_b8(uint8_t *base, vbool8_t value, size_t vl) { + return vsm(base, value, vl); } -// CHECK-RV64-LABEL: @test_vse1_v_b16( +// CHECK-RV64-LABEL: @test_vsm_v_b16( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv4i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsm.nxv4i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse1_v_b16(uint8_t *base, vbool16_t value, size_t vl) { - return vse1(base, value, vl); +void test_vsm_v_b16(uint8_t *base, vbool16_t value, size_t vl) { + return vsm(base, value, vl); } -// CHECK-RV64-LABEL: @test_vse1_v_b32( +// CHECK-RV64-LABEL: @test_vsm_v_b32( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv2i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsm.nxv2i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse1_v_b32(uint8_t *base, vbool32_t value, size_t vl) { - return vse1(base, value, vl); +void test_vsm_v_b32(uint8_t *base, vbool32_t value, size_t vl) { + return vsm(base, value, vl); } -// CHECK-RV64-LABEL: @test_vse1_v_b64( +// CHECK-RV64-LABEL: @test_vsm_v_b64( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv1i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsm.nxv1i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse1_v_b64(uint8_t *base, vbool64_t value, size_t vl) { - return vse1(base, value, vl); +void test_vsm_v_b64(uint8_t *base, vbool64_t value, size_t vl) { + return vsm(base, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c @@ -1776,3 +1776,72 @@ vfloat64m8_t test_vle64_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl, uint8_t ta) { return vle64_v_f64m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vlm_v_b1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlm.nxv64i1.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vbool1_t test_vlm_v_b1(const uint8_t *base, size_t vl) { + return vlm_v_b1(base, vl); +} + +// CHECK-RV64-LABEL: @test_vlm_v_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlm.nxv32i1.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vbool2_t test_vlm_v_b2(const uint8_t *base, size_t vl) { + return vlm_v_b2(base, vl); +} + +// CHECK-RV64-LABEL: @test_vlm_v_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlm.nxv16i1.i64(* [[TMP0]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vbool4_t test_vlm_v_b4(const uint8_t *base, size_t vl) { + return vlm_v_b4(base, vl); +} +// CHECK-RV64-LABEL: @test_vlm_v_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlm.nxv8i1.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vbool8_t test_vlm_v_b8(const uint8_t *base, size_t vl) { + return vlm_v_b8(base, vl); +} + +// CHECK-RV64-LABEL: @test_vlm_v_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlm.nxv4i1.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vbool16_t test_vlm_v_b16(const uint8_t *base, size_t vl) { + return vlm_v_b16(base, vl); +} + +// CHECK-RV64-LABEL: @test_vlm_v_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlm.nxv2i1.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vbool32_t test_vlm_v_b32(const uint8_t *base, size_t vl) { + return vlm_v_b32(base, vl); +} + +// CHECK-RV64-LABEL: @test_vlm_v_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlm.nxv1i1.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vbool64_t test_vlm_v_b64(const uint8_t *base, size_t vl) { + return vlm_v_b64(base, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c @@ -1187,72 +1187,72 @@ return vse64_v_f64m8_m(mask, base, value, vl); } -// CHECK-RV64-LABEL: @test_vse1_v_b1( +// CHECK-RV64-LABEL: @test_vsm_v_b1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv64i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsm.nxv64i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse1_v_b1(uint8_t *base, vbool1_t value, size_t vl) { - return vse1_v_b1(base, value, vl); +void test_vsm_v_b1(uint8_t *base, vbool1_t value, size_t vl) { + return vsm_v_b1(base, value, vl); } -// CHECK-RV64-LABEL: @test_vse1_v_b2( +// CHECK-RV64-LABEL: @test_vsm_v_b2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv32i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsm.nxv32i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse1_v_b2(uint8_t *base, vbool2_t value, size_t vl) { - return vse1_v_b2(base, value, vl); +void test_vsm_v_b2(uint8_t *base, vbool2_t value, size_t vl) { + return vsm_v_b2(base, value, vl); } -// CHECK-RV64-LABEL: @test_vse1_v_b4( +// CHECK-RV64-LABEL: @test_vsm_v_b4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv16i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsm.nxv16i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse1_v_b4(uint8_t *base, vbool4_t value, 
size_t vl) {
-  return vse1_v_b4(base, value, vl);
+void test_vsm_v_b4(uint8_t *base, vbool4_t value, size_t vl) {
+  return vsm_v_b4(base, value, vl);
 }

-// CHECK-RV64-LABEL: @test_vse1_v_b8(
+// CHECK-RV64-LABEL: @test_vsm_v_b8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i1>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv8i1.i64(<vscale x 8 x i1> [[VALUE:%.*]], <vscale x 8 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsm.nxv8i1.i64(<vscale x 8 x i1> [[VALUE:%.*]], <vscale x 8 x i1>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse1_v_b8(uint8_t *base, vbool8_t value, size_t vl) {
-  return vse1_v_b8(base, value, vl);
+void test_vsm_v_b8(uint8_t *base, vbool8_t value, size_t vl) {
+  return vsm_v_b8(base, value, vl);
 }

-// CHECK-RV64-LABEL: @test_vse1_v_b16(
+// CHECK-RV64-LABEL: @test_vsm_v_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i1>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv4i1.i64(<vscale x 4 x i1> [[VALUE:%.*]], <vscale x 4 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsm.nxv4i1.i64(<vscale x 4 x i1> [[VALUE:%.*]], <vscale x 4 x i1>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse1_v_b16(uint8_t *base, vbool16_t value, size_t vl) {
-  return vse1_v_b16(base, value, vl);
+void test_vsm_v_b16(uint8_t *base, vbool16_t value, size_t vl) {
+  return vsm_v_b16(base, value, vl);
 }

-// CHECK-RV64-LABEL: @test_vse1_v_b32(
+// CHECK-RV64-LABEL: @test_vsm_v_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i1>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv2i1.i64(<vscale x 2 x i1> [[VALUE:%.*]], <vscale x 2 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsm.nxv2i1.i64(<vscale x 2 x i1> [[VALUE:%.*]], <vscale x 2 x i1>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse1_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
-  return vse1_v_b32(base, value, vl);
+void test_vsm_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
+  return vsm_v_b32(base, value, vl);
 }

-// CHECK-RV64-LABEL: @test_vse1_v_b64(
+// CHECK-RV64-LABEL: @test_vsm_v_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i1>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse1.nxv1i1.i64(<vscale x 1 x i1> [[VALUE:%.*]], <vscale x 1 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsm.nxv1i1.i64(<vscale x 1 x i1> [[VALUE:%.*]], <vscale x 1 x i1>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse1_v_b64(uint8_t *base, vbool64_t value, size_t vl) {
-  return vse1_v_b64(base, value, vl);
+void test_vsm_v_b64(uint8_t *base, vbool64_t value, size_t vl) {
+  return vsm_v_b64(base, value, vl);
 }
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -973,8 +973,8 @@
   defm vsoxei : RISCVIStore;
   defm vsuxei : RISCVIStore;

-  def int_riscv_vle1 : RISCVUSLoad;
-  def int_riscv_vse1 : RISCVUSStore;
+  def int_riscv_vlm : RISCVUSLoad;
+  def int_riscv_vsm : RISCVUSStore;

   defm vamoswap : RISCVAMO;
   defm vamoadd : RISCVAMO;
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1113,7 +1113,7 @@
     ReplaceNode(Node, Load);
     return;
   }
-  case Intrinsic::riscv_vle1:
+  case Intrinsic::riscv_vlm:
   case Intrinsic::riscv_vle:
   case Intrinsic::riscv_vle_mask:
   case Intrinsic::riscv_vlse:
@@ -1303,7 +1303,7 @@
     ReplaceNode(Node, Store);
     return;
   }
-  case Intrinsic::riscv_vse1:
+  case Intrinsic::riscv_vsm:
   case Intrinsic::riscv_vse:
   case Intrinsic::riscv_vse_mask:
   case Intrinsic::riscv_vsse:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -858,10 +858,14 @@
 def VLE64FF_V : VUnitStrideLoad, VLFSched<64>;

-def VLE1_V : VUnitStrideLoadMask<"vle1.v">,
+def VLM_V : VUnitStrideLoadMask<"vlm.v">,
             Sched<[WriteVLDM, ReadVLDX]>;
-def VSE1_V : VUnitStrideStoreMask<"vse1.v">,
+def VSM_V : VUnitStrideStoreMask<"vsm.v">,
             Sched<[WriteVSTM, ReadVSTM, ReadVSTX]>;
+def : InstAlias<"vle1.v $vd, (${rs1})",
+                (VLM_V VR:$vd, GPR:$rs1), 0>;
+def : InstAlias<"vse1.v $vs3, (${rs1})",
+                (VSM_V VR:$vs3, GPR:$rs1), 0>;

 def VSE8_V : VUnitStrideStore, VSESched<8>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3534,8 +3534,8 @@
 defm PseudoVL : VPseudoUSLoad;
 defm PseudoVS : VPseudoUSStore;

-defm PseudoVLE1 : VPseudoLoadMask;
-defm PseudoVSE1 : VPseudoStoreMask;
+defm PseudoVLM : VPseudoLoadMask;
+defm PseudoVSM : VPseudoStoreMask;

 //===----------------------------------------------------------------------===//
 // 7.5 Vector Strided Instructions
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -89,8 +89,8 @@
 multiclass VPatUSLoadStoreMaskSDNode {
-  defvar load_instr = !cast<Instruction>("PseudoVLE1_V_"#m.BX);
-  defvar store_instr = !cast<Instruction>("PseudoVSE1_V_"#m.BX);
+  defvar load_instr = !cast<Instruction>("PseudoVLM_V_"#m.BX);
+  defvar store_instr = !cast<Instruction>("PseudoVSM_V_"#m.BX);
   // Load
   def : Pat<(m.Mask (load BaseAddr:$rs1)),
             (load_instr BaseAddr:$rs1, m.AVL, m.Log2SEW)>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -622,8 +622,8 @@
 }

 foreach mti = AllMasks in {
-  defvar load_instr = !cast<Instruction>("PseudoVLE1_V_"#mti.BX);
-  defvar store_instr = !cast<Instruction>("PseudoVSE1_V_"#mti.BX);
+  defvar load_instr = !cast<Instruction>("PseudoVLM_V_"#mti.BX);
+  defvar store_instr = !cast<Instruction>("PseudoVSM_V_"#mti.BX);
   def : Pat<(mti.Mask (riscv_vle_vl BaseAddr:$rs1, VLOpFrag)),
             (load_instr BaseAddr:$rs1, GPR:$vl, mti.Log2SEW)>;
   def : Pat<(riscv_vse_vl (mti.Mask VR:$rs2), BaseAddr:$rs1,
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -49,7 +49,7 @@
 ; CHECK-LABEL: ret_mask_nxv8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vle1.v v0, (a0)
+; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
   %v = load <vscale x 8 x i1>, <vscale x 8 x i1>* %p
   ret <vscale x 8 x i1> %v
@@ -59,7 +59,7 @@
 ; CHECK-LABEL: ret_mask_nxv32i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
-; CHECK-NEXT:    vle1.v v0, (a0)
+; CHECK-NEXT:    vlm.v v0, (a0)
 ; CHECK-NEXT:    ret
   %v = load <vscale x 32 x i1>, <vscale x 32 x i1>* %p
   ret <vscale x 32 x i1> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
@@
-6,7 +6,7 @@ ; CHECK-LABEL: sextload_nxv1i1_nxv1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) +; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: vmv.v.i v25, 0 ; CHECK-NEXT: vmerge.vim v8, v25, -1, v0 ; CHECK-NEXT: ret @@ -399,7 +399,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu ; CHECK-NEXT: vand.vi v25, v8, 1 ; CHECK-NEXT: vmsne.vi v25, v25, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %y = trunc %x to store %y, * %z diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll @@ -54,7 +54,7 @@ ; CHECK-LABEL: ret_mask_v8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) +; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret %v = load <8 x i1>, <8 x i1>* %p ret <8 x i1> %v @@ -65,7 +65,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, zero, 32 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) +; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret %v = load <32 x i1>, <32 x i1>* %p ret <32 x i1> %v @@ -561,7 +561,7 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: addi a0, sp, 152 -; CHECK-NEXT: vle1.v v25, (a0) +; CHECK-NEXT: vlm.v v25, (a0) ; CHECK-NEXT: vmxor.mm v0, v0, v25 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll @@ -108,7 +108,7 @@ ; CHECK-LABEL: ret_mask_v8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) +; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret %v = load <8 x i1>, <8 x i1>* %p ret <8 x i1> %v @@ -119,29 +119,29 @@ ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: addi a1, zero, 32 ; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; LMULMAX8-NEXT: vle1.v v0, (a0) +; LMULMAX8-NEXT: vlm.v v0, (a0) ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_mask_v32i1: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: addi a1, zero, 32 ; LMULMAX4-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; LMULMAX4-NEXT: vle1.v v0, (a0) +; LMULMAX4-NEXT: vlm.v v0, (a0) ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: ret_mask_v32i1: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a1, zero, 32 ; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; LMULMAX2-NEXT: vle1.v v0, (a0) +; LMULMAX2-NEXT: vlm.v v0, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: ret_mask_v32i1: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; LMULMAX1-NEXT: vle1.v v0, (a0) +; LMULMAX1-NEXT: vlm.v v0, (a0) ; LMULMAX1-NEXT: addi a0, a0, 2 -; LMULMAX1-NEXT: vle1.v v8, (a0) +; LMULMAX1-NEXT: vlm.v v8, (a0) ; LMULMAX1-NEXT: ret %v = load <32 x i1>, <32 x i1>* %p ret <32 x i1> %v @@ -1419,7 +1419,7 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu ; CHECK-NEXT: addi a0, sp, 152 -; CHECK-NEXT: vle1.v v0, (a0) +; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret ret <4 x i1> %10 @@ -1453,7 +1453,7 @@ ; LMULMAX8-NEXT: addi a5, zero, 5 ; LMULMAX8-NEXT: addi a6, zero, 6 ; LMULMAX8-NEXT: addi a7, zero, 7 -; LMULMAX8-NEXT: vse1.v v25, (a0) +; LMULMAX8-NEXT: vsm.v v25, 
(a0) ; LMULMAX8-NEXT: mv a0, zero ; LMULMAX8-NEXT: mv a1, zero ; LMULMAX8-NEXT: mv a2, zero @@ -1491,7 +1491,7 @@ ; LMULMAX4-NEXT: addi a5, zero, 5 ; LMULMAX4-NEXT: addi a6, zero, 6 ; LMULMAX4-NEXT: addi a7, zero, 7 -; LMULMAX4-NEXT: vse1.v v25, (a0) +; LMULMAX4-NEXT: vsm.v v25, (a0) ; LMULMAX4-NEXT: mv a0, zero ; LMULMAX4-NEXT: mv a1, zero ; LMULMAX4-NEXT: mv a2, zero @@ -1535,7 +1535,7 @@ ; LMULMAX2-NEXT: addi a5, zero, 5 ; LMULMAX2-NEXT: addi a6, zero, 6 ; LMULMAX2-NEXT: addi a7, zero, 7 -; LMULMAX2-NEXT: vse1.v v25, (a0) +; LMULMAX2-NEXT: vsm.v v25, (a0) ; LMULMAX2-NEXT: mv a0, zero ; LMULMAX2-NEXT: mv a1, zero ; LMULMAX2-NEXT: mv a2, zero @@ -1591,7 +1591,7 @@ ; LMULMAX1-NEXT: addi a5, zero, 5 ; LMULMAX1-NEXT: addi a6, zero, 6 ; LMULMAX1-NEXT: addi a7, zero, 7 -; LMULMAX1-NEXT: vse1.v v25, (a0) +; LMULMAX1-NEXT: vsm.v v25, (a0) ; LMULMAX1-NEXT: mv a0, zero ; LMULMAX1-NEXT: mv a1, zero ; LMULMAX1-NEXT: mv a2, zero diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll @@ -8,7 +8,7 @@ ; CHECK-LABEL: sextload_v2i1_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) +; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v25, 0 ; CHECK-NEXT: vmerge.vim v8, v25, -1, v0 @@ -562,7 +562,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %y = trunc <2 x i8> %x to <2 x i1> store <2 x i1> %y, <2 x i1>* %z diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll @@ -214,17 +214,17 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a2, zero, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; LMULMAX2-NEXT: vle1.v v25, (a0) +; LMULMAX2-NEXT: vlm.v v25, (a0) ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; LMULMAX2-NEXT: vse1.v v25, (a1) +; LMULMAX2-NEXT: vsm.v v25, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v8i1_v64i1_0: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; LMULMAX1-NEXT: vle1.v v25, (a0) +; LMULMAX1-NEXT: vlm.v v25, (a0) ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; LMULMAX1-NEXT: vse1.v v25, (a1) +; LMULMAX1-NEXT: vsm.v v25, (a1) ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 0) @@ -237,21 +237,21 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a2, zero, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; LMULMAX2-NEXT: vle1.v v25, (a0) +; LMULMAX2-NEXT: vlm.v v25, (a0) ; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, ta, mu ; LMULMAX2-NEXT: vslidedown.vi v25, v25, 1 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; LMULMAX2-NEXT: vse1.v v25, (a1) +; LMULMAX2-NEXT: vsm.v v25, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v8i1_v64i1_8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; LMULMAX1-NEXT: vle1.v v25, (a0) +; LMULMAX1-NEXT: vlm.v v25, (a0) ; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, ta, mu ; LMULMAX1-NEXT: 
vslidedown.vi v25, v25, 1 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; LMULMAX1-NEXT: vse1.v v25, (a1) +; LMULMAX1-NEXT: vsm.v v25, (a1) ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 8) @@ -265,20 +265,20 @@ ; LMULMAX2-NEXT: addi a0, a0, 4 ; LMULMAX2-NEXT: addi a2, zero, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; LMULMAX2-NEXT: vle1.v v25, (a0) +; LMULMAX2-NEXT: vlm.v v25, (a0) ; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, ta, mu ; LMULMAX2-NEXT: vslidedown.vi v25, v25, 2 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; LMULMAX2-NEXT: vse1.v v25, (a1) +; LMULMAX2-NEXT: vsm.v v25, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v8i1_v64i1_48: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a0, a0, 6 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; LMULMAX1-NEXT: vle1.v v25, (a0) +; LMULMAX1-NEXT: vlm.v v25, (a0) ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; LMULMAX1-NEXT: vse1.v v25, (a1) +; LMULMAX1-NEXT: vsm.v v25, (a1) ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 48) @@ -290,7 +290,7 @@ ; CHECK-LABEL: extract_v8i1_nxv2i1_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vse1.v v0, (a0) +; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv2i1( %x, i64 0) store <8 x i1> %c, <8 x i1>* %y @@ -301,7 +301,7 @@ ; CHECK-LABEL: extract_v8i1_nxv64i1_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vse1.v v0, (a0) +; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv64i1( %x, i64 0) store <8 x i1> %c, <8 x i1>* %y @@ -314,7 +314,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v25, v0, 1 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv64i1( %x, i64 8) store <8 x i1> %c, <8 x i1>* %y @@ -327,7 +327,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vi v25, v0, 6 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv64i1( %x, i64 48) store <8 x i1> %c, <8 x i1>* %y @@ -340,7 +340,7 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a2, zero, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; LMULMAX2-NEXT: vle1.v v0, (a0) +; LMULMAX2-NEXT: vlm.v v0, (a0) ; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; LMULMAX2-NEXT: vmv.v.i v25, 0 ; LMULMAX2-NEXT: vmerge.vim v25, v25, 1, v0 @@ -350,13 +350,13 @@ ; LMULMAX2-NEXT: vslideup.vi v26, v25, 0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmsne.vi v25, v26, 0 -; LMULMAX2-NEXT: vse1.v v25, (a1) +; LMULMAX2-NEXT: vsm.v v25, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v2i1_v64i1_0: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; LMULMAX1-NEXT: vle1.v v0, (a0) +; LMULMAX1-NEXT: vlm.v v0, (a0) ; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; LMULMAX1-NEXT: vmv.v.i v25, 0 ; LMULMAX1-NEXT: vmerge.vim v25, v25, 1, v0 @@ -366,7 +366,7 @@ ; LMULMAX1-NEXT: vslideup.vi v26, v25, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v25, v26, 0 -; 
LMULMAX1-NEXT: vse1.v v25, (a1) +; LMULMAX1-NEXT: vsm.v v25, (a1) ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 0) @@ -379,7 +379,7 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a2, zero, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; LMULMAX2-NEXT: vle1.v v0, (a0) +; LMULMAX2-NEXT: vlm.v v0, (a0) ; LMULMAX2-NEXT: vmv.v.i v26, 0 ; LMULMAX2-NEXT: vmerge.vim v26, v26, 1, v0 ; LMULMAX2-NEXT: vsetivli zero, 2, e8, m2, ta, mu @@ -394,13 +394,13 @@ ; LMULMAX2-NEXT: vslideup.vi v26, v25, 0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmsne.vi v25, v26, 0 -; LMULMAX2-NEXT: vse1.v v25, (a1) +; LMULMAX2-NEXT: vsm.v v25, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v2i1_v64i1_2: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; LMULMAX1-NEXT: vle1.v v0, (a0) +; LMULMAX1-NEXT: vlm.v v0, (a0) ; LMULMAX1-NEXT: vmv.v.i v25, 0 ; LMULMAX1-NEXT: vmerge.vim v25, v25, 1, v0 ; LMULMAX1-NEXT: vsetivli zero, 2, e8, m1, ta, mu @@ -415,7 +415,7 @@ ; LMULMAX1-NEXT: vslideup.vi v26, v25, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v25, v26, 0 -; LMULMAX1-NEXT: vse1.v v25, (a1) +; LMULMAX1-NEXT: vsm.v v25, (a1) ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 2) @@ -429,7 +429,7 @@ ; LMULMAX2-NEXT: addi a0, a0, 4 ; LMULMAX2-NEXT: addi a2, zero, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; LMULMAX2-NEXT: vle1.v v0, (a0) +; LMULMAX2-NEXT: vlm.v v0, (a0) ; LMULMAX2-NEXT: vmv.v.i v26, 0 ; LMULMAX2-NEXT: vmerge.vim v26, v26, 1, v0 ; LMULMAX2-NEXT: vsetivli zero, 2, e8, m2, ta, mu @@ -444,14 +444,14 @@ ; LMULMAX2-NEXT: vslideup.vi v26, v25, 0 ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX2-NEXT: vmsne.vi v25, v26, 0 -; LMULMAX2-NEXT: vse1.v v25, (a1) +; LMULMAX2-NEXT: vsm.v v25, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v2i1_v64i1_42: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a0, a0, 4 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; LMULMAX1-NEXT: vle1.v v0, (a0) +; LMULMAX1-NEXT: vlm.v v0, (a0) ; LMULMAX1-NEXT: vmv.v.i v25, 0 ; LMULMAX1-NEXT: vmerge.vim v25, v25, 1, v0 ; LMULMAX1-NEXT: vsetivli zero, 2, e8, m1, ta, mu @@ -466,7 +466,7 @@ ; LMULMAX1-NEXT: vslideup.vi v26, v25, 0 ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; LMULMAX1-NEXT: vmsne.vi v25, v26, 0 -; LMULMAX1-NEXT: vse1.v v25, (a1) +; LMULMAX1-NEXT: vsm.v v25, (a1) ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 42) @@ -486,7 +486,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv2i1( %x, i64 0) store <2 x i1> %c, <2 x i1>* %y @@ -511,7 +511,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv2i1( %x, i64 2) store <2 x i1> %c, <2 x i1>* %y @@ -530,7 +530,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; 
CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv64i1( %x, i64 0) store <2 x i1> %c, <2 x i1>* %y @@ -555,7 +555,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv64i1( %x, i64 2) store <2 x i1> %c, <2 x i1>* %y @@ -581,7 +581,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv64i1( %x, i64 42) store <2 x i1> %c, <2 x i1>* %y @@ -606,7 +606,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv32i1( %x, i64 26) store <2 x i1> %c, <2 x i1>* %y @@ -619,7 +619,7 @@ ; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vi v25, v0, 2 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv32i1( %x, i64 16) store <8 x i1> %c, <8 x i1>* %y diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll @@ -9,7 +9,7 @@ ; CHECK-NEXT: vle16.v v25, (a0) ; CHECK-NEXT: vle16.v v26, (a1) ; CHECK-NEXT: vmfeq.vv v25, v25, v26 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = load <8 x half>, <8 x half>* %y @@ -25,7 +25,7 @@ ; CHECK-NEXT: vle16.v v25, (a0) ; CHECK-NEXT: vle16.v v26, (a1) ; CHECK-NEXT: vmfeq.vv v25, v25, v26 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = load <8 x half>, <8 x half>* %y @@ -50,7 +50,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -75,7 +75,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = load <4 x float>, <4 x float>* %y @@ -100,7 +100,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -125,7 +125,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = load <2 x double>, <2 x double>* %y @@ -141,7 +141,7 @@ ; CHECK-NEXT: vle16.v v26, (a0) ; 
CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vmflt.vv v25, v26, v28 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = load <16 x half>, <16 x half>* %y @@ -157,7 +157,7 @@ ; CHECK-NEXT: vle16.v v26, (a0) ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vmflt.vv v25, v26, v28 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = load <16 x half>, <16 x half>* %y @@ -173,7 +173,7 @@ ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vmfle.vv v25, v28, v26 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = load <8 x float>, <8 x float>* %y @@ -189,7 +189,7 @@ ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vmfle.vv v25, v28, v26 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = load <8 x float>, <8 x float>* %y @@ -214,7 +214,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = load <4 x double>, <4 x double>* %y @@ -239,7 +239,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = load <4 x double>, <4 x double>* %y @@ -257,7 +257,7 @@ ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vmflt.vv v25, v8, v28 ; CHECK-NEXT: vmnand.mm v25, v25, v25 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <32 x half>, <32 x half>* %x %b = load <32 x half>, <32 x half>* %y @@ -274,7 +274,7 @@ ; CHECK-NEXT: vle16.v v28, (a0) ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vmfle.vv v25, v28, v8 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <32 x half>, <32 x half>* %x %b = load <32 x half>, <32 x half>* %y @@ -291,7 +291,7 @@ ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vmflt.vv v25, v28, v8 ; CHECK-NEXT: vmnand.mm v25, v25, v25 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <16 x float>, <16 x float>* %x %b = load <16 x float>, <16 x float>* %y @@ -307,7 +307,7 @@ ; CHECK-NEXT: vle32.v v28, (a0) ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vmfle.vv v25, v8, v28 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <16 x float>, <16 x float>* %x %b = load <16 x float>, <16 x float>* %y @@ -324,7 +324,7 @@ ; CHECK-NEXT: vle64.v v8, (a1) ; CHECK-NEXT: vmfle.vv v25, v8, v28 ; CHECK-NEXT: vmnand.mm v25, v25, v25 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x double>, <8 x double>* %x %b = load <8 x double>, <8 x double>* %y @@ -340,7 +340,7 @@ ; CHECK-NEXT: vle64.v v28, (a0) ; CHECK-NEXT: vle64.v v8, (a1) ; CHECK-NEXT: vmflt.vv v25, v28, v8 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x double>, <8 x double>* %x %b = load <8 x double>, <8 x double>* %y @@ -358,7 +358,7 @@ ; CHECK-NEXT: vle16.v v16, (a1) ; CHECK-NEXT: vmfle.vv v25, v8, v16 ; CHECK-NEXT: vmnand.mm v25, v25, v25 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v 
v25, (a2) ; CHECK-NEXT: ret %a = load <64 x half>, <64 x half>* %x %b = load <64 x half>, <64 x half>* %y @@ -375,7 +375,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v16, (a1) ; CHECK-NEXT: vmflt.vv v25, v16, v8 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <64 x half>, <64 x half>* %x %b = load <64 x half>, <64 x half>* %y @@ -394,7 +394,7 @@ ; CHECK-NEXT: vmflt.vv v25, v8, v16 ; CHECK-NEXT: vmflt.vv v26, v16, v8 ; CHECK-NEXT: vmnor.mm v25, v26, v25 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <32 x float>, <32 x float>* %x %b = load <32 x float>, <32 x float>* %y @@ -411,7 +411,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v16, (a1) ; CHECK-NEXT: vmfeq.vv v25, v8, v16 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <32 x float>, <32 x float>* %x %b = load <32 x float>, <32 x float>* %y @@ -429,7 +429,7 @@ ; CHECK-NEXT: vmflt.vv v25, v8, v16 ; CHECK-NEXT: vmflt.vv v26, v16, v8 ; CHECK-NEXT: vmor.mm v25, v26, v25 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <16 x double>, <16 x double>* %x %b = load <16 x double>, <16 x double>* %y @@ -445,7 +445,7 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v16, (a1) ; CHECK-NEXT: vmfne.vv v25, v8, v16 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <16 x double>, <16 x double>* %x %b = load <16 x double>, <16 x double>* %y @@ -472,7 +472,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <4 x half>, <4 x half>* %x %b = load <4 x half>, <4 x half>* %y @@ -499,7 +499,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <2 x half>, <2 x half>* %x %b = load <2 x half>, <2 x half>* %y @@ -514,7 +514,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v25, (a0) ; CHECK-NEXT: vmfeq.vf v25, v25, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -530,7 +530,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v25, (a0) ; CHECK-NEXT: vmfeq.vf v25, v25, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -555,7 +555,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -580,7 +580,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -605,7 +605,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; 
CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -630,7 +630,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -646,7 +646,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v26, (a0) ; CHECK-NEXT: vmflt.vf v25, v26, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = insertelement <16 x half> undef, half %y, i32 0 @@ -662,7 +662,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v26, (a0) ; CHECK-NEXT: vmflt.vf v25, v26, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = insertelement <16 x half> undef, half %y, i32 0 @@ -678,7 +678,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vmfge.vf v25, v26, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = insertelement <8 x float> undef, float %y, i32 0 @@ -694,7 +694,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vmfge.vf v25, v26, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = insertelement <8 x float> undef, float %y, i32 0 @@ -719,7 +719,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = insertelement <4 x double> undef, double %y, i32 0 @@ -744,7 +744,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = insertelement <4 x double> undef, double %y, i32 0 @@ -762,7 +762,7 @@ ; CHECK-NEXT: vle16.v v28, (a0) ; CHECK-NEXT: vmfgt.vf v25, v28, fa0 ; CHECK-NEXT: vmnand.mm v25, v25, v25 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <32 x half>, <32 x half>* %x %b = insertelement <32 x half> undef, half %y, i32 0 @@ -779,7 +779,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v28, (a0) ; CHECK-NEXT: vmfle.vf v25, v28, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <32 x half>, <32 x half>* %x %b = insertelement <32 x half> undef, half %y, i32 0 @@ -796,7 +796,7 @@ ; CHECK-NEXT: vle32.v v28, (a0) ; CHECK-NEXT: vmflt.vf v25, v28, fa0 ; CHECK-NEXT: vmnand.mm v25, v25, v25 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x float>, <16 x float>* %x %b = insertelement <16 x float> undef, float %y, i32 0 @@ -812,7 +812,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v28, (a0) ; CHECK-NEXT: vmfge.vf v25, v28, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x float>, <16 x float>* %x %b = insertelement <16 x float> 
undef, float %y, i32 0 @@ -829,7 +829,7 @@ ; CHECK-NEXT: vle64.v v28, (a0) ; CHECK-NEXT: vmfge.vf v25, v28, fa0 ; CHECK-NEXT: vmnand.mm v25, v25, v25 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x double>, <8 x double>* %x %b = insertelement <8 x double> undef, double %y, i32 0 @@ -845,7 +845,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vle64.v v28, (a0) ; CHECK-NEXT: vmflt.vf v25, v28, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x double>, <8 x double>* %x %b = insertelement <8 x double> undef, double %y, i32 0 @@ -863,7 +863,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfle.vf v25, v8, fa0 ; CHECK-NEXT: vmnand.mm v25, v25, v25 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <64 x half>, <64 x half>* %x %b = insertelement <64 x half> undef, half %y, i32 0 @@ -880,7 +880,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <64 x half>, <64 x half>* %x %b = insertelement <64 x half> undef, half %y, i32 0 @@ -899,7 +899,7 @@ ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vmfgt.vf v26, v8, fa0 ; CHECK-NEXT: vmnor.mm v25, v26, v25 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <32 x float>, <32 x float>* %x %b = insertelement <32 x float> undef, float %y, i32 0 @@ -916,7 +916,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v25, v8, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <32 x float>, <32 x float>* %x %b = insertelement <32 x float> undef, float %y, i32 0 @@ -934,7 +934,7 @@ ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vmfgt.vf v26, v8, fa0 ; CHECK-NEXT: vmor.mm v25, v26, v25 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x double>, <16 x double>* %x %b = insertelement <16 x double> undef, double %y, i32 0 @@ -950,7 +950,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfne.vf v25, v8, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x double>, <16 x double>* %x %b = insertelement <16 x double> undef, double %y, i32 0 @@ -978,7 +978,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <4 x half>, <4 x half>* %x %b = insertelement <4 x half> undef, half %y, i32 0 @@ -1006,7 +1006,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <2 x half>, <2 x half>* %x %b = insertelement <2 x half> undef, half %y, i32 0 @@ -1022,7 +1022,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v25, (a0) ; CHECK-NEXT: vmfeq.vf v25, v25, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -1038,7 +1038,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; CHECK-NEXT: vle16.v v25, (a0) ; CHECK-NEXT: vmfeq.vf 
v25, v25, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x %b = insertelement <8 x half> undef, half %y, i32 0 @@ -1063,7 +1063,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -1088,7 +1088,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <4 x float>, <4 x float>* %x %b = insertelement <4 x float> undef, float %y, i32 0 @@ -1113,7 +1113,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -1138,7 +1138,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <2 x double>, <2 x double>* %x %b = insertelement <2 x double> undef, double %y, i32 0 @@ -1154,7 +1154,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v26, (a0) ; CHECK-NEXT: vmfgt.vf v25, v26, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = insertelement <16 x half> undef, half %y, i32 0 @@ -1170,7 +1170,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v26, (a0) ; CHECK-NEXT: vmfgt.vf v25, v26, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x %b = insertelement <16 x half> undef, half %y, i32 0 @@ -1186,7 +1186,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vmfle.vf v25, v26, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = insertelement <8 x float> undef, float %y, i32 0 @@ -1202,7 +1202,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vmfle.vf v25, v26, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x %b = insertelement <8 x float> undef, float %y, i32 0 @@ -1227,7 +1227,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = insertelement <4 x double> undef, double %y, i32 0 @@ -1252,7 +1252,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <4 x double>, <4 x double>* %x %b = insertelement <4 x double> undef, double %y, i32 0 @@ -1270,7 +1270,7 @@ ; CHECK-NEXT: vle16.v v28, (a0) ; CHECK-NEXT: vmflt.vf v25, v28, fa0 ; CHECK-NEXT: vmnand.mm v25, v25, v25 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; 
CHECK-NEXT: ret %a = load <32 x half>, <32 x half>* %x %b = insertelement <32 x half> undef, half %y, i32 0 @@ -1287,7 +1287,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v28, (a0) ; CHECK-NEXT: vmfge.vf v25, v28, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <32 x half>, <32 x half>* %x %b = insertelement <32 x half> undef, half %y, i32 0 @@ -1304,7 +1304,7 @@ ; CHECK-NEXT: vle32.v v28, (a0) ; CHECK-NEXT: vmfgt.vf v25, v28, fa0 ; CHECK-NEXT: vmnand.mm v25, v25, v25 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x float>, <16 x float>* %x %b = insertelement <16 x float> undef, float %y, i32 0 @@ -1320,7 +1320,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu ; CHECK-NEXT: vle32.v v28, (a0) ; CHECK-NEXT: vmfle.vf v25, v28, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x float>, <16 x float>* %x %b = insertelement <16 x float> undef, float %y, i32 0 @@ -1337,7 +1337,7 @@ ; CHECK-NEXT: vle64.v v28, (a0) ; CHECK-NEXT: vmfle.vf v25, v28, fa0 ; CHECK-NEXT: vmnand.mm v25, v25, v25 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x double>, <8 x double>* %x %b = insertelement <8 x double> undef, double %y, i32 0 @@ -1353,7 +1353,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; CHECK-NEXT: vle64.v v28, (a0) ; CHECK-NEXT: vmfgt.vf v25, v28, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x double>, <8 x double>* %x %b = insertelement <8 x double> undef, double %y, i32 0 @@ -1371,7 +1371,7 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfge.vf v25, v8, fa0 ; CHECK-NEXT: vmnand.mm v25, v25, v25 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <64 x half>, <64 x half>* %x %b = insertelement <64 x half> undef, half %y, i32 0 @@ -1388,7 +1388,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmflt.vf v25, v8, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <64 x half>, <64 x half>* %x %b = insertelement <64 x half> undef, half %y, i32 0 @@ -1407,7 +1407,7 @@ ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vmflt.vf v26, v8, fa0 ; CHECK-NEXT: vmnor.mm v25, v26, v25 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <32 x float>, <32 x float>* %x %b = insertelement <32 x float> undef, float %y, i32 0 @@ -1424,7 +1424,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v25, v8, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <32 x float>, <32 x float>* %x %b = insertelement <32 x float> undef, float %y, i32 0 @@ -1442,7 +1442,7 @@ ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vmflt.vf v26, v8, fa0 ; CHECK-NEXT: vmor.mm v25, v26, v25 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x double>, <16 x double>* %x %b = insertelement <16 x double> undef, double %y, i32 0 @@ -1458,7 +1458,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfne.vf v25, v8, fa0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x double>, <16 x double>* %x %b = insertelement <16 x double> undef, double %y, i32 0 @@ -1486,7 +1486,7 @@ ; 
CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <4 x half>, <4 x half>* %x %b = insertelement <4 x half> undef, half %y, i32 0 @@ -1514,7 +1514,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <2 x half>, <2 x half>* %x %b = insertelement <2 x half> undef, half %y, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll @@ -317,25 +317,25 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a2, zero, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; LMULMAX2-NEXT: vle1.v v25, (a0) +; LMULMAX2-NEXT: vlm.v v25, (a0) ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; LMULMAX2-NEXT: vle1.v v26, (a1) +; LMULMAX2-NEXT: vlm.v v26, (a1) ; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, tu, mu ; LMULMAX2-NEXT: vslideup.vi v25, v26, 0 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; LMULMAX2-NEXT: vse1.v v25, (a0) +; LMULMAX2-NEXT: vsm.v v25, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_v32i1_v8i1_0: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; LMULMAX1-NEXT: vle1.v v25, (a0) +; LMULMAX1-NEXT: vlm.v v25, (a0) ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; LMULMAX1-NEXT: vle1.v v26, (a1) +; LMULMAX1-NEXT: vlm.v v26, (a1) ; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, tu, mu ; LMULMAX1-NEXT: vslideup.vi v25, v26, 0 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; LMULMAX1-NEXT: vse1.v v25, (a0) +; LMULMAX1-NEXT: vsm.v v25, (a0) ; LMULMAX1-NEXT: ret %v = load <32 x i1>, <32 x i1>* %vp %sv = load <8 x i1>, <8 x i1>* %svp @@ -349,26 +349,26 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a2, zero, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; LMULMAX2-NEXT: vle1.v v25, (a0) +; LMULMAX2-NEXT: vlm.v v25, (a0) ; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; LMULMAX2-NEXT: vle1.v v26, (a1) +; LMULMAX2-NEXT: vlm.v v26, (a1) ; LMULMAX2-NEXT: vsetivli zero, 3, e8, mf4, tu, mu ; LMULMAX2-NEXT: vslideup.vi v25, v26, 2 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; LMULMAX2-NEXT: vse1.v v25, (a0) +; LMULMAX2-NEXT: vsm.v v25, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_v32i1_v8i1_16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a0, a0, 2 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; LMULMAX1-NEXT: vle1.v v25, (a0) +; LMULMAX1-NEXT: vlm.v v25, (a0) ; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; LMULMAX1-NEXT: vle1.v v26, (a1) +; LMULMAX1-NEXT: vlm.v v26, (a1) ; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, tu, mu ; LMULMAX1-NEXT: vslideup.vi v25, v26, 0 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; LMULMAX1-NEXT: vse1.v v25, (a0) +; LMULMAX1-NEXT: vsm.v v25, (a0) ; LMULMAX1-NEXT: ret %v = load <32 x i1>, <32 x i1>* %vp %sv = load <8 x i1>, <8 x i1>* %svp @@ -381,9 +381,9 @@ ; CHECK-LABEL: insert_v8i1_v4i1_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) +; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu -; CHECK-NEXT: vle1.v v25, (a1) +; CHECK-NEXT: vlm.v v25, (a1) ; CHECK-NEXT: vsetivli zero, 8, e8, 
mf2, ta, mu ; CHECK-NEXT: vmv.v.i v26, 0 ; CHECK-NEXT: vmerge.vim v26, v26, 1, v0 @@ -395,7 +395,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %v = load <8 x i1>, <8 x i1>* %vp %sv = load <4 x i1>, <4 x i1>* %svp @@ -408,9 +408,9 @@ ; CHECK-LABEL: insert_v8i1_v4i1_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) +; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu -; CHECK-NEXT: vle1.v v25, (a1) +; CHECK-NEXT: vlm.v v25, (a1) ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.i v26, 0 ; CHECK-NEXT: vmerge.vim v26, v26, 1, v0 @@ -422,7 +422,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 4 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %v = load <8 x i1>, <8 x i1>* %vp %sv = load <4 x i1>, <4 x i1>* %svp @@ -461,7 +461,7 @@ ; CHECK-LABEL: insert_nxv2i1_v4i1_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) +; CHECK-NEXT: vlm.v v25, (a0) ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmv.v.i v26, 0 ; CHECK-NEXT: vmerge.vim v26, v26, 1, v0 @@ -483,7 +483,7 @@ ; CHECK-LABEL: insert_nxv8i1_v4i1_0: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) +; CHECK-NEXT: vlm.v v25, (a0) ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vi v0, v25, 0 ; CHECK-NEXT: ret @@ -496,7 +496,7 @@ ; CHECK-LABEL: insert_nxv8i1_v8i1_16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) +; CHECK-NEXT: vlm.v v25, (a0) ; CHECK-NEXT: vsetivli zero, 3, e8, mf8, tu, mu ; CHECK-NEXT: vslideup.vi v0, v25, 2 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll @@ -53,7 +53,7 @@ ; CHECK-NEXT: vle8.v v28, (a0) ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vmslt.vv v25, v8, v28 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = load <64 x i8>, <64 x i8>* %y @@ -70,7 +70,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v16, (a1) ; CHECK-NEXT: vmslt.vv v25, v8, v16 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = load <128 x i8>, <128 x i8>* %y @@ -86,7 +86,7 @@ ; CHECK-NEXT: vle8.v v25, (a0) ; CHECK-NEXT: vle8.v v26, (a1) ; CHECK-NEXT: vmsle.vv v25, v26, v25 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = load <8 x i8>, <8 x i8>* %y @@ -102,7 +102,7 @@ ; CHECK-NEXT: vle8.v v25, (a0) ; CHECK-NEXT: vle8.v v26, (a1) ; CHECK-NEXT: vmsle.vv v25, v25, v26 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = load <16 x i8>, <16 x i8>* %y @@ -119,7 +119,7 @@ ; CHECK-NEXT: vle8.v v26, (a0) ; CHECK-NEXT: vle8.v v28, (a1) ; CHECK-NEXT: vmsltu.vv v25, v28, v26 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = load <32 x i8>, <32 x i8>* %y @@ 
-136,7 +136,7 @@ ; CHECK-NEXT: vle8.v v28, (a0) ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vmsltu.vv v25, v28, v8 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = load <64 x i8>, <64 x i8>* %y @@ -153,7 +153,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v16, (a1) ; CHECK-NEXT: vmsleu.vv v25, v16, v8 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = load <128 x i8>, <128 x i8>* %y @@ -169,7 +169,7 @@ ; CHECK-NEXT: vle8.v v25, (a0) ; CHECK-NEXT: vle8.v v26, (a1) ; CHECK-NEXT: vmsleu.vv v25, v25, v26 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = load <8 x i8>, <8 x i8>* %y @@ -184,7 +184,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v25, (a0) ; CHECK-NEXT: vmseq.vx v25, v25, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -201,7 +201,7 @@ ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v26, (a0) ; CHECK-NEXT: vmsne.vx v25, v26, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 %y, i32 0 @@ -218,7 +218,7 @@ ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v28, (a0) ; CHECK-NEXT: vmsgt.vx v25, v28, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 %y, i32 0 @@ -235,7 +235,7 @@ ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmslt.vx v25, v8, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = insertelement <128 x i8> undef, i8 %y, i32 0 @@ -252,7 +252,7 @@ ; CHECK-NEXT: vle8.v v25, (a0) ; CHECK-NEXT: vmv.v.x v26, a1 ; CHECK-NEXT: vmsle.vv v25, v26, v25 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 %y, i32 0 @@ -268,7 +268,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v25, (a0) ; CHECK-NEXT: vmsle.vx v25, v25, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -285,7 +285,7 @@ ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v26, (a0) ; CHECK-NEXT: vmsgtu.vx v25, v26, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 %y, i32 0 @@ -302,7 +302,7 @@ ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v28, (a0) ; CHECK-NEXT: vmsltu.vx v25, v28, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 %y, i32 0 @@ -320,7 +320,7 @@ ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.v.x v16, a1 ; CHECK-NEXT: vmsleu.vv v25, v16, v8 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = insertelement <128 x i8> undef, i8 %y, i32 0 @@ -336,7 +336,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: 
vle8.v v25, (a0) ; CHECK-NEXT: vmsleu.vx v25, v25, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 %y, i32 0 @@ -352,7 +352,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v25, (a0) ; CHECK-NEXT: vmseq.vx v25, v25, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -369,7 +369,7 @@ ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v26, (a0) ; CHECK-NEXT: vmsne.vx v25, v26, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 %y, i32 0 @@ -386,7 +386,7 @@ ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v28, (a0) ; CHECK-NEXT: vmslt.vx v25, v28, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 %y, i32 0 @@ -403,7 +403,7 @@ ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgt.vx v25, v8, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = insertelement <128 x i8> undef, i8 %y, i32 0 @@ -419,7 +419,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v25, (a0) ; CHECK-NEXT: vmsle.vx v25, v25, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 %y, i32 0 @@ -436,7 +436,7 @@ ; CHECK-NEXT: vle8.v v25, (a0) ; CHECK-NEXT: vmv.v.x v26, a1 ; CHECK-NEXT: vmsle.vv v25, v26, v25 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 %y, i32 0 @@ -453,7 +453,7 @@ ; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu ; CHECK-NEXT: vle8.v v26, (a0) ; CHECK-NEXT: vmsltu.vx v25, v26, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 %y, i32 0 @@ -470,7 +470,7 @@ ; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v28, (a0) ; CHECK-NEXT: vmsgtu.vx v25, v28, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 %y, i32 0 @@ -487,7 +487,7 @@ ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsleu.vx v25, v8, a1 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = insertelement <128 x i8> undef, i8 %y, i32 0 @@ -504,7 +504,7 @@ ; CHECK-NEXT: vle8.v v25, (a0) ; CHECK-NEXT: vmv.v.x v26, a1 ; CHECK-NEXT: vmsleu.vv v25, v26, v25 -; CHECK-NEXT: vse1.v v25, (a2) +; CHECK-NEXT: vsm.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 %y, i32 0 @@ -520,7 +520,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v25, (a0) ; CHECK-NEXT: vmseq.vi v25, v25, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 0, i32 0 @@ -537,7 +537,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; CHECK-NEXT: 
vle8.v v26, (a0) ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 0, i32 0 @@ -554,7 +554,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v28, (a0) ; CHECK-NEXT: vmsgt.vx v25, v28, zero -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 0, i32 0 @@ -571,7 +571,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsle.vi v25, v8, -1 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = insertelement <128 x i8> undef, i8 0, i32 0 @@ -587,7 +587,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v25, (a0) ; CHECK-NEXT: vmsgt.vi v25, v25, -1 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 0, i32 0 @@ -603,7 +603,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vle8.v v25, (a0) ; CHECK-NEXT: vmsle.vi v25, v25, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x %b = insertelement <16 x i8> undef, i8 0, i32 0 @@ -621,7 +621,7 @@ ; CHECK-NEXT: vle8.v v26, (a0) ; CHECK-NEXT: addi a0, zero, 5 ; CHECK-NEXT: vmsgtu.vx v25, v26, a0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <32 x i8>, <32 x i8>* %x %b = insertelement <32 x i8> undef, i8 5, i32 0 @@ -638,7 +638,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu ; CHECK-NEXT: vle8.v v28, (a0) ; CHECK-NEXT: vmsleu.vi v25, v28, 4 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <64 x i8>, <64 x i8>* %x %b = insertelement <64 x i8> undef, i8 5, i32 0 @@ -655,7 +655,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgtu.vi v25, v8, 4 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <128 x i8>, <128 x i8>* %x %b = insertelement <128 x i8> undef, i8 5, i32 0 @@ -671,7 +671,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vle8.v v25, (a0) ; CHECK-NEXT: vmsleu.vi v25, v25, 5 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x %b = insertelement <8 x i8> undef, i8 5, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll @@ -767,10 +767,10 @@ ; RV32-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI21_0) ; RV32-LMULMAX4-NEXT: addi a1, zero, 64 ; RV32-LMULMAX4-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; RV32-LMULMAX4-NEXT: vle1.v v0, (a0) +; RV32-LMULMAX4-NEXT: vlm.v v0, (a0) ; RV32-LMULMAX4-NEXT: lui a0, %hi(.LCPI21_1) ; RV32-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI21_1) -; RV32-LMULMAX4-NEXT: vle1.v v8, (a0) +; RV32-LMULMAX4-NEXT: vlm.v v8, (a0) ; RV32-LMULMAX4-NEXT: ret ; ; RV64-LMULMAX4-LABEL: buildvec_mask_optsize_v128i1: @@ -800,7 +800,7 @@ ; RV32-LMULMAX8-NEXT: addi a0, a0, %lo(.LCPI21_0) ; RV32-LMULMAX8-NEXT: addi a1, zero, 128 ; RV32-LMULMAX8-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; RV32-LMULMAX8-NEXT: vle1.v v0, (a0) +; 
RV32-LMULMAX8-NEXT: vlm.v v0, (a0) ; RV32-LMULMAX8-NEXT: ret ; ; RV64-LMULMAX8-LABEL: buildvec_mask_optsize_v128i1: @@ -809,7 +809,7 @@ ; RV64-LMULMAX8-NEXT: addi a0, a0, %lo(.LCPI21_0) ; RV64-LMULMAX8-NEXT: addi a1, zero, 128 ; RV64-LMULMAX8-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; RV64-LMULMAX8-NEXT: vle1.v v0, (a0) +; RV64-LMULMAX8-NEXT: vlm.v v0, (a0) ; RV64-LMULMAX8-NEXT: ret ret <128 x i1> } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll @@ -8,7 +8,7 @@ ; CHECK-LABEL: load_store_v1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) +; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: vmv.v.i v25, 0 ; CHECK-NEXT: vmerge.vim v25, v25, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -17,7 +17,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <1 x i1>, <1 x i1>* %x store <1 x i1> %a, <1 x i1>* %y @@ -28,7 +28,7 @@ ; CHECK-LABEL: load_store_v2i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) +; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: vmv.v.i v25, 0 ; CHECK-NEXT: vmerge.vim v25, v25, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -37,7 +37,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <2 x i1>, <2 x i1>* %x store <2 x i1> %a, <2 x i1>* %y @@ -48,7 +48,7 @@ ; CHECK-LABEL: load_store_v4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) +; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: vmv.v.i v25, 0 ; CHECK-NEXT: vmerge.vim v25, v25, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu @@ -57,7 +57,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <4 x i1>, <4 x i1>* %x store <4 x i1> %a, <4 x i1>* %y @@ -68,8 +68,8 @@ ; CHECK-LABEL: load_store_v8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x i1>, <8 x i1>* %x store <8 x i1> %a, <8 x i1>* %y @@ -80,8 +80,8 @@ ; CHECK-LABEL: load_store_v16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x i1>, <16 x i1>* %x store <16 x i1> %a, <16 x i1>* %y @@ -93,8 +93,8 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a2, zero, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; LMULMAX2-NEXT: vle1.v v25, (a0) -; LMULMAX2-NEXT: vse1.v v25, (a1) +; LMULMAX2-NEXT: vlm.v v25, (a0) +; LMULMAX2-NEXT: vsm.v v25, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: load_store_v32i1: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll @@ -8,10 +8,10 @@ ; CHECK-LABEL: and_v8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vle1.v v26, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vlm.v v26, (a1) ; CHECK-NEXT: vmand.mm v25, v25, v26 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %a = load <8 x i1>, <8 x i1>* %x %b = load <8 x i1>, <8 x i1>* %y @@ -24,10 +24,10 @@ ; CHECK-LABEL: or_v16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vle1.v v26, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vlm.v v26, (a1) ; CHECK-NEXT: vmor.mm v25, v25, v26 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %a = load <16 x i1>, <16 x i1>* %x %b = load <16 x i1>, <16 x i1>* %y @@ -41,10 +41,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addi a2, zero, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vle1.v v26, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vlm.v v26, (a1) ; CHECK-NEXT: vmxor.mm v25, v25, v26 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %a = load <32 x i1>, <32 x i1>* %x %b = load <32 x i1>, <32 x i1>* %y @@ -58,9 +58,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, zero, 64 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) +; CHECK-NEXT: vlm.v v25, (a0) ; CHECK-NEXT: vmnand.mm v25, v25, v25 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x %b = load <64 x i1>, <64 x i1>* %y @@ -73,10 +73,10 @@ ; CHECK-LABEL: andnot_v8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vle1.v v26, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vlm.v v26, (a1) ; CHECK-NEXT: vmandnot.mm v25, v26, v25 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %a = load <8 x i1>, <8 x i1>* %x %b = load <8 x i1>, <8 x i1>* %y @@ -90,10 +90,10 @@ ; CHECK-LABEL: ornot_v16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vle1.v v26, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vlm.v v26, (a1) ; CHECK-NEXT: vmornot.mm v25, v26, v25 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %a = load <16 x i1>, <16 x i1>* %x %b = load <16 x i1>, <16 x i1>* %y @@ -108,10 +108,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addi a2, zero, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vle1.v v26, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vlm.v v26, (a1) ; CHECK-NEXT: vmxnor.mm v25, v25, v26 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %a = load <32 x i1>, <32 x i1>* %x %b = load <32 x i1>, <32 x i1>* %y @@ -125,10 +125,10 @@ ; CHECK-LABEL: nand_v8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vle1.v v26, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vlm.v v26, (a1) ; CHECK-NEXT: vmnand.mm v25, v25, v26 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %a = load <8 x i1>, <8 x i1>* %x %b = load <8 x i1>, <8 x i1>* %y @@ -142,10 +142,10 @@ ; CHECK-LABEL: nor_v16i1: ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vle1.v v26, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vlm.v v26, (a1) ; CHECK-NEXT: vmnor.mm v25, v25, v26 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %a = load <16 x i1>, <16 x i1>* %x %b = load <16 x i1>, <16 x i1>* %y @@ -160,10 +160,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addi a2, zero, 32 ; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vle1.v v26, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vlm.v v26, (a1) ; CHECK-NEXT: vmxnor.mm v25, v25, v26 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %a = load <32 x i1>, <32 x i1>* %x %b = load <32 x i1>, <32 x i1>* %y diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll @@ -17,7 +17,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret store <1 x i1> , <1 x i1>* %x ret void @@ -36,7 +36,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret store <2 x i1> zeroinitializer, <2 x i1>* %x ret void @@ -57,7 +57,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %a = insertelement <1 x i1> undef, i1 %y, i32 0 %b = shufflevector <1 x i1> %a, <1 x i1> undef, <1 x i32> zeroinitializer @@ -81,7 +81,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %c = icmp eq i32 %y, %z %a = insertelement <1 x i1> undef, i1 %c, i32 0 @@ -103,7 +103,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret store <4 x i1> , <4 x i1>* %x ret void @@ -124,7 +124,7 @@ ; CHECK-NEXT: vslideup.vi v26, v25, 0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v25, v26, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %a = insertelement <4 x i1> undef, i1 %y, i32 0 %b = shufflevector <4 x i1> %a, <4 x i1> undef, <4 x i32> zeroinitializer @@ -137,7 +137,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmclr.m v25 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret store <8 x i1> zeroinitializer, <8 x i1>* %x ret void @@ -150,7 +150,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vmv.v.x v25, a1 ; CHECK-NEXT: vmsne.vi v25, v25, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %a = insertelement <8 x i1> undef, i1 %y, i32 0 %b = shufflevector <8 x i1> %a, <8 x i1> undef, <8 x i32> zeroinitializer @@ -163,7 +163,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vmset.m v25 -; CHECK-NEXT: vse1.v v25, (a0) +; 
CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret store <16 x i1> , <16 x i1>* %x ret void @@ -176,7 +176,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vmv.v.x v25, a1 ; CHECK-NEXT: vmsne.vi v25, v25, 0 -; CHECK-NEXT: vse1.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a0) ; CHECK-NEXT: ret %a = insertelement <16 x i1> undef, i1 %y, i32 0 %b = shufflevector <16 x i1> %a, <16 x i1> undef, <16 x i32> zeroinitializer @@ -190,25 +190,25 @@ ; LMULMAX2-NEXT: addi a1, zero, 32 ; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; LMULMAX2-NEXT: vmclr.m v25 -; LMULMAX2-NEXT: vse1.v v25, (a0) +; LMULMAX2-NEXT: vsm.v v25, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_zeros_v32i1: ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX1-RV32-NEXT: vmclr.m v25 -; LMULMAX1-RV32-NEXT: vse1.v v25, (a0) +; LMULMAX1-RV32-NEXT: vsm.v v25, (a0) ; LMULMAX1-RV32-NEXT: addi a0, a0, 2 -; LMULMAX1-RV32-NEXT: vse1.v v25, (a0) +; LMULMAX1-RV32-NEXT: vsm.v v25, (a0) ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: splat_zeros_v32i1: ; LMULMAX1-RV64: # %bb.0: ; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX1-RV64-NEXT: vmclr.m v25 -; LMULMAX1-RV64-NEXT: vse1.v v25, (a0) +; LMULMAX1-RV64-NEXT: vsm.v v25, (a0) ; LMULMAX1-RV64-NEXT: addi a0, a0, 2 -; LMULMAX1-RV64-NEXT: vse1.v v25, (a0) +; LMULMAX1-RV64-NEXT: vsm.v v25, (a0) ; LMULMAX1-RV64-NEXT: ret store <32 x i1> zeroinitializer, <32 x i1>* %x ret void @@ -222,7 +222,7 @@ ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vmv.v.x v26, a1 ; LMULMAX2-NEXT: vmsne.vi v25, v26, 0 -; LMULMAX2-NEXT: vse1.v v25, (a0) +; LMULMAX2-NEXT: vsm.v v25, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_v32i1: @@ -232,8 +232,8 @@ ; LMULMAX1-RV32-NEXT: vmv.v.x v25, a1 ; LMULMAX1-RV32-NEXT: vmsne.vi v25, v25, 0 ; LMULMAX1-RV32-NEXT: addi a1, a0, 2 -; LMULMAX1-RV32-NEXT: vse1.v v25, (a1) -; LMULMAX1-RV32-NEXT: vse1.v v25, (a0) +; LMULMAX1-RV32-NEXT: vsm.v v25, (a1) +; LMULMAX1-RV32-NEXT: vsm.v v25, (a0) ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: splat_v32i1: @@ -243,8 +243,8 @@ ; LMULMAX1-RV64-NEXT: vmv.v.x v25, a1 ; LMULMAX1-RV64-NEXT: vmsne.vi v25, v25, 0 ; LMULMAX1-RV64-NEXT: addi a1, a0, 2 -; LMULMAX1-RV64-NEXT: vse1.v v25, (a1) -; LMULMAX1-RV64-NEXT: vse1.v v25, (a0) +; LMULMAX1-RV64-NEXT: vsm.v v25, (a1) +; LMULMAX1-RV64-NEXT: vsm.v v25, (a0) ; LMULMAX1-RV64-NEXT: ret %a = insertelement <32 x i1> undef, i1 %y, i32 0 %b = shufflevector <32 x i1> %a, <32 x i1> undef, <32 x i32> zeroinitializer @@ -259,34 +259,34 @@ ; LMULMAX2-NEXT: addi a2, zero, 32 ; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu ; LMULMAX2-NEXT: vmset.m v25 -; LMULMAX2-NEXT: vse1.v v25, (a1) -; LMULMAX2-NEXT: vse1.v v25, (a0) +; LMULMAX2-NEXT: vsm.v v25, (a1) +; LMULMAX2-NEXT: vsm.v v25, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_ones_v64i1: ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX1-RV32-NEXT: vmset.m v25 -; LMULMAX1-RV32-NEXT: vse1.v v25, (a0) +; LMULMAX1-RV32-NEXT: vsm.v v25, (a0) ; LMULMAX1-RV32-NEXT: addi a1, a0, 6 -; LMULMAX1-RV32-NEXT: vse1.v v25, (a1) +; LMULMAX1-RV32-NEXT: vsm.v v25, (a1) ; LMULMAX1-RV32-NEXT: addi a1, a0, 4 -; LMULMAX1-RV32-NEXT: vse1.v v25, (a1) +; LMULMAX1-RV32-NEXT: vsm.v v25, (a1) ; LMULMAX1-RV32-NEXT: addi a0, a0, 2 -; LMULMAX1-RV32-NEXT: vse1.v v25, (a0) +; LMULMAX1-RV32-NEXT: vsm.v v25, (a0) ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: splat_ones_v64i1: ; LMULMAX1-RV64: # %bb.0: ; LMULMAX1-RV64-NEXT: vsetivli 
zero, 16, e8, m1, ta, mu ; LMULMAX1-RV64-NEXT: vmset.m v25 -; LMULMAX1-RV64-NEXT: vse1.v v25, (a0) +; LMULMAX1-RV64-NEXT: vsm.v v25, (a0) ; LMULMAX1-RV64-NEXT: addi a1, a0, 6 -; LMULMAX1-RV64-NEXT: vse1.v v25, (a1) +; LMULMAX1-RV64-NEXT: vsm.v v25, (a1) ; LMULMAX1-RV64-NEXT: addi a1, a0, 4 -; LMULMAX1-RV64-NEXT: vse1.v v25, (a1) +; LMULMAX1-RV64-NEXT: vsm.v v25, (a1) ; LMULMAX1-RV64-NEXT: addi a0, a0, 2 -; LMULMAX1-RV64-NEXT: vse1.v v25, (a0) +; LMULMAX1-RV64-NEXT: vsm.v v25, (a0) ; LMULMAX1-RV64-NEXT: ret store <64 x i1> , <64 x i1>* %x ret void @@ -301,8 +301,8 @@ ; LMULMAX2-NEXT: vmv.v.x v26, a1 ; LMULMAX2-NEXT: vmsne.vi v25, v26, 0 ; LMULMAX2-NEXT: addi a1, a0, 4 -; LMULMAX2-NEXT: vse1.v v25, (a1) -; LMULMAX2-NEXT: vse1.v v25, (a0) +; LMULMAX2-NEXT: vsm.v v25, (a1) +; LMULMAX2-NEXT: vsm.v v25, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_v64i1: @@ -312,12 +312,12 @@ ; LMULMAX1-RV32-NEXT: vmv.v.x v25, a1 ; LMULMAX1-RV32-NEXT: vmsne.vi v25, v25, 0 ; LMULMAX1-RV32-NEXT: addi a1, a0, 6 -; LMULMAX1-RV32-NEXT: vse1.v v25, (a1) +; LMULMAX1-RV32-NEXT: vsm.v v25, (a1) ; LMULMAX1-RV32-NEXT: addi a1, a0, 4 -; LMULMAX1-RV32-NEXT: vse1.v v25, (a1) +; LMULMAX1-RV32-NEXT: vsm.v v25, (a1) ; LMULMAX1-RV32-NEXT: addi a1, a0, 2 -; LMULMAX1-RV32-NEXT: vse1.v v25, (a1) -; LMULMAX1-RV32-NEXT: vse1.v v25, (a0) +; LMULMAX1-RV32-NEXT: vsm.v v25, (a1) +; LMULMAX1-RV32-NEXT: vsm.v v25, (a0) ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX1-RV64-LABEL: splat_v64i1: @@ -327,12 +327,12 @@ ; LMULMAX1-RV64-NEXT: vmv.v.x v25, a1 ; LMULMAX1-RV64-NEXT: vmsne.vi v25, v25, 0 ; LMULMAX1-RV64-NEXT: addi a1, a0, 6 -; LMULMAX1-RV64-NEXT: vse1.v v25, (a1) +; LMULMAX1-RV64-NEXT: vsm.v v25, (a1) ; LMULMAX1-RV64-NEXT: addi a1, a0, 4 -; LMULMAX1-RV64-NEXT: vse1.v v25, (a1) +; LMULMAX1-RV64-NEXT: vsm.v v25, (a1) ; LMULMAX1-RV64-NEXT: addi a1, a0, 2 -; LMULMAX1-RV64-NEXT: vse1.v v25, (a1) -; LMULMAX1-RV64-NEXT: vse1.v v25, (a0) +; LMULMAX1-RV64-NEXT: vsm.v v25, (a1) +; LMULMAX1-RV64-NEXT: vsm.v v25, (a0) ; LMULMAX1-RV64-NEXT: ret %a = insertelement <64 x i1> undef, i1 %y, i32 0 %b = shufflevector <64 x i1> %a, <64 x i1> undef, <64 x i32> zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll @@ -85,7 +85,7 @@ ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v25, v26, 0 ; RV32-NEXT: addi a0, sp, 15 -; RV32-NEXT: vse1.v v25, (a0) +; RV32-NEXT: vsm.v v25, (a0) ; RV32-NEXT: lbu a0, 15(sp) ; RV32-NEXT: andi a1, a0, 1 ; RV32-NEXT: beqz a1, .LBB4_2 @@ -132,7 +132,7 @@ ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v25, v26, 0 ; RV64-NEXT: addi a0, sp, 15 -; RV64-NEXT: vse1.v v25, (a0) +; RV64-NEXT: vsm.v v25, (a0) ; RV64-NEXT: lbu a0, 15(sp) ; RV64-NEXT: andi a1, a0, 1 ; RV64-NEXT: beqz a1, .LBB4_2 @@ -185,7 +185,7 @@ ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v25, v26, 0 ; RV32-NEXT: addi a0, sp, 15 -; RV32-NEXT: vse1.v v25, (a0) +; RV32-NEXT: vsm.v v25, (a0) ; RV32-NEXT: lbu a0, 15(sp) ; RV32-NEXT: andi a1, a0, 1 ; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, mu @@ -232,7 +232,7 @@ ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v25, v26, 0 ; RV64-NEXT: addi a0, sp, 15 -; RV64-NEXT: vse1.v v25, (a0) +; RV64-NEXT: vsm.v v25, (a0) ; RV64-NEXT: lbu a0, 15(sp) ; RV64-NEXT: andi a1, a0, 1 ; RV64-NEXT: beqz a1, .LBB5_2 @@ -285,7 +285,7 @@ ; RV32-NEXT: 
vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v25, v26, 0 ; RV32-NEXT: addi a0, sp, 15 -; RV32-NEXT: vse1.v v25, (a0) +; RV32-NEXT: vsm.v v25, (a0) ; RV32-NEXT: lbu a0, 15(sp) ; RV32-NEXT: andi a1, a0, 1 ; RV32-NEXT: bnez a1, .LBB6_5 @@ -362,7 +362,7 @@ ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v25, v26, 0 ; RV64-NEXT: addi a0, sp, 15 -; RV64-NEXT: vse1.v v25, (a0) +; RV64-NEXT: vsm.v v25, (a0) ; RV64-NEXT: lbu a0, 15(sp) ; RV64-NEXT: andi a1, a0, 1 ; RV64-NEXT: bnez a1, .LBB6_5 @@ -445,7 +445,7 @@ ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v25, v26, 0 ; RV32-NEXT: addi a0, sp, 15 -; RV32-NEXT: vse1.v v25, (a0) +; RV32-NEXT: vsm.v v25, (a0) ; RV32-NEXT: lbu a0, 15(sp) ; RV32-NEXT: andi a1, a0, 1 ; RV32-NEXT: bnez a1, .LBB7_3 @@ -490,7 +490,7 @@ ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v25, v26, 0 ; RV64-NEXT: addi a0, sp, 15 -; RV64-NEXT: vse1.v v25, (a0) +; RV64-NEXT: vsm.v v25, (a0) ; RV64-NEXT: lbu a0, 15(sp) ; RV64-NEXT: andi a1, a0, 1 ; RV64-NEXT: bnez a1, .LBB7_3 @@ -544,7 +544,7 @@ ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v25, v26, 0 ; RV32-NEXT: addi a2, sp, 15 -; RV32-NEXT: vse1.v v25, (a2) +; RV32-NEXT: vsm.v v25, (a2) ; RV32-NEXT: lbu a2, 15(sp) ; RV32-NEXT: andi a3, a2, 1 ; RV32-NEXT: beqz a3, .LBB8_2 @@ -604,7 +604,7 @@ ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v25, v26, 0 ; RV64-NEXT: addi a2, sp, 15 -; RV64-NEXT: vse1.v v25, (a2) +; RV64-NEXT: vsm.v v25, (a2) ; RV64-NEXT: lbu a2, 15(sp) ; RV64-NEXT: andi a3, a2, 1 ; RV64-NEXT: beqz a3, .LBB8_2 @@ -672,7 +672,7 @@ ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: vmsne.vi v25, v26, 0 ; RV32-NEXT: addi a1, sp, 15 -; RV32-NEXT: vse1.v v25, (a1) +; RV32-NEXT: vsm.v v25, (a1) ; RV32-NEXT: lbu a1, 15(sp) ; RV32-NEXT: andi a2, a1, 1 ; RV32-NEXT: bnez a2, .LBB9_3 @@ -715,7 +715,7 @@ ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: vmsne.vi v25, v26, 0 ; RV64-NEXT: addi a1, sp, 15 -; RV64-NEXT: vse1.v v25, (a1) +; RV64-NEXT: vsm.v v25, (a1) ; RV64-NEXT: lbu a1, 15(sp) ; RV64-NEXT: andi a2, a1, 1 ; RV64-NEXT: bnez a2, .LBB9_3 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll @@ -403,7 +403,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addi a2, zero, 128 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu -; CHECK-NEXT: vle1.v v26, (a0) +; CHECK-NEXT: vlm.v v26, (a0) ; CHECK-NEXT: addi a3, a1, -128 ; CHECK-NEXT: vmv1r.v v25, v0 ; CHECK-NEXT: mv a0, zero @@ -462,7 +462,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, zero, 128 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) +; CHECK-NEXT: vlm.v v25, (a0) ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t ; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v25 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll @@ -7,7 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v26, (a0) -; CHECK-NEXT: vle1.v v0, (a2) +; CHECK-NEXT: vlm.v v0, (a2) ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vmerge.vvm v26, v28, v26, v0 ; CHECK-NEXT: vse32.v v26, (a3) @@ -24,7 +24,7 @@ ; CHECK-LABEL: 
vselect_vx_v8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; CHECK-NEXT: vle1.v v0, (a2) +; CHECK-NEXT: vlm.v v0, (a2) ; CHECK-NEXT: vle32.v v26, (a1) ; CHECK-NEXT: vmerge.vxm v26, v26, a0, v0 ; CHECK-NEXT: vse32.v v26, (a3) @@ -42,7 +42,7 @@ ; CHECK-LABEL: vselect_vi_v8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; CHECK-NEXT: vle1.v v0, (a1) +; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vmerge.vim v26, v26, -1, v0 ; CHECK-NEXT: vse32.v v26, (a2) @@ -61,7 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; CHECK-NEXT: vle32.v v26, (a0) -; CHECK-NEXT: vle1.v v0, (a2) +; CHECK-NEXT: vlm.v v0, (a2) ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vmerge.vvm v26, v28, v26, v0 ; CHECK-NEXT: vse32.v v26, (a3) @@ -78,7 +78,7 @@ ; CHECK-LABEL: vselect_vx_v8f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; CHECK-NEXT: vle1.v v0, (a1) +; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vfmerge.vfm v26, v26, fa0, v0 ; CHECK-NEXT: vse32.v v26, (a2) @@ -96,7 +96,7 @@ ; CHECK-LABEL: vselect_vfpzero_v8f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu -; CHECK-NEXT: vle1.v v0, (a1) +; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vmerge.vim v26, v26, 0, v0 ; CHECK-NEXT: vse32.v v26, (a2) @@ -115,7 +115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; CHECK-NEXT: vle16.v v26, (a0) -; CHECK-NEXT: vle1.v v0, (a2) +; CHECK-NEXT: vlm.v v0, (a2) ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vmerge.vvm v26, v28, v26, v0 ; CHECK-NEXT: vse16.v v26, (a3) @@ -132,7 +132,7 @@ ; CHECK-LABEL: vselect_vx_v16i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu -; CHECK-NEXT: vle1.v v0, (a2) +; CHECK-NEXT: vlm.v v0, (a2) ; CHECK-NEXT: vle16.v v26, (a1) ; CHECK-NEXT: vmerge.vxm v26, v26, a0, v0 ; CHECK-NEXT: vse16.v v26, (a3) @@ -150,7 +150,7 @@ ; CHECK-LABEL: vselect_vi_v16i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu -; CHECK-NEXT: vle1.v v0, (a1) +; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: vle16.v v26, (a0) ; CHECK-NEXT: vmerge.vim v26, v26, 4, v0 ; CHECK-NEXT: vse16.v v26, (a2) @@ -170,7 +170,7 @@ ; CHECK-NEXT: addi a4, zero, 32 ; CHECK-NEXT: vsetvli zero, a4, e16, m4, ta, mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vle1.v v0, (a2) +; CHECK-NEXT: vlm.v v0, (a2) ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vmerge.vvm v28, v8, v28, v0 ; CHECK-NEXT: vse16.v v28, (a3) @@ -188,7 +188,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addi a3, zero, 32 ; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu -; CHECK-NEXT: vle1.v v0, (a1) +; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: vle16.v v28, (a0) ; CHECK-NEXT: vfmerge.vfm v28, v28, fa0, v0 ; CHECK-NEXT: vse16.v v28, (a2) @@ -207,7 +207,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addi a3, zero, 32 ; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu -; CHECK-NEXT: vle1.v v0, (a1) +; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: vle16.v v28, (a0) ; CHECK-NEXT: vmerge.vim v28, v28, 0, v0 ; CHECK-NEXT: vse16.v v28, (a2) diff --git a/llvm/test/CodeGen/RISCV/rvv/load-mask.ll b/llvm/test/CodeGen/RISCV/rvv/load-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/load-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/load-mask.ll @@ -8,8 +8,8 @@ ; CHECK-LABEL: test_load_mask_64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vsm.v v25, 
(a1) ; CHECK-NEXT: ret %a = load , * %pa store %a, * %pb @@ -20,8 +20,8 @@ ; CHECK-LABEL: test_load_mask_32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load , * %pa store %a, * %pb @@ -32,8 +32,8 @@ ; CHECK-LABEL: test_load_mask_16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load , * %pa store %a, * %pb @@ -44,8 +44,8 @@ ; CHECK-LABEL: test_load_mask_8: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load , * %pa store %a, * %pb @@ -56,8 +56,8 @@ ; CHECK-LABEL: test_load_mask_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load , * %pa store %a, * %pb @@ -68,8 +68,8 @@ ; CHECK-LABEL: test_load_mask_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load , * %pa store %a, * %pb @@ -80,8 +80,8 @@ ; CHECK-LABEL: test_load_mask_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) -; CHECK-NEXT: vse1.v v25, (a1) +; CHECK-NEXT: vlm.v v25, (a0) +; CHECK-NEXT: vsm.v v25, (a1) ; CHECK-NEXT: ret %a = load , * %pa store %a, * %pb diff --git a/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll b/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll --- a/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll @@ -93,7 +93,7 @@ ; CHECK-LABEL: unaligned_load_nxv1i1_a1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) +; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret %v = load , * %ptr, align 1 ret %v diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll @@ -642,7 +642,7 @@ ; CHECK-NEXT: .LBB49_2: ; CHECK-NEXT: mv a4, zero ; CHECK-NEXT: vsetvli a5, zero, e8, m8, ta, mu -; CHECK-NEXT: vle1.v v25, (a0) +; CHECK-NEXT: vlm.v v25, (a0) ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: sub a0, a1, a2 ; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vle1-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vle1-rv32.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vle1-rv32.ll +++ /dev/null @@ -1,94 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s - -declare @llvm.riscv.vle1.nxv1i1(*, i32); - -define @intrinsic_vle1_v_nxv1i1(* %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vle1_v_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vle1.nxv1i1(* %0, i32 %1) - ret %a -} - -declare @llvm.riscv.vle1.nxv2i1(*, i32); 
- -define @intrinsic_vle1_v_nxv2i1(* %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vle1_v_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vle1.nxv2i1(* %0, i32 %1) - ret %a -} - -declare @llvm.riscv.vle1.nxv4i1(*, i32); - -define @intrinsic_vle1_v_nxv4i1(* %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vle1_v_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vle1.nxv4i1(* %0, i32 %1) - ret %a -} - -declare @llvm.riscv.vle1.nxv8i1(*, i32); - -define @intrinsic_vle1_v_nxv8i1(* %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vle1_v_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vle1.nxv8i1(* %0, i32 %1) - ret %a -} - -declare @llvm.riscv.vle1.nxv16i1(*, i32); - -define @intrinsic_vle1_v_nxv16i1(* %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vle1_v_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vle1.nxv16i1(* %0, i32 %1) - ret %a -} - -declare @llvm.riscv.vle1.nxv32i1(*, i32); - -define @intrinsic_vle1_v_nxv32i1(* %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vle1_v_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vle1.nxv32i1(* %0, i32 %1) - ret %a -} - -declare @llvm.riscv.vle1.nxv64i1(*, i32); - -define @intrinsic_vle1_v_nxv64i1(* %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vle1_v_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vle1.nxv64i1(* %0, i32 %1) - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vle1-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vle1-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vle1-rv64.ll +++ /dev/null @@ -1,94 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ -; RUN: < %s | FileCheck %s - -declare @llvm.riscv.vle1.nxv1i1(*, i64); - -define @intrinsic_vle1_v_nxv1i1(* %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vle1_v_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vle1.nxv1i1(* %0, i64 %1) - ret %a -} - -declare @llvm.riscv.vle1.nxv2i1(*, i64); - -define @intrinsic_vle1_v_nxv2i1(* %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vle1_v_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vle1.nxv2i1(* %0, i64 %1) - ret %a -} - -declare @llvm.riscv.vle1.nxv4i1(*, i64); - -define @intrinsic_vle1_v_nxv4i1(* %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vle1_v_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vle1.v v0, (a0) -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vle1.nxv4i1(* %0, i64 %1) - ret %a -} - -declare @llvm.riscv.vle1.nxv8i1(*, i64); - -define @intrinsic_vle1_v_nxv8i1(* %0, i64 %1) nounwind { -; CHECK-LABEL: 
intrinsic_vle1_v_nxv8i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vle1.v v0, (a0)
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vle1.nxv8i1(<vscale x 8 x i1>* %0, i64 %1)
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vle1.nxv16i1(<vscale x 16 x i1>*, i64);
-
-define <vscale x 16 x i1> @intrinsic_vle1_v_nxv16i1(<vscale x 16 x i1>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle1_v_nxv16i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vle1.v v0, (a0)
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vle1.nxv16i1(<vscale x 16 x i1>* %0, i64 %1)
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vle1.nxv32i1(<vscale x 32 x i1>*, i64);
-
-define <vscale x 32 x i1> @intrinsic_vle1_v_nxv32i1(<vscale x 32 x i1>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle1_v_nxv32i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vle1.v v0, (a0)
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vle1.nxv32i1(<vscale x 32 x i1>* %0, i64 %1)
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vle1.nxv64i1(<vscale x 64 x i1>*, i64);
-
-define <vscale x 64 x i1> @intrinsic_vle1_v_nxv64i1(<vscale x 64 x i1>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle1_v_nxv64i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vle1.v v0, (a0)
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vle1.nxv64i1(<vscale x 64 x i1>* %0, i64 %1)
- ret <vscale x 64 x i1> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlm-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlm-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlm-rv32.ll
@@ -0,0 +1,94 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: < %s | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>*, i32);
+
+define <vscale x 1 x i1> @intrinsic_vlm_v_nxv1i1(<vscale x 1 x i1>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv1i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>* %0, i32 %1)
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>*, i32);
+
+define <vscale x 2 x i1> @intrinsic_vlm_v_nxv2i1(<vscale x 2 x i1>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv2i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>* %0, i32 %1)
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>*, i32);
+
+define <vscale x 4 x i1> @intrinsic_vlm_v_nxv4i1(<vscale x 4 x i1>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv4i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>* %0, i32 %1)
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>*, i32);
+
+define <vscale x 8 x i1> @intrinsic_vlm_v_nxv8i1(<vscale x 8 x i1>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv8i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>* %0, i32 %1)
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>*, i32);
+
+define <vscale x 16 x i1> @intrinsic_vlm_v_nxv16i1(<vscale x 16 x i1>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv16i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>* %0, i32 %1)
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>*, i32);
+
+define <vscale x 32 x i1> @intrinsic_vlm_v_nxv32i1(<vscale x 32 x i1>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv32i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>* %0, i32 %1)
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>*, i32);
+
+define <vscale x 64 x i1> @intrinsic_vlm_v_nxv64i1(<vscale x 64 x i1>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv64i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>* %0, i32 %1)
+ ret <vscale x 64 x i1> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlm-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlm-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlm-rv64.ll
@@ -0,0 +1,94 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: < %s | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>*, i64);
+
+define <vscale x 1 x i1> @intrinsic_vlm_v_nxv1i1(<vscale x 1 x i1>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv1i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(<vscale x 1 x i1>* %0, i64 %1)
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>*, i64);
+
+define <vscale x 2 x i1> @intrinsic_vlm_v_nxv2i1(<vscale x 2 x i1>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv2i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(<vscale x 2 x i1>* %0, i64 %1)
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>*, i64);
+
+define <vscale x 4 x i1> @intrinsic_vlm_v_nxv4i1(<vscale x 4 x i1>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv4i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(<vscale x 4 x i1>* %0, i64 %1)
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>*, i64);
+
+define <vscale x 8 x i1> @intrinsic_vlm_v_nxv8i1(<vscale x 8 x i1>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv8i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(<vscale x 8 x i1>* %0, i64 %1)
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>*, i64);
+
+define <vscale x 16 x i1> @intrinsic_vlm_v_nxv16i1(<vscale x 16 x i1>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv16i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(<vscale x 16 x i1>* %0, i64 %1)
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>*, i64);
+
+define <vscale x 32 x i1> @intrinsic_vlm_v_nxv32i1(<vscale x 32 x i1>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv32i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(<vscale x 32 x i1>* %0, i64 %1)
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>*, i64);
+
+define <vscale x 64 x i1> @intrinsic_vlm_v_nxv64i1(<vscale x 64 x i1>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv64i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(<vscale x 64 x i1>* %0, i64 %1)
+ ret <vscale x 64 x i1> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse1-rv32.ll
+++ /dev/null
@@ -1,137 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
-; RUN: <
%s | FileCheck %s - -declare void @llvm.riscv.vse1.nxv1i1(, *, i32); - -define void @intrinsic_vse1_v_nxv1i1( %0, * %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vse1_v_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vse1.v v0, (a0) -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vse1.nxv1i1( %0, * %1, i32 %2) - ret void -} - -declare void @llvm.riscv.vse1.nxv2i1(, *, i32); - -define void @intrinsic_vse1_v_nxv2i1( %0, * %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vse1_v_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vse1.v v0, (a0) -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vse1.nxv2i1( %0, * %1, i32 %2) - ret void -} - -declare void @llvm.riscv.vse1.nxv4i1(, *, i32); - -define void @intrinsic_vse1_v_nxv4i1( %0, * %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vse1_v_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vse1.v v0, (a0) -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vse1.nxv4i1( %0, * %1, i32 %2) - ret void -} - -declare void @llvm.riscv.vse1.nxv8i1(, *, i32); - -define void @intrinsic_vse1_v_nxv8i1( %0, * %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vse1_v_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vse1.v v0, (a0) -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vse1.nxv8i1( %0, * %1, i32 %2) - ret void -} - -declare void @llvm.riscv.vse1.nxv16i1(, *, i32); - -define void @intrinsic_vse1_v_nxv16i1( %0, * %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vse1_v_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu -; CHECK-NEXT: vse1.v v0, (a0) -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vse1.nxv16i1( %0, * %1, i32 %2) - ret void -} - -declare void @llvm.riscv.vse1.nxv32i1(, *, i32); - -define void @intrinsic_vse1_v_nxv32i1( %0, * %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vse1_v_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu -; CHECK-NEXT: vse1.v v0, (a0) -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vse1.nxv32i1( %0, * %1, i32 %2) - ret void -} - -declare void @llvm.riscv.vse1.nxv64i1(, *, i32); - -define void @intrinsic_vse1_v_nxv64i1( %0, * %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vse1_v_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vse1.v v0, (a0) -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vse1.nxv64i1( %0, * %1, i32 %2) - ret void -} - -declare @llvm.riscv.vmseq.nxv1i16( - , - , - i32); - -; Make sure we can use the vsetvli from the producing instruction. 
-define void @test_vsetvli_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1>* %2, i32 %3) nounwind {
-; CHECK-LABEL: test_vsetvli_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmseq.vv v25, v8, v9
-; CHECK-NEXT:    vse1.v v25, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i32 %3)
-  call void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i32 %3)
-  ret void
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define void @test_vsetvli_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1>* %2, i32 %3) nounwind {
-; CHECK-LABEL: test_vsetvli_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmseq.vv v25, v8, v9
-; CHECK-NEXT:    vse1.v v25, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %3)
-  call void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i32 %3)
-  ret void
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse1-rv64.ll
+++ /dev/null
@@ -1,137 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-
-declare void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>*, i64);
-
-define void @intrinsic_vse1_v_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse1_v_nxv1i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, i64 %2)
-  ret void
-}
-
-declare void @llvm.riscv.vse1.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>*, i64);
-
-define void @intrinsic_vse1_v_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse1_v_nxv2i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse1.nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, i64 %2)
-  ret void
-}
-
-declare void @llvm.riscv.vse1.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>*, i64);
-
-define void @intrinsic_vse1_v_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse1_v_nxv4i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse1.nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, i64 %2)
-  ret void
-}
-
-declare void @llvm.riscv.vse1.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>*, i64);
-
-define void @intrinsic_vse1_v_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse1_v_nxv8i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse1.nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, i64 %2)
-  ret void
-}
-
-declare void @llvm.riscv.vse1.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>*, i64);
-
-define void @intrinsic_vse1_v_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse1_v_nxv16i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse1.nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, i64 %2)
-  ret void
-}
-
-declare void @llvm.riscv.vse1.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>*, i64);
-
-define void @intrinsic_vse1_v_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse1_v_nxv32i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse1.nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, i64 %2)
-  ret void
-}
-
-declare void @llvm.riscv.vse1.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>*, i64);
-
-define void @intrinsic_vse1_v_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse1_v_nxv64i1:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vse1.v v0, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse1.nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i64 %2)
-  ret void
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-; Make sure we can use the vsetvli from the producing instruction.
-define void @test_vsetvli_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1>* %2, i64 %3) nounwind {
-; CHECK-LABEL: test_vsetvli_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vmseq.vv v25, v8, v9
-; CHECK-NEXT:    vse1.v v25, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %3)
-  call void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i64 %3)
-  ret void
-}
-
-declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @test_vsetvli_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1>* %2, i64 %3) nounwind {
-; CHECK-LABEL: test_vsetvli_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vmseq.vv v25, v8, v9
-; CHECK-NEXT:    vse1.v v25, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %3)
-  call void @llvm.riscv.vse1.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i64 %3)
-  ret void
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
@@ -435,7 +435,7 @@
 ; CHECK-LABEL: vselect_legalize_regression:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vle1.v v25, (a0)
+; CHECK-NEXT:    vlm.v v25, (a0)
 ; CHECK-NEXT:    vmand.mm v1, v0, v25
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a2, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
@@ -435,7 +435,7 @@
 ; CHECK-LABEL: vselect_legalize_regression:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vle1.v v25, (a0)
+; CHECK-NEXT:    vlm.v v25, (a0)
 ; CHECK-NEXT:    vmand.mm v1, v0, v25
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a2, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsm-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsm-rv32.ll
@@ -0,0 +1,137 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s
+
+declare void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>*, i32);
+
+define void @intrinsic_vsm_v_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, i32 %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>*, i32);
+
+define void @intrinsic_vsm_v_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, i32 %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>*, i32);
+
+define void @intrinsic_vsm_v_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, i32 %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>*, i32);
+
+define void @intrinsic_vsm_v_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, i32 %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>*, i32);
+
+define void @intrinsic_vsm_v_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, i32 %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>*, i32);
+
+define void @intrinsic_vsm_v_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, i32 %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>*, i32);
+
+define void @intrinsic_vsm_v_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv64i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i32 %2)
+  ret void
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32);
+
+; Make sure we can use the vsetvli from the producing instruction.
+define void @test_vsetvli_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1>* %2, i32 %3) nounwind {
+; CHECK-LABEL: test_vsetvli_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vsm.v v25, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %3)
+  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i32 %3)
+  ret void
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32);
+
+define void @test_vsetvli_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1>* %2, i32 %3) nounwind {
+; CHECK-LABEL: test_vsetvli_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vsm.v v25, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %3)
+  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i32 %3)
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsm-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsm-rv64.ll
@@ -0,0 +1,137 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s
+
+declare void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>*, i64);
+
+define void @intrinsic_vsm_v_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1>* %1, i64 %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>*, i64);
+
+define void @intrinsic_vsm_v_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1>* %1, i64 %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>*, i64);
+
+define void @intrinsic_vsm_v_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1>* %1, i64 %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>*, i64);
+
+define void @intrinsic_vsm_v_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1>* %1, i64 %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>*, i64);
+
+define void @intrinsic_vsm_v_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1>* %1, i64 %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>*, i64);
+
+define void @intrinsic_vsm_v_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1>* %1, i64 %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>*, i64);
+
+define void @intrinsic_vsm_v_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv64i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1>* %1, i64 %2)
+  ret void
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64);
+
+; Make sure we can use the vsetvli from the producing instruction.
+define void @test_vsetvli_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1>* %2, i64 %3) nounwind {
+; CHECK-LABEL: test_vsetvli_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vsm.v v25, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %3)
+  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i64 %3)
+  ret void
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64);
+
+define void @test_vsetvli_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1>* %2, i64 %3) nounwind {
+; CHECK-LABEL: test_vsetvli_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT:    vmseq.vv v25, v8, v9
+; CHECK-NEXT:    vsm.v v25, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %3)
+  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1>* %2, i64 %3)
+  ret void
+}
diff --git a/llvm/test/MC/RISCV/rvv/aliases.s b/llvm/test/MC/RISCV/rvv/aliases.s
--- a/llvm/test/MC/RISCV/rvv/aliases.s
+++ b/llvm/test/MC/RISCV/rvv/aliases.s
@@ -78,3 +78,9 @@
 # ALIAS: vfabs.v v2, v1, v0.t # encoding: [0x57,0x91,0x10,0x28]
 # NO-ALIAS: vfsgnjx.vv v2, v1, v1, v0.t # encoding: [0x57,0x91,0x10,0x28]
 vfabs.v v2, v1, v0.t
+# ALIAS: vlm.v v8, (a0) # encoding: [0x07,0x04,0xb5,0x02]
+# NO-ALIAS: vlm.v v8, (a0) # encoding: [0x07,0x04,0xb5,0x02]
+vle1.v v8, (a0)
+# ALIAS: vsm.v v8, (a0) # encoding: [0x27,0x04,0xb5,0x02]
+# NO-ALIAS: vsm.v v8, (a0) # encoding: [0x27,0x04,0xb5,0x02]
+vse1.v v8, (a0)
diff --git a/llvm/test/MC/RISCV/rvv/load.s b/llvm/test/MC/RISCV/rvv/load.s
--- a/llvm/test/MC/RISCV/rvv/load.s
+++ b/llvm/test/MC/RISCV/rvv/load.s
@@ -8,14 +8,14 @@
 # RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
 # RUN:   | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
 
-vle1.v v0, (a0)
-# CHECK-INST: vle1.v v0, (a0)
+vlm.v v0, (a0)
+# CHECK-INST: vlm.v v0, (a0)
 # CHECK-ENCODING: [0x07,0x00,0xb5,0x02]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
 # CHECK-UNKNOWN: 07 00 b5 02
 
-vle1.v v8, (a0)
-# CHECK-INST: vle1.v v8, (a0)
+vlm.v v8, (a0)
+# CHECK-INST: vlm.v v8, (a0)
 # CHECK-ENCODING: [0x07,0x04,0xb5,0x02]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
 # CHECK-UNKNOWN: 07 04 b5 02
diff --git a/llvm/test/MC/RISCV/rvv/store.s b/llvm/test/MC/RISCV/rvv/store.s
--- a/llvm/test/MC/RISCV/rvv/store.s
+++ b/llvm/test/MC/RISCV/rvv/store.s
@@ -8,8 +8,8 @@
 # RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
 # RUN:   | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
 
-vse1.v v24, (a0)
-# CHECK-INST: vse1.v v24, (a0)
+vsm.v v24, (a0)
+# CHECK-INST: vsm.v v24, (a0)
 # CHECK-ENCODING: [0x27,0x0c,0xb5,0x02]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
 # CHECK-UNKNOWN: 27 0c b5 02
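
For quick reference, a minimal assembler sketch of what the MC-level changes above amount to, assuming an llvm-mc build with the V extension enabled as in the MC tests; the file name, register choice, and -show-encoding invocation are illustrative assumptions, not taken from this patch. The old vle1.v/vse1.v spellings remain accepted and are printed as the renamed vlm.v/vsm.v mnemonics with identical encodings.

# sketch.s -- hypothetical file, for illustration only
# Assemble with: llvm-mc -triple=riscv64 --mattr=+experimental-v -show-encoding sketch.s
vlm.v  v8, (a0)     # renamed unit-stride mask load
vle1.v v8, (a0)     # old spelling, kept as an alias; prints as vlm.v v8, (a0)
vsm.v  v8, (a0)     # renamed unit-stride mask store
vse1.v v8, (a0)     # old spelling, kept as an alias; prints as vsm.v v8, (a0)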