diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td --- a/clang/include/clang/Basic/riscv_vector.td +++ b/clang/include/clang/Basic/riscv_vector.td @@ -1916,11 +1916,11 @@ // 16.1. Vector Mask-Register Logical Instructions def vmand : RVVMaskBinBuiltin; def vmnand : RVVMaskBinBuiltin; -def vmandnot : RVVMaskBinBuiltin; +def vmandn : RVVMaskBinBuiltin; def vmxor : RVVMaskBinBuiltin; def vmor : RVVMaskBinBuiltin; def vmnor : RVVMaskBinBuiltin; -def vmornot : RVVMaskBinBuiltin; +def vmorn : RVVMaskBinBuiltin; def vmxnor : RVVMaskBinBuiltin; // pseudoinstructions def vmclr : RVVMaskNullaryBuiltin; @@ -1929,8 +1929,8 @@ defm vmnot_m : RVVPseudoMaskBuiltin<"vmnand", "c">; let HasPolicy = false in { -// 16.2. Vector mask population count vpopc -def vpopc : RVVMaskOp0Builtin<"um">; +// 16.2. Vector count population in mask vcpop.m +def vcpop : RVVMaskOp0Builtin<"um">; // 16.3. vfirst find-first-set mask bit def vfirst : RVVMaskOp0Builtin<"lm">; diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcpop.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcpop.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vcpop.c @@ -0,0 +1,131 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: @test_vcpop_m_b1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b1(vbool1_t op1, size_t vl) { + return vcpop(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b2(vbool2_t op1, size_t vl) { + return vcpop(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b4(vbool4_t op1, size_t vl) { + return vcpop(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b8(vbool8_t op1, size_t vl) { + return vcpop(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b16(vbool16_t op1, size_t vl) { + return vcpop(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b32(vbool32_t op1, size_t vl) { + return vcpop(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b64(vbool64_t op1, size_t vl) { + return vcpop(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv64i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { + return vcpop(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv32i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { + return vcpop(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv16i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { + return vcpop(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv8i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { + return vcpop(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv4i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { + return vcpop(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv2i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { + return vcpop(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv1i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { + return vcpop(mask, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c @@ -67,65 +67,65 @@ return vmand(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmandnot_mm_b1( +// CHECK-RV64-LABEL: @test_vmandn_mm_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandn.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmandnot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return vmandnot(op1, op2, vl); +vbool1_t test_vmandn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { + return vmandn(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmandnot_mm_b2( +// CHECK-RV64-LABEL: @test_vmandn_mm_b2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandn.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmandnot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return vmandnot(op1, op2, vl); +vbool2_t test_vmandn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { + return vmandn(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmandnot_mm_b4( +// CHECK-RV64-LABEL: @test_vmandn_mm_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandn.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmandnot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return vmandnot(op1, op2, vl); +vbool4_t test_vmandn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { + return vmandn(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmandnot_mm_b8( +// CHECK-RV64-LABEL: @test_vmandn_mm_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandn.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmandnot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return vmandnot(op1, op2, vl); +vbool8_t test_vmandn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { + return vmandn(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmandnot_mm_b16( +// CHECK-RV64-LABEL: @test_vmandn_mm_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandn.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmandnot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return vmandnot(op1, op2, vl); +vbool16_t test_vmandn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { + return vmandn(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmandnot_mm_b32( +// CHECK-RV64-LABEL: @test_vmandn_mm_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandn.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmandnot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return vmandnot(op1, op2, vl); +vbool32_t test_vmandn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { + return vmandn(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmandnot_mm_b64( +// CHECK-RV64-LABEL: @test_vmandn_mm_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandn.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmandnot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return vmandnot(op1, op2, vl); +vbool64_t test_vmandn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { + return vmandn(op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c @@ -67,65 +67,65 @@ return vmor(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmornot_mm_b1( +// CHECK-RV64-LABEL: @test_vmorn_mm_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmorn.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmornot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return vmornot(op1, op2, vl); +vbool1_t test_vmorn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { + return vmorn(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmornot_mm_b2( +// CHECK-RV64-LABEL: @test_vmorn_mm_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmorn.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmornot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return vmornot(op1, op2, vl); +vbool2_t test_vmorn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { + return vmorn(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmornot_mm_b4( +// CHECK-RV64-LABEL: @test_vmorn_mm_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmorn.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmornot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return vmornot(op1, op2, vl); +vbool4_t test_vmorn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { + return vmorn(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmornot_mm_b8( +// CHECK-RV64-LABEL: @test_vmorn_mm_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmorn.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmornot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return vmornot(op1, op2, vl); +vbool8_t test_vmorn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { + return vmorn(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmornot_mm_b16( +// CHECK-RV64-LABEL: @test_vmorn_mm_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmorn.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmornot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return vmornot(op1, op2, vl); +vbool16_t test_vmorn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { + return vmorn(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmornot_mm_b32( +// CHECK-RV64-LABEL: @test_vmorn_mm_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmorn.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t 
test_vmornot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return vmornot(op1, op2, vl); +vbool32_t test_vmorn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { + return vmorn(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmornot_mm_b64( +// CHECK-RV64-LABEL: @test_vmorn_mm_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmorn.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmornot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return vmornot(op1, op2, vl); +vbool64_t test_vmorn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { + return vmorn(op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c deleted file mode 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c +++ /dev/null @@ -1,131 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py -// REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s - -#include - -// CHECK-RV64-LABEL: @test_vpopc_m_b1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b1(vbool1_t op1, size_t vl) { - return vpopc(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b2(vbool2_t op1, size_t vl) { - return vpopc(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b4(vbool4_t op1, size_t vl) { - return vpopc(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b8(vbool8_t op1, size_t vl) { - return vpopc(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b16( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b16(vbool16_t op1, size_t vl) { - return vpopc(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b32( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b32(vbool32_t op1, size_t vl) { - return vpopc(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b64( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b64(vbool64_t op1, size_t vl) { - return vpopc(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return vpopc(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return vpopc(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return vpopc(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return vpopc(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b16_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return vpopc(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b32_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return vpopc(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b64_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return vpopc(mask, op1, vl); -} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vcpop.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vcpop.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vcpop.c @@ -0,0 +1,131 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: @test_vcpop_m_b1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b1(vbool1_t op1, size_t vl) { + return vcpop_m_b1(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b2(vbool2_t op1, size_t vl) { + return vcpop_m_b2(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b4(vbool4_t op1, size_t vl) { + return vcpop_m_b4(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b8(vbool8_t op1, size_t vl) { + return vcpop_m_b8(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b16(vbool16_t op1, size_t vl) { + return vcpop_m_b16(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b32(vbool32_t op1, size_t vl) { + return vcpop_m_b32(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b64(vbool64_t op1, size_t vl) { + return vcpop_m_b64(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv64i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { + return vcpop_m_b1_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv32i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { + return vcpop_m_b2_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv16i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { + return vcpop_m_b4_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv8i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { + return vcpop_m_b8_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv4i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { + return vcpop_m_b16_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv2i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b32_m(vbool32_t mask, vbool32_t op1, 
size_t vl) { + return vcpop_m_b32_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vcpop_m_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv1i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long test_vcpop_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { + return vcpop_m_b64_m(mask, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c @@ -67,65 +67,65 @@ return vmand_mm_b64(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmandnot_mm_b1( +// CHECK-RV64-LABEL: @test_vmandn_mm_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandn.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmandnot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return vmandnot_mm_b1(op1, op2, vl); +vbool1_t test_vmandn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { + return vmandn_mm_b1(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmandnot_mm_b2( +// CHECK-RV64-LABEL: @test_vmandn_mm_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandn.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmandnot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return vmandnot_mm_b2(op1, op2, vl); +vbool2_t test_vmandn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { + return vmandn_mm_b2(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmandnot_mm_b4( +// CHECK-RV64-LABEL: @test_vmandn_mm_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandn.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmandnot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return vmandnot_mm_b4(op1, op2, vl); +vbool4_t test_vmandn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { + return vmandn_mm_b4(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmandnot_mm_b8( +// CHECK-RV64-LABEL: @test_vmandn_mm_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandn.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmandnot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return vmandnot_mm_b8(op1, op2, vl); +vbool8_t test_vmandn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { + return vmandn_mm_b8(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmandnot_mm_b16( +// CHECK-RV64-LABEL: @test_vmandn_mm_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandn.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmandnot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return 
vmandnot_mm_b16(op1, op2, vl); +vbool16_t test_vmandn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { + return vmandn_mm_b16(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmandnot_mm_b32( +// CHECK-RV64-LABEL: @test_vmandn_mm_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandn.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmandnot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return vmandnot_mm_b32(op1, op2, vl); +vbool32_t test_vmandn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { + return vmandn_mm_b32(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmandnot_mm_b64( +// CHECK-RV64-LABEL: @test_vmandn_mm_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandn.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmandnot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return vmandnot_mm_b64(op1, op2, vl); +vbool64_t test_vmandn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { + return vmandn_mm_b64(op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c @@ -67,65 +67,65 @@ return vmor_mm_b64(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmornot_mm_b1( +// CHECK-RV64-LABEL: @test_vmorn_mm_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmorn.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmornot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { - return vmornot_mm_b1(op1, op2, vl); +vbool1_t test_vmorn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { + return vmorn_mm_b1(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmornot_mm_b2( +// CHECK-RV64-LABEL: @test_vmorn_mm_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmorn.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmornot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { - return vmornot_mm_b2(op1, op2, vl); +vbool2_t test_vmorn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { + return vmorn_mm_b2(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmornot_mm_b4( +// CHECK-RV64-LABEL: @test_vmorn_mm_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmorn.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmornot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { - return vmornot_mm_b4(op1, op2, vl); +vbool4_t test_vmorn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { + return vmorn_mm_b4(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmornot_mm_b8( +// CHECK-RV64-LABEL: @test_vmorn_mm_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmornot.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmorn.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmornot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { - return vmornot_mm_b8(op1, op2, vl); +vbool8_t test_vmorn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { + return vmorn_mm_b8(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmornot_mm_b16( +// CHECK-RV64-LABEL: @test_vmorn_mm_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmorn.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmornot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { - return vmornot_mm_b16(op1, op2, vl); +vbool16_t test_vmorn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { + return vmorn_mm_b16(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmornot_mm_b32( +// CHECK-RV64-LABEL: @test_vmorn_mm_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmorn.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmornot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { - return vmornot_mm_b32(op1, op2, vl); +vbool32_t test_vmorn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { + return vmorn_mm_b32(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmornot_mm_b64( +// CHECK-RV64-LABEL: @test_vmorn_mm_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmorn.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmornot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { - return vmornot_mm_b64(op1, op2, vl); +vbool64_t test_vmorn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { + return vmorn_mm_b64(op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c deleted file mode 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c +++ /dev/null @@ -1,131 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py -// REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s - -#include - -// CHECK-RV64-LABEL: @test_vpopc_m_b1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b1(vbool1_t op1, size_t vl) { - return vpopc_m_b1(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b2(vbool2_t op1, size_t vl) { - return vpopc_m_b2(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) 
-// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b4(vbool4_t op1, size_t vl) { - return vpopc_m_b4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b8(vbool8_t op1, size_t vl) { - return vpopc_m_b8(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b16( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b16(vbool16_t op1, size_t vl) { - return vpopc_m_b16(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b32( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b32(vbool32_t op1, size_t vl) { - return vpopc_m_b32(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b64( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b64(vbool64_t op1, size_t vl) { - return vpopc_m_b64(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return vpopc_m_b1_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return vpopc_m_b2_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return vpopc_m_b4_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return vpopc_m_b8_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b16_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return vpopc_m_b16_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b32_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return vpopc_m_b32_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vpopc_m_b64_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] -// -unsigned long test_vpopc_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return vpopc_m_b64_m(mask, op1, vl); -} diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -1194,16 +1194,16 @@ def int_riscv_vmand: RISCVBinaryAAANoMask; def int_riscv_vmnand: RISCVBinaryAAANoMask; - def int_riscv_vmandnot: RISCVBinaryAAANoMask; + def int_riscv_vmandn: RISCVBinaryAAANoMask; def int_riscv_vmxor: RISCVBinaryAAANoMask; def int_riscv_vmor: RISCVBinaryAAANoMask; def int_riscv_vmnor: RISCVBinaryAAANoMask; - def int_riscv_vmornot: RISCVBinaryAAANoMask; + def int_riscv_vmorn: RISCVBinaryAAANoMask; def int_riscv_vmxnor: RISCVBinaryAAANoMask; def int_riscv_vmclr : RISCVNullaryIntrinsic; def int_riscv_vmset : RISCVNullaryIntrinsic; - defm vpopc : RISCVMaskUnarySOut; + defm vcpop : RISCVMaskUnarySOut; defm vfirst : RISCVMaskUnarySOut; defm vmsbf : RISCVMaskUnaryMOut; defm vmsof : RISCVMaskUnaryMOut; diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp --- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp +++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp @@ -2377,7 +2377,7 @@ // masked va >= x, vd == v0 // // pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t, vt - // expansion: vmslt{u}.vx vt, va, x; vmandnot.mm vd, vd, vt + // expansion: vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt assert(Inst.getOperand(0).getReg() == RISCV::V0 && "The destination register should be V0."); assert(Inst.getOperand(1).getReg() != RISCV::V0 && @@ -2387,7 +2387,7 @@ .addOperand(Inst.getOperand(2)) .addOperand(Inst.getOperand(3)) .addOperand(Inst.getOperand(4))); - emitToStreamer(Out, MCInstBuilder(RISCV::VMANDNOT_MM) + emitToStreamer(Out, MCInstBuilder(RISCV::VMANDN_MM) .addOperand(Inst.getOperand(0)) .addOperand(Inst.getOperand(0)) .addOperand(Inst.getOperand(1))); @@ -2395,7 +2395,7 @@ // masked va >= x, any vd // // pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t, vt - // expansion: vmslt{u}.vx vt, va, x; vmandnot.mm vt, v0, vt; vmandnot.mm vd, + // expansion: vmslt{u}.vx vt, va, x; vmandn.mm vt, v0, vt; vmandn.mm vd, // vd, v0; vmor.mm vd, vt, vd assert(Inst.getOperand(1).getReg() != RISCV::V0 && "The temporary vector register should not be V0."); @@ -2404,11 +2404,11 @@ .addOperand(Inst.getOperand(2)) .addOperand(Inst.getOperand(3)) .addReg(RISCV::NoRegister)); - emitToStreamer(Out, MCInstBuilder(RISCV::VMANDNOT_MM) + emitToStreamer(Out, MCInstBuilder(RISCV::VMANDN_MM) .addOperand(Inst.getOperand(1)) .addReg(RISCV::V0) .addOperand(Inst.getOperand(1))); - emitToStreamer(Out, MCInstBuilder(RISCV::VMANDNOT_MM) + emitToStreamer(Out, MCInstBuilder(RISCV::VMANDN_MM) .addOperand(Inst.getOperand(0)) .addOperand(Inst.getOperand(0)) .addReg(RISCV::V0)); diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -808,7 +808,7 @@ } bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask; MVT Src1VT = Src1.getSimpleValueType(); - unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOTOpcode; + unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode; switch 
(RISCVTargetLowering::getLMUL(Src1VT)) {
     default:
       llvm_unreachable("Unexpected LMUL!");
@@ -861,31 +861,31 @@
       llvm_unreachable("Unexpected LMUL!");
     case RISCVII::VLMUL::LMUL_F8:
       VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
-      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
+      VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF8;
       break;
     case RISCVII::VLMUL::LMUL_F4:
       VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
-      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
+      VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF4;
       break;
     case RISCVII::VLMUL::LMUL_F2:
       VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
-      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
+      VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF2;
       break;
     case RISCVII::VLMUL::LMUL_1:
       VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
-      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
+      VMANDNOpcode = RISCV::PseudoVMANDN_MM_M1;
       break;
     case RISCVII::VLMUL::LMUL_2:
       VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
-      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
+      VMANDNOpcode = RISCV::PseudoVMANDN_MM_M2;
       break;
     case RISCVII::VLMUL::LMUL_4:
       VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
-      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
+      VMANDNOpcode = RISCV::PseudoVMANDN_MM_M4;
       break;
     case RISCVII::VLMUL::LMUL_8:
       VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
-      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
+      VMANDNOpcode = RISCV::PseudoVMANDN_MM_M8;
       break;
     }
     SDValue SEW = CurDAG->getTargetConstant(
@@ -896,13 +896,13 @@
     SDValue MaskedOff = Node->getOperand(1);
     SDValue Mask = Node->getOperand(4);
     // If the MaskedOff value and the Mask are the same value use
-    // vmslt{u}.vx vt, va, x; vmandnot.mm vd, vd, vt
+    // vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
     // This avoids needing to copy v0 to vd before starting the next sequence.
     if (Mask == MaskedOff) {
       SDValue Cmp = SDValue(
           CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
           0);
-      ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
+      ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
                                                {Mask, Cmp, VL, MaskSEW}));
       return;
     }
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -263,8 +263,8 @@
   VSEXT_VL,
   VZEXT_VL,

-  // vpopc.m with additional mask and VL operands.
-  VPOPC_VL,
+  // vcpop.m with additional mask and VL operands.
+  VCPOP_VL,

   // Reads value of CSR.
   // The first operand is a chain pointer. The second specifies address of the
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4189,26 +4189,26 @@
     llvm_unreachable("Unhandled reduction");
   case ISD::VECREDUCE_AND:
   case ISD::VP_REDUCE_AND: {
-    // vpopc ~x == 0
+    // vcpop ~x == 0
     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
-    Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
+    Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
     CC = ISD::SETEQ;
     BaseOpc = ISD::AND;
     break;
   }
   case ISD::VECREDUCE_OR:
   case ISD::VP_REDUCE_OR:
-    // vpopc x != 0
-    Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
+    // vcpop x != 0
+    Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
     CC = ISD::SETNE;
     BaseOpc = ISD::OR;
     break;
   case ISD::VECREDUCE_XOR:
   case ISD::VP_REDUCE_XOR: {
-    // ((vpopc x) & 1) != 0
+    // ((vcpop x) & 1) != 0
     SDValue One = DAG.getConstant(1, DL, XLenVT);
-    Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
+    Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
     CC = ISD::SETNE;
     BaseOpc = ISD::XOR;
@@ -4223,7 +4223,7 @@

   // Now include the start value in the operation.
   // Note that we must return the start value when no elements are operated
-  // upon. The vpopc instructions we've emitted in each case above will return
+  // upon. The vcpop instructions we've emitted in each case above will return
   // 0 for an inactive vector, and so we've already received the neutral value:
   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
   // can simply include the start value.
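Side note: the three reduction lowerings above correspond to simple intrinsic-level idioms, which may help when reading the .ll test updates further down. A minimal C sketch using the b8 variants renamed by this patch (the vmnand trick is the same one the tests print as vmnand.mm v9, v0, v0); any other SEW ratio works the same way:

#include <riscv_vector.h>
#include <stddef.h>

// reduce-or: "vcpop x != 0"
unsigned long any_set(vbool8_t x, size_t vl) {
  return vcpop_m_b8(x, vl) != 0;
}

// reduce-and: "vcpop ~x == 0", with ~x formed as vmnand(x, x).
unsigned long all_set(vbool8_t x, size_t vl) {
  return vcpop_m_b8(vmnand_mm_b8(x, x, vl), vl) == 0;
}

// reduce-xor: "((vcpop x) & 1) != 0"
unsigned long parity(vbool8_t x, size_t vl) {
  return (vcpop_m_b8(x, vl) & 1) != 0;
}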
@@ -9273,7 +9273,7 @@ NODE_NAME_CASE(VRGATHEREI16_VV_VL) NODE_NAME_CASE(VSEXT_VL) NODE_NAME_CASE(VZEXT_VL) - NODE_NAME_CASE(VPOPC_VL) + NODE_NAME_CASE(VCPOP_VL) NODE_NAME_CASE(VLE_VL) NODE_NAME_CASE(VSE_VL) NODE_NAME_CASE(READ_CSR) diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td @@ -1361,11 +1361,11 @@ let RVVConstraint = NoConstraint in { defm VMAND_M : VMALU_MV_Mask<"vmand", 0b011001, "m">; defm VMNAND_M : VMALU_MV_Mask<"vmnand", 0b011101, "m">; -defm VMANDNOT_M : VMALU_MV_Mask<"vmandnot", 0b011000, "m">; +defm VMANDN_M : VMALU_MV_Mask<"vmandn", 0b011000, "m">; defm VMXOR_M : VMALU_MV_Mask<"vmxor", 0b011011, "m">; defm VMOR_M : VMALU_MV_Mask<"vmor", 0b011010, "m">; defm VMNOR_M : VMALU_MV_Mask<"vmnor", 0b011110, "m">; -defm VMORNOT_M : VMALU_MV_Mask<"vmornot", 0b011100, "m">; +defm VMORN_M : VMALU_MV_Mask<"vmorn", 0b011100, "m">; defm VMXNOR_M : VMALU_MV_Mask<"vmxnor", 0b011111, "m">; } @@ -1378,13 +1378,18 @@ def : InstAlias<"vmnot.m $vd, $vs", (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>; +def : InstAlias<"vmandnot.mm $vd, $vs2, $vs1", + (VMANDN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>; +def : InstAlias<"vmornot.mm $vd, $vs2, $vs1", + (VMORN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>; + let hasSideEffects = 0, mayLoad = 0, mayStore = 0, RVVConstraint = NoConstraint in { -// Vector mask population count vpopc -def VPOPC_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd), +// Vector mask population count vcpop +def VCPOP_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd), (ins VR:$vs2, VMaskOp:$vm), - "vpopc.m", "$vd, $vs2$vm">, + "vcpop.m", "$vd, $vs2$vm">, Sched<[WriteVMPopV, ReadVMPopV, ReadVMask]>; // vfirst find-first-set mask bit @@ -1395,6 +1400,9 @@ } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 +def : InstAlias<"vpopc.m $vd, $vs2$vm", + (VCPOP_M GPR:$vd, VR:$vs2, VMaskOp:$vm), 0>; + let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in { // vmsbf.m set-before-first mask bit diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -4007,11 +4007,11 @@ defm PseudoVMAND: VPseudoBinaryM_MM; defm PseudoVMNAND: VPseudoBinaryM_MM; -defm PseudoVMANDNOT: VPseudoBinaryM_MM; +defm PseudoVMANDN: VPseudoBinaryM_MM; defm PseudoVMXOR: VPseudoBinaryM_MM; defm PseudoVMOR: VPseudoBinaryM_MM; defm PseudoVMNOR: VPseudoBinaryM_MM; -defm PseudoVMORNOT: VPseudoBinaryM_MM; +defm PseudoVMORN: VPseudoBinaryM_MM; defm PseudoVMXNOR: VPseudoBinaryM_MM; // Pseudo instructions @@ -4019,10 +4019,10 @@ defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">; //===----------------------------------------------------------------------===// -// 16.2. Vector mask population count vpopc +// 16.2. Vector mask population count vcpop //===----------------------------------------------------------------------===// -defm PseudoVPOPC: VPseudoUnaryS_M; +defm PseudoVCPOP: VPseudoUnaryS_M; //===----------------------------------------------------------------------===// // 16.3. 
vfirst find-first-set mask bit @@ -4676,11 +4676,11 @@ //===----------------------------------------------------------------------===// defm : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">; defm : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">; -defm : VPatBinaryM_MM<"int_riscv_vmandnot", "PseudoVMANDNOT">; +defm : VPatBinaryM_MM<"int_riscv_vmandn", "PseudoVMANDN">; defm : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">; defm : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">; defm : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">; -defm : VPatBinaryM_MM<"int_riscv_vmornot", "PseudoVMORNOT">; +defm : VPatBinaryM_MM<"int_riscv_vmorn", "PseudoVMORN">; defm : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">; // pseudo instructions @@ -4688,9 +4688,9 @@ defm : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">; //===----------------------------------------------------------------------===// -// 16.2. Vector mask population count vpopc +// 16.2. Vector count population in mask vcpop.m //===----------------------------------------------------------------------===// -defm : VPatUnaryS_M<"int_riscv_vpopc", "PseudoVPOPC">; +defm : VPatUnaryS_M<"int_riscv_vcpop", "PseudoVCPOP">; //===----------------------------------------------------------------------===// // 16.3. vfirst find-first-set mask bit diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -561,10 +561,10 @@ VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))), - (!cast("PseudoVMANDNOT_MM_"#mti.LMul.MX) + (!cast("PseudoVMANDN_MM_"#mti.LMul.MX) VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))), - (!cast("PseudoVMORNOT_MM_"#mti.LMul.MX) + (!cast("PseudoVMORN_MM_"#mti.LMul.MX) VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>; // Handle rvv_vnot the same as the vmnot.m pseudoinstruction. diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -199,7 +199,7 @@ def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl), (riscv_vmxor_vl node:$rs, true_mask, node:$vl)>; -def riscv_vpopc_vl : SDNode<"RISCVISD::VPOPC_VL", +def riscv_vcpop_vl : SDNode<"RISCVISD::VCPOP_VL", SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, SDTCisVec<1>, SDTCisInt<1>, SDTCVecEltisVT<2, i1>, @@ -1233,12 +1233,12 @@ def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, (riscv_vmnot_vl VR:$rs2, VLOpFrag), VLOpFrag)), - (!cast("PseudoVMANDNOT_MM_" # mti.LMul.MX) + (!cast("PseudoVMANDN_MM_" # mti.LMul.MX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, (riscv_vmnot_vl VR:$rs2, VLOpFrag), VLOpFrag)), - (!cast("PseudoVMORNOT_MM_" # mti.LMul.MX) + (!cast("PseudoVMORN_MM_" # mti.LMul.MX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; // XOR is associative so we need 2 patterns for VMXNOR. 
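Side note: since the shortened mnemonics drop the explicit "not", it is worth restating the operand order these patterns encode: the complement applies to the second source, matching (and rs1, (rvv_vnot rs2)) above. A minimal intrinsic-level sketch (b8 variants; the operand order is my reading of the patterns, not something this patch changes):

#include <riscv_vector.h>
#include <stddef.h>

// vmandn.mm: op1 & ~op2 (the trailing "n" negates the second source).
vbool8_t andn(vbool8_t op1, vbool8_t op2, size_t vl) {
  return vmandn_mm_b8(op1, op2, vl);
}

// vmorn.mm: op1 | ~op2.
vbool8_t orn(vbool8_t op1, vbool8_t op2, size_t vl) {
  return vmorn_mm_b8(op1, op2, vl);
}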
def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1, @@ -1268,14 +1268,14 @@ (!cast("PseudoVMNAND_MM_" # mti.LMul.MX) VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>; - // 16.2 Vector Mask Population Count vpopc - def : Pat<(XLenVT (riscv_vpopc_vl (mti.Mask VR:$rs2), (mti.Mask true_mask), + // 16.2 Vector count population in mask vcpop.m + def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask true_mask), VLOpFrag)), - (!cast("PseudoVPOPC_M_" # mti.BX) + (!cast("PseudoVCPOP_M_" # mti.BX) VR:$rs2, GPR:$vl, mti.Log2SEW)>; - def : Pat<(XLenVT (riscv_vpopc_vl (mti.Mask VR:$rs2), (mti.Mask V0), + def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask V0), VLOpFrag)), - (!cast("PseudoVPOPC_M_" # mti.BX # "_MASK") + (!cast("PseudoVCPOP_M_" # mti.BX # "_MASK") VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>; } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll @@ -75,7 +75,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vlm.v v9, (a1) -; CHECK-NEXT: vmandnot.mm v8, v9, v8 +; CHECK-NEXT: vmandn.mm v8, v9, v8 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret %a = load <8 x i1>, <8 x i1>* %x @@ -92,7 +92,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vlm.v v9, (a1) -; CHECK-NEXT: vmornot.mm v8, v9, v8 +; CHECK-NEXT: vmorn.mm v8, v9, v8 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret %a = load <16 x i1>, <16 x i1>* %x diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll @@ -12,7 +12,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmnand.mm v9, v0, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a1, v9, v0.t +; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 @@ -29,7 +29,7 @@ ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a1, v9, v0.t +; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 @@ -47,7 +47,7 @@ ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a1, v9, v0.t +; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -64,7 +64,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmnand.mm v9, v0, v0 ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a1, v9, v0.t +; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: seqz a1, a1 ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 @@ -81,7 +81,7 @@ ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a1, v9, v0.t +; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 ; CHECK-NEXT: or a0, a1, a0 ; CHECK-NEXT: andi a0, a0, 1 @@ -99,7 +99,7 @@ ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a1, v9, v0.t +; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 ; 
CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
@@ -116,7 +116,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT: vmnand.mm v9, v0, v0
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: seqz a1, a1
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
@@ -133,7 +133,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: snez a1, a1
 ; CHECK-NEXT: or a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
@@ -151,7 +151,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: xor a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
@@ -168,7 +168,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT: vmnand.mm v9, v0, v0
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: seqz a1, a1
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
@@ -185,7 +185,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: snez a1, a1
 ; CHECK-NEXT: or a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
@@ -203,7 +203,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: xor a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
@@ -220,7 +220,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT: vmnand.mm v9, v0, v0
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: seqz a1, a1
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
@@ -237,7 +237,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: snez a1, a1
 ; CHECK-NEXT: or a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
@@ -255,7 +255,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: xor a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll
@@ -10,7 +10,7 @@
 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -26,7 +26,7 @@
 ; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -41,7 +41,7 @@
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -57,7 +57,7 @@
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -72,7 +72,7 @@
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -88,7 +88,7 @@
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -103,7 +103,7 @@
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -119,7 +119,7 @@
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -134,7 +134,7 @@
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -150,7 +150,7 @@
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
@@ -58,7 +58,7 @@
 ; CHECK-LABEL: vreduce_or_v2i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -72,7 +72,7 @@
 ; CHECK-LABEL: vreduce_xor_v2i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -87,7 +87,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
 ; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
 ; CHECK-NEXT: seqz a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -101,7 +101,7 @@
 ; CHECK-LABEL: vreduce_or_v4i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -115,7 +115,7 @@
 ; CHECK-LABEL: vreduce_xor_v4i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -130,7 +130,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
 ; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
 ; CHECK-NEXT: seqz a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -144,7 +144,7 @@
 ; CHECK-LABEL: vreduce_or_v8i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -158,7 +158,7 @@
 ; CHECK-LABEL: vreduce_xor_v8i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -173,7 +173,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
 ; CHECK-NEXT: seqz a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -187,7 +187,7 @@
 ; CHECK-LABEL: vreduce_or_v16i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -201,7 +201,7 @@
 ; CHECK-LABEL: vreduce_xor_v16i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -216,7 +216,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
 ; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
 ; CHECK-NEXT: seqz a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -231,7 +231,7 @@
 ; LMULMAX1: # %bb.0:
 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-NEXT: vmor.mm v8, v0, v8
-; LMULMAX1-NEXT: vpopc.m a0, v8
+; LMULMAX1-NEXT: vcpop.m a0, v8
 ; LMULMAX1-NEXT: snez a0, a0
 ; LMULMAX1-NEXT: neg a0, a0
 ; LMULMAX1-NEXT: ret
@@ -240,7 +240,7 @@
 ; LMULMAX8: # %bb.0:
 ; LMULMAX8-NEXT: addi a0, zero, 32
 ; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; LMULMAX8-NEXT: vpopc.m a0, v0
+; LMULMAX8-NEXT: vcpop.m a0, v0
 ; LMULMAX8-NEXT: snez a0, a0
 ; LMULMAX8-NEXT: neg a0, a0
 ; LMULMAX8-NEXT: ret
@@ -255,7 +255,7 @@
 ; LMULMAX1: # %bb.0:
 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-NEXT: vmxor.mm v8, v0, v8
-; LMULMAX1-NEXT: vpopc.m a0, v8
+; LMULMAX1-NEXT: vcpop.m a0, v8
 ; LMULMAX1-NEXT: andi a0, a0, 1
 ; LMULMAX1-NEXT: neg a0, a0
 ; LMULMAX1-NEXT: ret
@@ -264,7 +264,7 @@
 ; LMULMAX8: # %bb.0:
 ; LMULMAX8-NEXT: addi a0, zero, 32
 ; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; LMULMAX8-NEXT: vpopc.m a0, v0
+; LMULMAX8-NEXT: vcpop.m a0, v0
 ; LMULMAX8-NEXT: andi a0, a0, 1
 ; LMULMAX8-NEXT: neg a0, a0
 ; LMULMAX8-NEXT: ret
@@ -279,7 +279,7 @@
 ; LMULMAX1: # %bb.0:
 ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
 ; LMULMAX1-NEXT: vmnand.mm v8, v0, v8
-; LMULMAX1-NEXT: vpopc.m a0, v8
+; LMULMAX1-NEXT: vcpop.m a0, v8
 ; LMULMAX1-NEXT: seqz a0, a0
 ; LMULMAX1-NEXT: neg a0, a0
 ; LMULMAX1-NEXT: ret
@@ -289,7 +289,7 @@
 ; LMULMAX8-NEXT: addi a0, zero, 32
 ; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu
 ; LMULMAX8-NEXT: vmnand.mm v8, v0, v0
-; LMULMAX8-NEXT: vpopc.m a0, v8
+; LMULMAX8-NEXT: vcpop.m a0, v8
 ; LMULMAX8-NEXT: seqz a0, a0
 ; LMULMAX8-NEXT: neg a0, a0
 ; LMULMAX8-NEXT: ret
@@ -306,7 +306,7 @@
 ; LMULMAX1-NEXT: vmor.mm v8, v8, v10
 ; LMULMAX1-NEXT: vmor.mm v9, v0, v9
 ; LMULMAX1-NEXT: vmor.mm v8, v9, v8
-; LMULMAX1-NEXT: vpopc.m a0, v8
+; LMULMAX1-NEXT: vcpop.m a0, v8
 ; LMULMAX1-NEXT: snez a0, a0
 ; LMULMAX1-NEXT: neg a0, a0
 ; LMULMAX1-NEXT: ret
@@ -315,7 +315,7 @@
 ; LMULMAX8: # %bb.0:
 ; LMULMAX8-NEXT: addi a0, zero, 64
 ; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; LMULMAX8-NEXT: vpopc.m a0, v0
+; LMULMAX8-NEXT: vcpop.m a0, v0
 ; LMULMAX8-NEXT: snez a0, a0
 ; LMULMAX8-NEXT: neg a0, a0
 ; LMULMAX8-NEXT: ret
@@ -332,7 +332,7 @@
 ; LMULMAX1-NEXT: vmxor.mm v8, v8, v10
 ; LMULMAX1-NEXT: vmxor.mm v9, v0, v9
 ; LMULMAX1-NEXT: vmxor.mm v8, v9, v8
-; LMULMAX1-NEXT: vpopc.m a0, v8
+; LMULMAX1-NEXT: vcpop.m a0, v8
 ; LMULMAX1-NEXT: andi a0, a0, 1
 ; LMULMAX1-NEXT: neg a0, a0
 ; LMULMAX1-NEXT: ret
@@ -341,7 +341,7 @@
 ; LMULMAX8: # %bb.0:
 ; LMULMAX8-NEXT: addi a0, zero, 64
 ; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; LMULMAX8-NEXT: vpopc.m a0, v0
+; LMULMAX8-NEXT: vcpop.m a0, v0
 ; LMULMAX8-NEXT: andi a0, a0, 1
 ; LMULMAX8-NEXT: neg a0, a0
 ; LMULMAX8-NEXT: ret
@@ -358,7 +358,7 @@
 ; LMULMAX1-NEXT: vmand.mm v8, v8, v10
 ; LMULMAX1-NEXT: vmand.mm v9, v0, v9
 ; LMULMAX1-NEXT: vmnand.mm v8, v9, v8
-; LMULMAX1-NEXT: vpopc.m a0, v8
+; LMULMAX1-NEXT: vcpop.m a0, v8
 ; LMULMAX1-NEXT: seqz a0, a0
 ; LMULMAX1-NEXT: neg a0, a0
 ; LMULMAX1-NEXT: ret
@@ -368,7 +368,7 @@
 ; LMULMAX8-NEXT: addi a0, zero, 64
 ; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu
 ; LMULMAX8-NEXT: vmnand.mm v8, v0, v0
-; LMULMAX8-NEXT: vpopc.m a0, v8
+; LMULMAX8-NEXT: vcpop.m a0, v8
 ; LMULMAX8-NEXT: seqz a0, a0
 ; LMULMAX8-NEXT: neg a0, a0
 ; LMULMAX8-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
@@ -225,7 +225,7 @@
 ; CHECK-LABEL: vselect_v2i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -237,7 +237,7 @@
 ; CHECK-LABEL: vselect_v4i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -249,7 +249,7 @@
 ; CHECK-LABEL: vselect_v8i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -261,7 +261,7 @@
 ; CHECK-LABEL: vselect_v16i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -274,7 +274,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: addi a0, zero, 32
 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -287,7 +287,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: addi a0, zero, 64
 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/select-int.ll b/llvm/test/CodeGen/RISCV/rvv/select-int.ll
--- a/llvm/test/CodeGen/RISCV/rvv/select-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/select-int.ll
@@ -10,7 +10,7 @@
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -26,7 +26,7 @@
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -41,7 +41,7 @@
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -57,7 +57,7 @@
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -72,7 +72,7 @@
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -88,7 +88,7 @@
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -103,7 +103,7 @@
 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -119,7 +119,7 @@
 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -134,7 +134,7 @@
 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT: vmv.v.x v10, a0
 ; CHECK-NEXT: vmsne.vi v9, v10, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -150,7 +150,7 @@
 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
 ; CHECK-NEXT: vmv.v.x v10, a0
 ; CHECK-NEXT: vmsne.vi v9, v10, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -165,7 +165,7 @@
 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT: vmv.v.x v12, a0
 ; CHECK-NEXT: vmsne.vi v9, v12, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -181,7 +181,7 @@
 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
 ; CHECK-NEXT: vmv.v.x v12, a0
 ; CHECK-NEXT: vmsne.vi v9, v12, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -196,7 +196,7 @@
 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT: vmv.v.x v16, a0
 ; CHECK-NEXT: vmsne.vi v9, v16, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -212,7 +212,7 @@
 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
 ; CHECK-NEXT: vmv.v.x v16, a0
 ; CHECK-NEXT: vmsne.vi v9, v16, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop-rv32.ll
rename from llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vcpop-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcpop-rv32.ll
@@ -1,39 +1,39 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
 ; RUN: < %s | FileCheck %s
-declare i32 @llvm.riscv.vpopc.i32.nxv1i1(
+declare i32 @llvm.riscv.vcpop.i32.nxv1i1(
   <vscale x 1 x i1>,
   i32);

-define i32 @intrinsic_vpopc_m_i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv1i1:
+define i32 @intrinsic_vcpop_m_i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv1i1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: ret
 entry:
-  %a = call i32 @llvm.riscv.vpopc.i32.nxv1i1(
+  %a = call i32 @llvm.riscv.vcpop.i32.nxv1i1(
   <vscale x 1 x i1> %0,
   i32 %1)

   ret i32 %a
 }

-declare i32 @llvm.riscv.vpopc.mask.i32.nxv1i1(
+declare i32 @llvm.riscv.vcpop.mask.i32.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
   i32);

-define i32 @intrinsic_vpopc_mask_m_i32_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv1i1:
+define i32 @intrinsic_vcpop_mask_m_i32_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv1i1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a0, v9, v0.t
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
-  %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv1i1(
+  %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv1i1(
   <vscale x 1 x i1> %0,
   <vscale x 1 x i1> %1,
   i32 %2)
@@ -41,39 +41,39 @@
   ret i32 %a
 }

-declare i32 @llvm.riscv.vpopc.i32.nxv2i1(
+declare i32 @llvm.riscv.vcpop.i32.nxv2i1(
   <vscale x 2 x i1>,
   i32);

-define i32 @intrinsic_vpopc_m_i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv2i1:
+define i32 @intrinsic_vcpop_m_i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv2i1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: ret
 entry:
-  %a = call i32 @llvm.riscv.vpopc.i32.nxv2i1(
+  %a = call i32 @llvm.riscv.vcpop.i32.nxv2i1(
   <vscale x 2 x i1> %0,
   i32 %1)

   ret i32 %a
 }

-declare i32 @llvm.riscv.vpopc.mask.i32.nxv2i1(
+declare i32 @llvm.riscv.vcpop.mask.i32.nxv2i1(
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
   i32);

-define i32 @intrinsic_vpopc_mask_m_i32_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv2i1:
+define i32 @intrinsic_vcpop_mask_m_i32_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv2i1:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4,
ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a0, v9, v0.t +; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv2i1( + %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv2i1( %0, %1, i32 %2) @@ -81,39 +81,39 @@ ret i32 %a } -declare i32 @llvm.riscv.vpopc.i32.nxv4i1( +declare i32 @llvm.riscv.vcpop.i32.nxv4i1( , i32); -define i32 @intrinsic_vpopc_m_i32_nxv4i1( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv4i1: +define i32 @intrinsic_vcpop_m_i32_nxv4i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vpopc.m a0, v0 +; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: - %a = call i32 @llvm.riscv.vpopc.i32.nxv4i1( + %a = call i32 @llvm.riscv.vcpop.i32.nxv4i1( %0, i32 %1) ret i32 %a } -declare i32 @llvm.riscv.vpopc.mask.i32.nxv4i1( +declare i32 @llvm.riscv.vcpop.mask.i32.nxv4i1( , , i32); -define i32 @intrinsic_vpopc_mask_m_i32_nxv4i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv4i1: +define i32 @intrinsic_vcpop_mask_m_i32_nxv4i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a0, v9, v0.t +; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv4i1( + %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv4i1( %0, %1, i32 %2) @@ -121,39 +121,39 @@ ret i32 %a } -declare i32 @llvm.riscv.vpopc.i32.nxv8i1( +declare i32 @llvm.riscv.vcpop.i32.nxv8i1( , i32); -define i32 @intrinsic_vpopc_m_i32_nxv8i1( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv8i1: +define i32 @intrinsic_vcpop_m_i32_nxv8i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vpopc.m a0, v0 +; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: - %a = call i32 @llvm.riscv.vpopc.i32.nxv8i1( + %a = call i32 @llvm.riscv.vcpop.i32.nxv8i1( %0, i32 %1) ret i32 %a } -declare i32 @llvm.riscv.vpopc.mask.i32.nxv8i1( +declare i32 @llvm.riscv.vcpop.mask.i32.nxv8i1( , , i32); -define i32 @intrinsic_vpopc_mask_m_i32_nxv8i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv8i1: +define i32 @intrinsic_vcpop_mask_m_i32_nxv8i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a0, v9, v0.t +; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv8i1( + %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv8i1( %0, %1, i32 %2) @@ -161,39 +161,39 @@ ret i32 %a } -declare i32 @llvm.riscv.vpopc.i32.nxv16i1( +declare i32 @llvm.riscv.vcpop.i32.nxv16i1( , i32); -define i32 @intrinsic_vpopc_m_i32_nxv16i1( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv16i1: +define i32 @intrinsic_vcpop_m_i32_nxv16i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vpopc.m a0, v0 +; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: - %a = call i32 @llvm.riscv.vpopc.i32.nxv16i1( + %a = call i32 @llvm.riscv.vcpop.i32.nxv16i1( %0, i32 %1) ret i32 %a 
} -declare i32 @llvm.riscv.vpopc.mask.i32.nxv16i1( +declare i32 @llvm.riscv.vcpop.mask.i32.nxv16i1( , , i32); -define i32 @intrinsic_vpopc_mask_m_i32_nxv16i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv16i1: +define i32 @intrinsic_vcpop_mask_m_i32_nxv16i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a0, v9, v0.t +; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv16i1( + %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv16i1( %0, %1, i32 %2) @@ -201,39 +201,39 @@ ret i32 %a } -declare i32 @llvm.riscv.vpopc.i32.nxv32i1( +declare i32 @llvm.riscv.vcpop.i32.nxv32i1( , i32); -define i32 @intrinsic_vpopc_m_i32_nxv32i1( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv32i1: +define i32 @intrinsic_vcpop_m_i32_nxv32i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vpopc.m a0, v0 +; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: - %a = call i32 @llvm.riscv.vpopc.i32.nxv32i1( + %a = call i32 @llvm.riscv.vcpop.i32.nxv32i1( %0, i32 %1) ret i32 %a } -declare i32 @llvm.riscv.vpopc.mask.i32.nxv32i1( +declare i32 @llvm.riscv.vcpop.mask.i32.nxv32i1( , , i32); -define i32 @intrinsic_vpopc_mask_m_i32_nxv32i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv32i1: +define i32 @intrinsic_vcpop_mask_m_i32_nxv32i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a0, v9, v0.t +; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv32i1( + %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv32i1( %0, %1, i32 %2) @@ -241,39 +241,39 @@ ret i32 %a } -declare i32 @llvm.riscv.vpopc.i32.nxv64i1( +declare i32 @llvm.riscv.vcpop.i32.nxv64i1( , i32); -define i32 @intrinsic_vpopc_m_i32_nxv64i1( %0, i32 %1) nounwind { -; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv64i1: +define i32 @intrinsic_vcpop_m_i32_nxv64i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vpopc.m a0, v0 +; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: - %a = call i32 @llvm.riscv.vpopc.i32.nxv64i1( + %a = call i32 @llvm.riscv.vcpop.i32.nxv64i1( %0, i32 %1) ret i32 %a } -declare i32 @llvm.riscv.vpopc.mask.i32.nxv64i1( +declare i32 @llvm.riscv.vcpop.mask.i32.nxv64i1( , , i32); -define i32 @intrinsic_vpopc_mask_m_i32_nxv64i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv64i1: +define i32 @intrinsic_vcpop_mask_m_i32_nxv64i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a0, v9, v0.t +; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv64i1( + %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv64i1( %0, %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop-rv64.ll rename from 
llvm/test/CodeGen/RISCV/rvv/vpopc-rv64.ll rename to llvm/test/CodeGen/RISCV/rvv/vcpop-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vcpop-rv64.ll @@ -1,39 +1,39 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare i64 @llvm.riscv.vpopc.i64.nxv1i1( +declare i64 @llvm.riscv.vcpop.i64.nxv1i1( , i64); -define i64 @intrinsic_vpopc_m_i64_nxv1i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv1i1: +define i64 @intrinsic_vcpop_m_i64_nxv1i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vpopc.m a0, v0 +; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: - %a = call i64 @llvm.riscv.vpopc.i64.nxv1i1( + %a = call i64 @llvm.riscv.vcpop.i64.nxv1i1( %0, i64 %1) ret i64 %a } -declare i64 @llvm.riscv.vpopc.mask.i64.nxv1i1( +declare i64 @llvm.riscv.vcpop.mask.i64.nxv1i1( , , i64); -define i64 @intrinsic_vpopc_mask_m_i64_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv1i1: +define i64 @intrinsic_vcpop_mask_m_i64_nxv1i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a0, v9, v0.t +; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv1i1( + %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv1i1( %0, %1, i64 %2) @@ -41,39 +41,39 @@ ret i64 %a } -declare i64 @llvm.riscv.vpopc.i64.nxv2i1( +declare i64 @llvm.riscv.vcpop.i64.nxv2i1( , i64); -define i64 @intrinsic_vpopc_m_i64_nxv2i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv2i1: +define i64 @intrinsic_vcpop_m_i64_nxv2i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vpopc.m a0, v0 +; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: - %a = call i64 @llvm.riscv.vpopc.i64.nxv2i1( + %a = call i64 @llvm.riscv.vcpop.i64.nxv2i1( %0, i64 %1) ret i64 %a } -declare i64 @llvm.riscv.vpopc.mask.i64.nxv2i1( +declare i64 @llvm.riscv.vcpop.mask.i64.nxv2i1( , , i64); -define i64 @intrinsic_vpopc_mask_m_i64_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv2i1: +define i64 @intrinsic_vcpop_mask_m_i64_nxv2i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a0, v9, v0.t +; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv2i1( + %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv2i1( %0, %1, i64 %2) @@ -81,39 +81,39 @@ ret i64 %a } -declare i64 @llvm.riscv.vpopc.i64.nxv4i1( +declare i64 @llvm.riscv.vcpop.i64.nxv4i1( , i64); -define i64 @intrinsic_vpopc_m_i64_nxv4i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv4i1: +define i64 @intrinsic_vcpop_m_i64_nxv4i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vpopc.m a0, v0 +; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: - %a = call i64 
@llvm.riscv.vpopc.i64.nxv4i1( + %a = call i64 @llvm.riscv.vcpop.i64.nxv4i1( %0, i64 %1) ret i64 %a } -declare i64 @llvm.riscv.vpopc.mask.i64.nxv4i1( +declare i64 @llvm.riscv.vcpop.mask.i64.nxv4i1( , , i64); -define i64 @intrinsic_vpopc_mask_m_i64_nxv4i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv4i1: +define i64 @intrinsic_vcpop_mask_m_i64_nxv4i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a0, v9, v0.t +; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv4i1( + %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv4i1( %0, %1, i64 %2) @@ -121,39 +121,39 @@ ret i64 %a } -declare i64 @llvm.riscv.vpopc.i64.nxv8i1( +declare i64 @llvm.riscv.vcpop.i64.nxv8i1( , i64); -define i64 @intrinsic_vpopc_m_i64_nxv8i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv8i1: +define i64 @intrinsic_vcpop_m_i64_nxv8i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vpopc.m a0, v0 +; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: - %a = call i64 @llvm.riscv.vpopc.i64.nxv8i1( + %a = call i64 @llvm.riscv.vcpop.i64.nxv8i1( %0, i64 %1) ret i64 %a } -declare i64 @llvm.riscv.vpopc.mask.i64.nxv8i1( +declare i64 @llvm.riscv.vcpop.mask.i64.nxv8i1( , , i64); -define i64 @intrinsic_vpopc_mask_m_i64_nxv8i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv8i1: +define i64 @intrinsic_vcpop_mask_m_i64_nxv8i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a0, v9, v0.t +; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv8i1( + %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv8i1( %0, %1, i64 %2) @@ -161,39 +161,39 @@ ret i64 %a } -declare i64 @llvm.riscv.vpopc.i64.nxv16i1( +declare i64 @llvm.riscv.vcpop.i64.nxv16i1( , i64); -define i64 @intrinsic_vpopc_m_i64_nxv16i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv16i1: +define i64 @intrinsic_vcpop_m_i64_nxv16i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vpopc.m a0, v0 +; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: - %a = call i64 @llvm.riscv.vpopc.i64.nxv16i1( + %a = call i64 @llvm.riscv.vcpop.i64.nxv16i1( %0, i64 %1) ret i64 %a } -declare i64 @llvm.riscv.vpopc.mask.i64.nxv16i1( +declare i64 @llvm.riscv.vcpop.mask.i64.nxv16i1( , , i64); -define i64 @intrinsic_vpopc_mask_m_i64_nxv16i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv16i1: +define i64 @intrinsic_vcpop_mask_m_i64_nxv16i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a0, v9, v0.t +; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv16i1( + %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv16i1( %0, %1, i64 %2) @@ -201,39 +201,39 @@ ret i64 %a } -declare i64 
@llvm.riscv.vpopc.i64.nxv32i1( +declare i64 @llvm.riscv.vcpop.i64.nxv32i1( , i64); -define i64 @intrinsic_vpopc_m_i64_nxv32i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv32i1: +define i64 @intrinsic_vcpop_m_i64_nxv32i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vpopc.m a0, v0 +; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: - %a = call i64 @llvm.riscv.vpopc.i64.nxv32i1( + %a = call i64 @llvm.riscv.vcpop.i64.nxv32i1( %0, i64 %1) ret i64 %a } -declare i64 @llvm.riscv.vpopc.mask.i64.nxv32i1( +declare i64 @llvm.riscv.vcpop.mask.i64.nxv32i1( , , i64); -define i64 @intrinsic_vpopc_mask_m_i64_nxv32i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv32i1: +define i64 @intrinsic_vcpop_mask_m_i64_nxv32i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a0, v9, v0.t +; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv32i1( + %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv32i1( %0, %1, i64 %2) @@ -241,39 +241,39 @@ ret i64 %a } -declare i64 @llvm.riscv.vpopc.i64.nxv64i1( +declare i64 @llvm.riscv.vcpop.i64.nxv64i1( , i64); -define i64 @intrinsic_vpopc_m_i64_nxv64i1( %0, i64 %1) nounwind { -; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv64i1: +define i64 @intrinsic_vcpop_m_i64_nxv64i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vpopc.m a0, v0 +; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: - %a = call i64 @llvm.riscv.vpopc.i64.nxv64i1( + %a = call i64 @llvm.riscv.vcpop.i64.nxv64i1( %0, i64 %1) ret i64 %a } -declare i64 @llvm.riscv.vpopc.mask.i64.nxv64i1( +declare i64 @llvm.riscv.vcpop.mask.i64.nxv64i1( , , i64); -define i64 @intrinsic_vpopc_mask_m_i64_nxv64i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv64i1: +define i64 @intrinsic_vcpop_mask_m_i64_nxv64i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 -; CHECK-NEXT: vpopc.m a0, v9, v0.t +; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret entry: - %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv64i1( + %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv64i1( %0, %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmandn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmandn-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmandn-rv32.ll @@ -0,0 +1,142 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \ +; RUN: < %s | FileCheck %s +declare @llvm.riscv.vmandn.nxv1i1( + , + , + i32); + +define @intrinsic_vmandn_mm_nxv1i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmandn_mm_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmandn.nxv1i1( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmandn.nxv2i1( + , + , + i32); + +define @intrinsic_vmandn_mm_nxv2i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vmandn_mm_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmandn.nxv2i1( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmandn.nxv4i1( + , + , + i32); + +define @intrinsic_vmandn_mm_nxv4i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmandn_mm_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmandn.nxv4i1( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmandn.nxv8i1( + , + , + i32); + +define @intrinsic_vmandn_mm_nxv8i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmandn_mm_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmandn.nxv8i1( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmandn.nxv16i1( + , + , + i32); + +define @intrinsic_vmandn_mm_nxv16i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmandn_mm_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmandn.nxv16i1( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmandn.nxv32i1( + , + , + i32); + +define @intrinsic_vmandn_mm_nxv32i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmandn_mm_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmandn.nxv32i1( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmandn.nxv64i1( + , + , + i32); + +define @intrinsic_vmandn_mm_nxv64i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmandn_mm_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmandn.nxv64i1( + %0, + %1, + i32 %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmandn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmandn-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmandn-rv64.ll @@ -0,0 +1,142 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \ +; RUN: < %s | FileCheck %s +declare @llvm.riscv.vmandn.nxv1i1( + , + , + i64); + +define @intrinsic_vmandn_mm_nxv1i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmandn_mm_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmandn.nxv1i1( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmandn.nxv2i1( + , + , + i64); + +define @intrinsic_vmandn_mm_nxv2i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmandn_mm_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmandn.nxv2i1( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmandn.nxv4i1( + , + , + i64); + +define @intrinsic_vmandn_mm_nxv4i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmandn_mm_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: 
vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmandn.nxv4i1( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmandn.nxv8i1( + , + , + i64); + +define @intrinsic_vmandn_mm_nxv8i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmandn_mm_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmandn.nxv8i1( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmandn.nxv16i1( + , + , + i64); + +define @intrinsic_vmandn_mm_nxv16i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmandn_mm_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmandn.nxv16i1( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmandn.nxv32i1( + , + , + i64); + +define @intrinsic_vmandn_mm_nxv32i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmandn_mm_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmandn.nxv32i1( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmandn.nxv64i1( + , + , + i64); + +define @intrinsic_vmandn_mm_nxv64i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmandn_mm_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmandn.nxv64i1( + %0, + %1, + i64 %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv32.ll +++ /dev/null @@ -1,142 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmandnot.nxv1i1( - , - , - i32); - -define @intrinsic_vmandnot_mm_nxv1i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandnot_mm_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandnot.nxv1i1( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmandnot.nxv2i1( - , - , - i32); - -define @intrinsic_vmandnot_mm_nxv2i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandnot_mm_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandnot.nxv2i1( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmandnot.nxv4i1( - , - , - i32); - -define @intrinsic_vmandnot_mm_nxv4i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandnot_mm_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandnot.nxv4i1( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmandnot.nxv8i1( - , - , - i32); - -define @intrinsic_vmandnot_mm_nxv8i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandnot_mm_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vmandnot.nxv8i1( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmandnot.nxv16i1( - , - , - i32); - -define @intrinsic_vmandnot_mm_nxv16i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandnot_mm_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandnot.nxv16i1( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmandnot.nxv32i1( - , - , - i32); - -define @intrinsic_vmandnot_mm_nxv32i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandnot_mm_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandnot.nxv32i1( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmandnot.nxv64i1( - , - , - i32); - -define @intrinsic_vmandnot_mm_nxv64i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandnot_mm_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandnot.nxv64i1( - %0, - %1, - i32 %2) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmandnot-rv64.ll +++ /dev/null @@ -1,142 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmandnot.nxv1i1( - , - , - i64); - -define @intrinsic_vmandnot_mm_nxv1i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandnot_mm_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandnot.nxv1i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmandnot.nxv2i1( - , - , - i64); - -define @intrinsic_vmandnot_mm_nxv2i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandnot_mm_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandnot.nxv2i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmandnot.nxv4i1( - , - , - i64); - -define @intrinsic_vmandnot_mm_nxv4i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandnot_mm_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandnot.nxv4i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmandnot.nxv8i1( - , - , - i64); - -define @intrinsic_vmandnot_mm_nxv8i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandnot_mm_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandnot.nxv8i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmandnot.nxv16i1( - , - , - i64); - -define @intrinsic_vmandnot_mm_nxv16i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandnot_mm_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandnot.nxv16i1( - %0, 
- %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmandnot.nxv32i1( - , - , - i64); - -define @intrinsic_vmandnot_mm_nxv32i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandnot_mm_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandnot.nxv32i1( - %0, - %1, - i64 %2) - - ret %a -} - -declare @llvm.riscv.vmandnot.nxv64i1( - , - , - i64); - -define @intrinsic_vmandnot_mm_nxv64i1( %0, %1, i64 %2) nounwind { -; CHECK-LABEL: intrinsic_vmandnot_mm_nxv64i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmandnot.nxv64i1( - %0, - %1, - i64 %2) - - ret %a -} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll @@ -347,11 +347,11 @@ ret %not } -define @vmandnot_vv_nxv1i1( %va, %vb) { -; CHECK-LABEL: vmandnot_vv_nxv1i1: +define @vmandn_vv_nxv1i1( %va, %vb) { +; CHECK-LABEL: vmandn_vv_nxv1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 +; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -360,11 +360,11 @@ ret %vc } -define @vmandnot_vv_nxv2i1( %va, %vb) { -; CHECK-LABEL: vmandnot_vv_nxv2i1: +define @vmandn_vv_nxv2i1( %va, %vb) { +; CHECK-LABEL: vmandn_vv_nxv2i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 +; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -373,11 +373,11 @@ ret %vc } -define @vmandnot_vv_nxv4i1( %va, %vb) { -; CHECK-LABEL: vmandnot_vv_nxv4i1: +define @vmandn_vv_nxv4i1( %va, %vb) { +; CHECK-LABEL: vmandn_vv_nxv4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 +; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -386,11 +386,11 @@ ret %vc } -define @vmandnot_vv_nxv8i1( %va, %vb) { -; CHECK-LABEL: vmandnot_vv_nxv8i1: +define @vmandn_vv_nxv8i1( %va, %vb) { +; CHECK-LABEL: vmandn_vv_nxv8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 +; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -399,11 +399,11 @@ ret %vc } -define @vmandnot_vv_nxv16i1( %va, %vb) { -; CHECK-LABEL: vmandnot_vv_nxv16i1: +define @vmandn_vv_nxv16i1( %va, %vb) { +; CHECK-LABEL: vmandn_vv_nxv16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu -; CHECK-NEXT: vmandnot.mm v0, v0, v8 +; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -412,11 +412,11 @@ ret %vc } -define @vmornot_vv_nxv1i1( %va, %vb) { -; CHECK-LABEL: vmornot_vv_nxv1i1: +define @vmorn_vv_nxv1i1( %va, %vb) { +; CHECK-LABEL: vmorn_vv_nxv1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu -; CHECK-NEXT: vmornot.mm v0, v0, v8 +; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement 
undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -425,11 +425,11 @@ ret %vc } -define @vmornot_vv_nxv2i1( %va, %vb) { -; CHECK-LABEL: vmornot_vv_nxv2i1: +define @vmorn_vv_nxv2i1( %va, %vb) { +; CHECK-LABEL: vmorn_vv_nxv2i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu -; CHECK-NEXT: vmornot.mm v0, v0, v8 +; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -438,11 +438,11 @@ ret %vc } -define @vmornot_vv_nxv4i1( %va, %vb) { -; CHECK-LABEL: vmornot_vv_nxv4i1: +define @vmorn_vv_nxv4i1( %va, %vb) { +; CHECK-LABEL: vmorn_vv_nxv4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu -; CHECK-NEXT: vmornot.mm v0, v0, v8 +; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -451,11 +451,11 @@ ret %vc } -define @vmornot_vv_nxv8i1( %va, %vb) { -; CHECK-LABEL: vmornot_vv_nxv8i1: +define @vmorn_vv_nxv8i1( %va, %vb) { +; CHECK-LABEL: vmorn_vv_nxv8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu -; CHECK-NEXT: vmornot.mm v0, v0, v8 +; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -464,11 +464,11 @@ ret %vc } -define @vmornot_vv_nxv16i1( %va, %vb) { -; CHECK-LABEL: vmornot_vv_nxv16i1: +define @vmorn_vv_nxv16i1( %va, %vb) { +; CHECK-LABEL: vmorn_vv_nxv16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu -; CHECK-NEXT: vmornot.mm v0, v0, v8 +; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement undef, i1 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/vmorn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmorn-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmorn-rv32.ll @@ -0,0 +1,142 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \ +; RUN: < %s | FileCheck %s +declare @llvm.riscv.vmorn.nxv1i1( + , + , + i32); + +define @intrinsic_vmorn_mm_nxv1i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmorn_mm_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmorn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmorn.nxv1i1( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmorn.nxv2i1( + , + , + i32); + +define @intrinsic_vmorn_mm_nxv2i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmorn_mm_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmorn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmorn.nxv2i1( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmorn.nxv4i1( + , + , + i32); + +define @intrinsic_vmorn_mm_nxv4i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmorn_mm_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmorn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmorn.nxv4i1( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmorn.nxv8i1( + , + , + i32); + +define @intrinsic_vmorn_mm_nxv8i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmorn_mm_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmorn.mm 
v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmorn.nxv8i1( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmorn.nxv16i1( + , + , + i32); + +define @intrinsic_vmorn_mm_nxv16i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmorn_mm_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmorn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmorn.nxv16i1( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmorn.nxv32i1( + , + , + i32); + +define @intrinsic_vmorn_mm_nxv32i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmorn_mm_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmorn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmorn.nxv32i1( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmorn.nxv64i1( + , + , + i32); + +define @intrinsic_vmorn_mm_nxv64i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmorn_mm_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vmorn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmorn.nxv64i1( + %0, + %1, + i32 %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmorn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmorn-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmorn-rv64.ll @@ -0,0 +1,142 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \ +; RUN: < %s | FileCheck %s +declare @llvm.riscv.vmorn.nxv1i1( + , + , + i64); + +define @intrinsic_vmorn_mm_nxv1i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmorn_mm_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmorn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmorn.nxv1i1( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmorn.nxv2i1( + , + , + i64); + +define @intrinsic_vmorn_mm_nxv2i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmorn_mm_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmorn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmorn.nxv2i1( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmorn.nxv4i1( + , + , + i64); + +define @intrinsic_vmorn_mm_nxv4i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmorn_mm_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmorn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmorn.nxv4i1( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmorn.nxv8i1( + , + , + i64); + +define @intrinsic_vmorn_mm_nxv8i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmorn_mm_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmorn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmorn.nxv8i1( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmorn.nxv16i1( + , + , + i64); + +define @intrinsic_vmorn_mm_nxv16i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmorn_mm_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmorn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmorn.nxv16i1( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmorn.nxv32i1( + , + , + i64); + 
+define @intrinsic_vmorn_mm_nxv32i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmorn_mm_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmorn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmorn.nxv32i1( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmorn.nxv64i1( + , + , + i64); + +define @intrinsic_vmorn_mm_nxv64i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmorn_mm_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vmorn.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmorn.nxv64i1( + %0, + %1, + i64 %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll deleted file mode 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll +++ /dev/null @@ -1,142 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \ -; RUN: < %s | FileCheck %s -declare @llvm.riscv.vmornot.nxv1i1( - , - , - i32); - -define @intrinsic_vmornot_mm_nxv1i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmornot_mm_nxv1i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu -; CHECK-NEXT: vmornot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmornot.nxv1i1( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmornot.nxv2i1( - , - , - i32); - -define @intrinsic_vmornot_mm_nxv2i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmornot_mm_nxv2i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu -; CHECK-NEXT: vmornot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmornot.nxv2i1( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmornot.nxv4i1( - , - , - i32); - -define @intrinsic_vmornot_mm_nxv4i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmornot_mm_nxv4i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu -; CHECK-NEXT: vmornot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmornot.nxv4i1( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmornot.nxv8i1( - , - , - i32); - -define @intrinsic_vmornot_mm_nxv8i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmornot_mm_nxv8i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu -; CHECK-NEXT: vmornot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmornot.nxv8i1( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmornot.nxv16i1( - , - , - i32); - -define @intrinsic_vmornot_mm_nxv16i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmornot_mm_nxv16i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu -; CHECK-NEXT: vmornot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmornot.nxv16i1( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmornot.nxv32i1( - , - , - i32); - -define @intrinsic_vmornot_mm_nxv32i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vmornot_mm_nxv32i1: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu -; CHECK-NEXT: vmornot.mm v0, v0, v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vmornot.nxv32i1( - %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vmornot.nxv64i1( - , - , - i32); - -define @intrinsic_vmornot_mm_nxv64i1( %0, %1, i32 %2) nounwind { -; CHECK-LABEL: 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv32.ll
+++ /dev/null
@@ -1,142 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
- <vscale x 1 x i1>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmornot_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv1i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i1> %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
- <vscale x 2 x i1>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmornot_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv2i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i1> %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
- <vscale x 4 x i1>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmornot_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv4i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i1> %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
- <vscale x 8 x i1>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmornot_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv8i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i1> %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
- <vscale x 16 x i1>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmornot_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv16i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i1> %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
- <vscale x 32 x i1>,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x i1> @intrinsic_vmornot_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv32i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i1> %1,
- i32 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
- <vscale x 64 x i1>,
- <vscale x 64 x i1>,
- i32);
-
-define <vscale x 64 x i1> @intrinsic_vmornot_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv64i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
- <vscale x 64 x i1> %0,
- <vscale x 64 x i1> %1,
- i32 %2)
-
- ret <vscale x 64 x i1> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll
deleted file mode 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmornot-rv64.ll
+++ /dev/null
@@ -1,142 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
- <vscale x 1 x i1>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmornot_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv1i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i1> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
- <vscale x 2 x i1>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmornot_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv2i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i1> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
- <vscale x 4 x i1>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmornot_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv4i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i1> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
- <vscale x 8 x i1>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmornot_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv8i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i1> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
- <vscale x 16 x i1>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmornot_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv16i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i1> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
- <vscale x 32 x i1>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmornot_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv32i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i1> %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
- <vscale x 64 x i1>,
- <vscale x 64 x i1>,
- i64);
-
-define <vscale x 64 x i1> @intrinsic_vmornot_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv64i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
- <vscale x 64 x i1> %0,
- <vscale x 64 x i1> %1,
- i64 %2)
-
- ret <vscale x 64 x i1> %a
-}
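The vmsge and vmsgeu hunks that follow touch autogenerated CHECK lines only: there is no native vmsge(u).vx encoding, so the backend expands a masked register-scalar >= compare into a vmslt(u).vx followed by vmandn.mm, which ands the inverted compare result back into the mask. A rough C equivalent of that expansion, assuming the overloaded `vmslt`/`vmandn` intrinsic spellings:

#include <riscv_vector.h>

// Masked a >= b is materialized as mask & ~(a < b):
// vmslt.vx computes a < b, vmandn.mm clears those lanes from the mask.
vbool8_t masked_sge(vbool8_t mask, vint8m1_t a, int8_t b, size_t vl) {
  vbool8_t lt = vmslt(a, b, vl);  // vmslt.vx vX, v8, a0
  return vmandn(mask, lt, vl);    // vmandn.mm v0, v0, vX
}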
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
@@ -2470,7 +2470,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
@@ -2488,7 +2488,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
@@ -2506,7 +2506,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
@@ -2524,7 +2524,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
@@ -2542,7 +2542,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
@@ -2560,7 +2560,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
@@ -2578,7 +2578,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
@@ -2596,7 +2596,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
@@ -2614,7 +2614,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
@@ -2632,7 +2632,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
@@ -2650,7 +2650,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
@@ -2668,7 +2668,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
@@ -2686,7 +2686,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
@@ -2704,7 +2704,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
@@ -2722,7 +2722,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
@@ -2437,7 +2437,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
@@ -2455,7 +2455,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
@@ -2473,7 +2473,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
@@ -2491,7 +2491,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
@@ -2509,7 +2509,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
@@ -2527,7 +2527,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
@@ -2545,7 +2545,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
@@ -2563,7 +2563,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
@@ -2581,7 +2581,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
@@ -2599,7 +2599,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
@@ -2617,7 +2617,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
@@ -2635,7 +2635,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
@@ -2653,7 +2653,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
@@ -2671,7 +2671,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
@@ -2689,7 +2689,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
@@ -2707,7 +2707,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
@@ -2725,7 +2725,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
@@ -2743,7 +2743,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
@@ -2470,7 +2470,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
@@ -2488,7 +2488,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
@@ -2506,7 +2506,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
@@ -2524,7 +2524,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
@@ -2542,7 +2542,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
@@ -2560,7 +2560,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
@@ -2578,7 +2578,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
@@ -2596,7 +2596,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
@@ -2614,7 +2614,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
@@ -2632,7 +2632,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
@@ -2650,7 +2650,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
@@ -2668,7 +2668,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
@@ -2686,7 +2686,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
@@ -2704,7 +2704,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
@@ -2722,7 +2722,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
@@ -2437,7 +2437,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
@@ -2455,7 +2455,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
@@ -2473,7 +2473,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
@@ -2491,7 +2491,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
@@ -2509,7 +2509,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
@@ -2527,7 +2527,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
@@ -2545,7 +2545,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
@@ -2563,7 +2563,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
@@ -2581,7 +2581,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
@@ -2599,7 +2599,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
@@ -2617,7 +2617,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
@@ -2635,7 +2635,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
@@ -2653,7 +2653,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
@@ -2671,7 +2671,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
@@ -2689,7 +2689,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
@@ -2707,7 +2707,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
@@ -2725,7 +2725,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
@@ -2743,7 +2743,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
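The mask-reduction tests below all funnel through vcpop.m: an or-reduction of a mask is vcpop(m) != 0 (snez), an xor-reduction is vcpop(m) & 1 (andi), and an and-reduction is checked as vcpop(~m) == 0 (vmnand plus seqz). In sketch form, using the renamed overloaded intrinsics (`vmnot` here is assumed to be the vmnand-based pseudoinstruction helper):

#include <riscv_vector.h>

long reduce_or(vbool8_t m, size_t vl)  { return vcpop(m, vl) != 0; }  // snez
long reduce_xor(vbool8_t m, size_t vl) { return vcpop(m, vl) & 1;  }  // andi a0, a0, 1
long reduce_and(vbool8_t m, size_t vl) {
  return vcpop(vmnot(m, vl), vl) == 0;  // vmnand.mm then seqz
}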
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
@@ -10,7 +10,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT: vmnand.mm v9, v0, v0
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: seqz a1, a1
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
@@ -27,7 +27,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: snez a1, a1
 ; CHECK-NEXT: or a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
@@ -45,7 +45,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: xor a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
@@ -62,7 +62,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT: vmnand.mm v9, v0, v0
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: seqz a1, a1
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
@@ -79,7 +79,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: snez a1, a1
 ; CHECK-NEXT: or a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
@@ -97,7 +97,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: xor a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
@@ -114,7 +114,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT: vmnand.mm v9, v0, v0
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: seqz a1, a1
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
@@ -131,7 +131,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: snez a1, a1
 ; CHECK-NEXT: or a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
@@ -149,7 +149,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: xor a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
@@ -166,7 +166,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT: vmnand.mm v9, v0, v0
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: seqz a1, a1
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
@@ -183,7 +183,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: snez a1, a1
 ; CHECK-NEXT: or a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
@@ -201,7 +201,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: xor a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
@@ -218,7 +218,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT: vmnand.mm v9, v0, v0
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: seqz a1, a1
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
@@ -235,7 +235,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: snez a1, a1
 ; CHECK-NEXT: or a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
@@ -253,7 +253,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: xor a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
@@ -270,7 +270,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT: vmnand.mm v9, v0, v0
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: seqz a1, a1
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
@@ -287,7 +287,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: snez a1, a1
 ; CHECK-NEXT: or a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
@@ -305,7 +305,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: xor a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
@@ -322,7 +322,7 @@
 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT: vmnand.mm v9, v0, v0
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: seqz a1, a1
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
@@ -339,7 +339,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: snez a1, a1
 ; CHECK-NEXT: or a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
@@ -357,7 +357,7 @@
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
 ; CHECK-NEXT: xor a0, a1, a0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll
@@ -8,7 +8,7 @@
 ; CHECK-LABEL: vreduce_or_nxv1i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -22,7 +22,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv1i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -37,7 +37,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
 ; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
 ; CHECK-NEXT: seqz a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -51,7 +51,7 @@
 ; CHECK-LABEL: vreduce_or_nxv2i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -65,7 +65,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv2i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -80,7 +80,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
 ; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
 ; CHECK-NEXT: seqz a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -94,7 +94,7 @@
 ; CHECK-LABEL: vreduce_or_nxv4i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -108,7 +108,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv4i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -123,7 +123,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
 ; CHECK-NEXT: seqz a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -137,7 +137,7 @@
 ; CHECK-LABEL: vreduce_or_nxv8i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -151,7 +151,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv8i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -166,7 +166,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
 ; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
 ; CHECK-NEXT: seqz a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -180,7 +180,7 @@
 ; CHECK-LABEL: vreduce_or_nxv16i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -194,7 +194,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv16i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -209,7 +209,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
 ; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
 ; CHECK-NEXT: seqz a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -223,7 +223,7 @@
 ; CHECK-LABEL: vreduce_or_nxv32i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -237,7 +237,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv32i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -252,7 +252,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
 ; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
 ; CHECK-NEXT: seqz a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -266,7 +266,7 @@
 ; CHECK-LABEL: vreduce_or_nxv64i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -280,7 +280,7 @@
 ; CHECK-LABEL: vreduce_xor_nxv64i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
@@ -295,7 +295,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
 ; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
 ; CHECK-NEXT: seqz a0, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-mask.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-mask.ll
@@ -6,7 +6,7 @@
 ; CHECK-LABEL: vselect_nxv1i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -18,7 +18,7 @@
 ; CHECK-LABEL: vselect_nxv2i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -30,7 +30,7 @@
 ; CHECK-LABEL: vselect_nxv4i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -42,7 +42,7 @@
 ; CHECK-LABEL: vselect_nxv8i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -54,7 +54,7 @@
 ; CHECK-LABEL: vselect_nxv16i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -66,7 +66,7 @@
 ; CHECK-LABEL: vselect_nxv32i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
@@ -78,7 +78,7 @@
 ; CHECK-LABEL: vselect_nxv64i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
 ; CHECK-NEXT: vmand.mm v9, v0, v9
 ; CHECK-NEXT: vmor.mm v0, v9, v8
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -91,7 +91,7 @@
     ret %d
   }
 
-  define void @vsetvli_vpopc() {
+  define void @vsetvli_vcpop() {
     ret void
   }
@@ -442,7 +442,7 @@
 ...
 ---
-name: vsetvli_vpopc
+name: vsetvli_vcpop
 tracksRegLiveness: true
 registers:
 - { id: 0, class: gpr, preferred-register: '' }
@@ -458,7 +458,7 @@
 - { id: 10, class: gpr, preferred-register: '' }
 - { id: 11, class: vr, preferred-register: '' }
 body: |
-  ; CHECK-LABEL: name: vsetvli_vpopc
+  ; CHECK-LABEL: name: vsetvli_vcpop
   ; CHECK: bb.0:
   ; CHECK-NEXT: successors: %bb.1(0x80000000)
   ; CHECK-NEXT: liveins: $x10, $x11
@@ -479,9 +479,9 @@
   ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 23, implicit-def $vl, implicit-def $vtype, implicit $vl
   ; CHECK-NEXT: [[PseudoVLE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], killed [[COPY]], $v0, -1, 5, 0, implicit $vl, implicit $vtype
   ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 69, implicit-def $vl, implicit-def $vtype, implicit $vl
-  ; CHECK-NEXT: [[PseudoVPOPC_M_B1_:%[0-9]+]]:gpr = PseudoVPOPC_M_B1 [[PseudoVMSEQ_VI_MF2_]], -1, 0, implicit $vl, implicit $vtype
+  ; CHECK-NEXT: [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[PseudoVMSEQ_VI_MF2_]], -1, 0, implicit $vl, implicit $vtype
   ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0
-  ; CHECK-NEXT: BEQ killed [[PseudoVPOPC_M_B1_]], [[COPY2]], %bb.3
+  ; CHECK-NEXT: BEQ killed [[PseudoVCPOP_M_B1_]], [[COPY2]], %bb.3
   ; CHECK-NEXT: PseudoBR %bb.2
   ; CHECK-NEXT: {{ $}}
   ; CHECK-NEXT: bb.2:
@@ -511,7 +511,7 @@
     %5:vmv0 = PseudoVMSEQ_VI_MF2 killed %3, 0, -1, 5
    $v0 = COPY %5
    %6:vrnov0 = PseudoVLE32_V_MF2_MASK %4, killed %0, $v0, -1, 5, 0
-   %7:gpr = PseudoVPOPC_M_B1 %5, -1, 0
+   %7:gpr = PseudoVCPOP_M_B1 %5, -1, 0
    %8:gpr = COPY $x0
    BEQ killed %7, %8, %bb.3
    PseudoBR %bb.2
diff --git a/llvm/test/MC/RISCV/rvv/aliases.s b/llvm/test/MC/RISCV/rvv/aliases.s
--- a/llvm/test/MC/RISCV/rvv/aliases.s
+++ b/llvm/test/MC/RISCV/rvv/aliases.s
@@ -90,3 +90,12 @@
 # ALIAS: vfwredusum.vs v8, v4, v20, v0.t # encoding: [0x57,0x14,0x4a,0xc4]
 # NO-ALIAS: vfwredusum.vs v8, v4, v20, v0.t # encoding: [0x57,0x14,0x4a,0xc4]
 vfwredsum.vs v8, v4, v20, v0.t
+# ALIAS: vcpop.m a2, v4, v0.t # encoding: [0x57,0x26,0x48,0x40]
+# NO-ALIAS: vcpop.m a2, v4, v0.t # encoding: [0x57,0x26,0x48,0x40]
+vpopc.m a2, v4, v0.t
+# ALIAS: vmandn.mm v8, v4, v20 # encoding: [0x57,0x24,0x4a,0x62]
+# NO-ALIAS: vmandn.mm v8, v4, v20 # encoding: [0x57,0x24,0x4a,0x62]
+vmandnot.mm v8, v4, v20
+# ALIAS: vmorn.mm v8, v4, v20 # encoding: [0x57,0x24,0x4a,0x72]
+# NO-ALIAS: vmorn.mm v8, v4, v20 # encoding: [0x57,0x24,0x4a,0x72]
+vmornot.mm v8, v4, v20
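The aliases.s additions just above pin down the compatibility story: the old spellings still assemble, to byte-identical encodings, as aliases of the new mnemonics. A hypothetical inline-assembly smoke test making the same point (both statements should emit the bytes [0x57,0x26,0x48,0x42] from mask.s below; v4 is assumed to hold a live mask):

// Old and new spellings of the unmasked population count are interchangeable.
void popcount_spellings(void) {
  __asm__ volatile("vpopc.m a2, v4" ::: "a2");  // legacy alias
  __asm__ volatile("vcpop.m a2, v4" ::: "a2");  // canonical mnemonic
}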
diff --git a/llvm/test/MC/RISCV/rvv/compare.s b/llvm/test/MC/RISCV/rvv/compare.s
--- a/llvm/test/MC/RISCV/rvv/compare.s
+++ b/llvm/test/MC/RISCV/rvv/compare.s
@@ -421,7 +421,7 @@
 vmsgeu.vx v0, v4, a0, v0.t, v2
 # CHECK-INST: vmsltu.vx v2, v4, a0, v0.t
-# CHECK-INST: vmandnot.mm v0, v0, v2
+# CHECK-INST: vmandn.mm v0, v0, v2
 # CHECK-ENCODING: [0x57,0x41,0x45,0x68]
 # CHECK-ENCODING: [0x57,0x20,0x01,0x62]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
@@ -430,7 +430,7 @@
 vmsge.vx v0, v4, a0, v0.t, v2
 # CHECK-INST: vmslt.vx v2, v4, a0, v0.t
-# CHECK-INST: vmandnot.mm v0, v0, v2
+# CHECK-INST: vmandn.mm v0, v0, v2
 # CHECK-ENCODING: [0x57,0x41,0x45,0x6c]
 # CHECK-ENCODING: [0x57,0x20,0x01,0x62]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
@@ -439,8 +439,8 @@
 vmsgeu.vx v9, v4, a0, v0.t, v2
 # CHECK-INST: vmsltu.vx v2, v4, a0
-# CHECK-INST: vmandnot.mm v2, v0, v2
-# CHECK-INST: vmandnot.mm v9, v9, v0
+# CHECK-INST: vmandn.mm v2, v0, v2
+# CHECK-INST: vmandn.mm v9, v9, v0
 # CHECK-INST: vmor.mm v9, v2, v9
 # CHECK-ENCODING: [0x57,0x41,0x45,0x6a]
 # CHECK-ENCODING: [0x57,0x21,0x01,0x62]
@@ -454,8 +454,8 @@
 vmsge.vx v8, v4, a0, v0.t, v2
 # CHECK-INST: vmslt.vx v2, v4, a0
-# CHECK-INST: vmandnot.mm v2, v0, v2
-# CHECK-INST: vmandnot.mm v8, v8, v0
+# CHECK-INST: vmandn.mm v2, v0, v2
+# CHECK-INST: vmandn.mm v8, v8, v0
 # CHECK-INST: vmor.mm v8, v2, v8
 # CHECK-ENCODING: [0x57,0x41,0x45,0x6e]
 # CHECK-ENCODING: [0x57,0x21,0x01,0x62]
diff --git a/llvm/test/MC/RISCV/rvv/mask.s b/llvm/test/MC/RISCV/rvv/mask.s
--- a/llvm/test/MC/RISCV/rvv/mask.s
+++ b/llvm/test/MC/RISCV/rvv/mask.s
@@ -20,8 +20,8 @@
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
 # CHECK-UNKNOWN: 57 24 4a 76
 
-vmandnot.mm v8, v4, v20
-# CHECK-INST: vmandnot.mm v8, v4, v20
+vmandn.mm v8, v4, v20
+# CHECK-INST: vmandn.mm v8, v4, v20
 # CHECK-ENCODING: [0x57,0x24,0x4a,0x62]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
 # CHECK-UNKNOWN: 57 24 4a 62
@@ -44,8 +44,8 @@
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
 # CHECK-UNKNOWN: 57 24 4a 7a
 
-vmornot.mm v8, v4, v20
-# CHECK-INST: vmornot.mm v8, v4, v20
+vmorn.mm v8, v4, v20
+# CHECK-INST: vmorn.mm v8, v4, v20
 # CHECK-ENCODING: [0x57,0x24,0x4a,0x72]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
 # CHECK-UNKNOWN: 57 24 4a 72
@@ -56,14 +56,14 @@
 # CHECK-ENCODING: [0x57,0x24,0x4a,0x7e]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
 # CHECK-UNKNOWN: 57 24 4a 7e
 
-vpopc.m a2, v4, v0.t
-# CHECK-INST: vpopc.m a2, v4, v0.t
+vcpop.m a2, v4, v0.t
+# CHECK-INST: vcpop.m a2, v4, v0.t
 # CHECK-ENCODING: [0x57,0x26,0x48,0x40]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
 # CHECK-UNKNOWN: 57 26 48 40
 
-vpopc.m a2, v4
-# CHECK-INST: vpopc.m a2, v4
+vcpop.m a2, v4
+# CHECK-INST: vcpop.m a2, v4
 # CHECK-ENCODING: [0x57,0x26,0x48,0x42]
 # CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
 # CHECK-UNKNOWN: 57 26 48 42
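Taken together, the MC tests keep both spellings assembling while CodeGen and the intrinsics emit only the new names. A closing sketch combining the renamed operations, again assuming the overloaded C spellings used by the clang tests in this patch:

#include <riscv_vector.h>

// Count candidate lanes that survive an exclusion mask: vmandn, then vcpop.
unsigned long count_survivors(vbool8_t candidates, vbool8_t excluded,
                              size_t vl) {
  vbool8_t kept = vmandn(candidates, excluded, vl);  // candidates & ~excluded
  return vcpop(kept, vl);                            // vcpop.m
}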