clang/test/CodeGen/riscv-rvv-vls-compare-ops.c
- This file was added.
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +zve64d \
// RUN: -target-feature +f -target-feature +d -disable-O0-optnone \
// RUN: -mvscale-min=4 -mvscale-max=4 -emit-llvm -o - %s | \
// RUN: opt -S -passes=sroa | FileCheck %s
// REQUIRES: riscv-registered-target
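//
// This test checks that C comparison operators (==, !=, <, <=, >, >=) on
// fixed-length RVV vector types (riscv_rvv_vector_bits, with VLEN fixed at
// 256 bits by -mvscale-min=4/-mvscale-max=4) lower to fixed-width icmp/fcmp
// plus sext, with llvm.vector.extract/llvm.vector.insert coercing between the
// scalable ABI types and the fixed-length values.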
#include <stdint.h>
typedef __rvv_int8m1_t vint8m1_t;
typedef __rvv_uint8m1_t vuint8m1_t;
typedef __rvv_int16m1_t vint16m1_t;
typedef __rvv_uint16m1_t vuint16m1_t;
typedef __rvv_int32m1_t vint32m1_t;
typedef __rvv_uint32m1_t vuint32m1_t;
typedef __rvv_int64m1_t vint64m1_t;
typedef __rvv_uint64m1_t vuint64m1_t;
typedef __rvv_float32m1_t vfloat32m1_t;
typedef __rvv_float64m1_t vfloat64m1_t;
typedef vint8m1_t fixed_int8m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vint16m1_t fixed_int16m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vint32m1_t fixed_int32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vint64m1_t fixed_int64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vuint8m1_t fixed_uint8m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vuint16m1_t fixed_uint16m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vuint32m1_t fixed_uint32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vuint64m1_t fixed_uint64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vfloat32m1_t fixed_float32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vfloat64m1_t fixed_float64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
// EQ
// CHECK-LABEL: @eq_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
//
fixed_int8m1_t eq_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
  return a == b;
}
// CHECK-LABEL: @eq_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
//
fixed_int16m1_t eq_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
  return a == b;
}
// CHECK-LABEL: @eq_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t eq_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
  return a == b;
}
// CHECK-LABEL: @eq_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t eq_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
  return a == b;
}
// CHECK-LABEL: @eq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
//
fixed_int8m1_t eq_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
  return a == b;
}
// CHECK-LABEL: @eq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
//
fixed_int16m1_t eq_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
  return a == b;
}
// CHECK-LABEL: @eq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t eq_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
  return a == b;
}
// CHECK-LABEL: @eq_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp eq <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t eq_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
  return a == b;
}
// CHECK-LABEL: @eq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <8 x float> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t eq_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
  return a == b;
}
// CHECK-LABEL: @eq_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <4 x double> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t eq_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
  return a == b;
}
// NEQ
// CHECK-LABEL: @neq_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
//
fixed_int8m1_t neq_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
  return a != b;
}
// CHECK-LABEL: @neq_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
//
fixed_int16m1_t neq_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
  return a != b;
}
// CHECK-LABEL: @neq_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t neq_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
  return a != b;
}
// CHECK-LABEL: @neq_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t neq_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
  return a != b;
}
// CHECK-LABEL: @neq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
//
fixed_int8m1_t neq_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
  return a != b;
}
// CHECK-LABEL: @neq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
//
fixed_int16m1_t neq_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
  return a != b;
}
// CHECK-LABEL: @neq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t neq_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
  return a != b;
}
// CHECK-LABEL: @neq_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ne <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t neq_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
  return a != b;
}
// CHECK-LABEL: @neq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp une <8 x float> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t neq_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
  return a != b;
}
// CHECK-LABEL: @neq_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp une <4 x double> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t neq_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
  return a != b;
}
// LT
// CHECK-LABEL: @lt_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp slt <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
//
fixed_int8m1_t lt_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
  return a < b;
}
// CHECK-LABEL: @lt_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp slt <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
//
fixed_int16m1_t lt_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
  return a < b;
}
// CHECK-LABEL: @lt_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp slt <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t lt_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
  return a < b;
}
// CHECK-LABEL: @lt_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp slt <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t lt_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
  return a < b;
}
// CHECK-LABEL: @lt_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
//
fixed_int8m1_t lt_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
  return a < b;
}
// CHECK-LABEL: @lt_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
//
fixed_int16m1_t lt_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
  return a < b;
}
// CHECK-LABEL: @lt_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t lt_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
  return a < b;
}
// CHECK-LABEL: @lt_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ult <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t lt_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
  return a < b;
}
// CHECK-LABEL: @lt_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <8 x float> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t lt_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
  return a < b;
}
// CHECK-LABEL: @lt_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp olt <4 x double> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t lt_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
  return a < b;
}
// LEQ
// CHECK-LABEL: @leq_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sle <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
//
fixed_int8m1_t leq_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
  return a <= b;
}
// CHECK-LABEL: @leq_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sle <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
//
fixed_int16m1_t leq_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
  return a <= b;
}
// CHECK-LABEL: @leq_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sle <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t leq_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
  return a <= b;
}
// CHECK-LABEL: @leq_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sle <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t leq_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
  return a <= b;
}
// CHECK-LABEL: @leq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
//
fixed_int8m1_t leq_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
  return a <= b;
}
// CHECK-LABEL: @leq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
//
fixed_int16m1_t leq_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
  return a <= b;
}
// CHECK-LABEL: @leq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t leq_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
  return a <= b;
}
// CHECK-LABEL: @leq_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ule <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t leq_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
  return a <= b;
}
// CHECK-LABEL: @leq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp ole <8 x float> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t leq_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
  return a <= b;
}
// CHECK-LABEL: @leq_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp ole <4 x double> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t leq_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
  return a <= b;
}
// GT
// CHECK-LABEL: @gt_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
//
fixed_int8m1_t gt_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
  return a > b;
}
// CHECK-LABEL: @gt_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
//
fixed_int16m1_t gt_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
  return a > b;
}
// CHECK-LABEL: @gt_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t gt_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
  return a > b;
}
// CHECK-LABEL: @gt_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t gt_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
  return a > b;
}
// CHECK-LABEL: @gt_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
//
fixed_int8m1_t gt_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
  return a > b;
}
// CHECK-LABEL: @gt_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
//
fixed_int16m1_t gt_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
  return a > b;
}
// CHECK-LABEL: @gt_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t gt_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
  return a > b;
}
// CHECK-LABEL: @gt_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp ugt <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t gt_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
  return a > b;
}
// CHECK-LABEL: @gt_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <8 x float> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t gt_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
  return a > b;
}
// CHECK-LABEL: @gt_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <4 x double> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t gt_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
  return a > b;
}
// GEQ
// CHECK-LABEL: @geq_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sge <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
//
fixed_int8m1_t geq_i8(fixed_int8m1_t a, fixed_int8m1_t b) {
  return a >= b;
}
// CHECK-LABEL: @geq_i16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sge <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
//
fixed_int16m1_t geq_i16(fixed_int16m1_t a, fixed_int16m1_t b) {
  return a >= b;
}
// CHECK-LABEL: @geq_i32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sge <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t geq_i32(fixed_int32m1_t a, fixed_int32m1_t b) {
  return a >= b;
}
// CHECK-LABEL: @geq_i64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp sge <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t geq_i64(fixed_int64m1_t a, fixed_int64m1_t b) {
  return a >= b;
}
// CHECK-LABEL: @geq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <32 x i8> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i8>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 8 x i8> [[CASTSCALABLESVE]]
//
fixed_int8m1_t geq_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) {
  return a >= b;
}
// CHECK-LABEL: @geq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <16 x i16> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i16>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i16> [[CASTSCALABLESVE]]
//
fixed_int16m1_t geq_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) {
  return a >= b;
}
// CHECK-LABEL: @geq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <8 x i32> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t geq_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) {
  return a >= b;
}
// CHECK-LABEL: @geq_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = icmp uge <4 x i64> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t geq_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) {
  return a >= b;
}
// CHECK-LABEL: @geq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv2f32(<vscale x 2 x float> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp oge <8 x float> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i32>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
//
fixed_int32m1_t geq_f32(fixed_float32m1_t a, fixed_float32m1_t b) {
  return a >= b;
}
// CHECK-LABEL: @geq_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[B:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv1f64(<vscale x 1 x double> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[CMP:%.*]] = fcmp oge <4 x double> [[A]], [[B]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP]] to <4 x i64>
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SEXT]], i64 0)
// CHECK-NEXT: ret <vscale x 1 x i64> [[CASTSCALABLESVE]]
//
fixed_int64m1_t geq_f64(fixed_float64m1_t a, fixed_float64m1_t b) {
  return a >= b;
}