diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
@@ -36,6 +36,41 @@
 }
 declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1)
 
+define void @abs_v6i16(ptr %x) {
+; LMULMAX1-RV32-LABEL: abs_v6i16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vrsub.vi v9, v8, 0
+; LMULMAX1-RV32-NEXT: vmax.vv v8, v8, v9
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: abs_v6i16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vrsub.vi v9, v8, 0
+; LMULMAX1-RV64-NEXT: vmax.vv v8, v8, v9
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x i16>, ptr %x
+ %b = call <6 x i16> @llvm.abs.v6i16(<6 x i16> %a, i1 false)
+ store <6 x i16> %b, ptr %x
+ ret void
+}
+declare <6 x i16> @llvm.abs.v6i16(<6 x i16>, i1)
+
 define void @abs_v4i32(ptr %x) {
 ; CHECK-LABEL: abs_v4i32:
 ; CHECK: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -24,6 +24,41 @@
 ret void
 }
 
+define void @fadd_v6f16(ptr %x, ptr %y) {
+; LMULMAX1-RV32-LABEL: fadd_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vfadd.vv v8, v8, v9
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fadd_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV64-NEXT: vfadd.vv v8, v8, v9
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = load <6 x half>, ptr %y
+ %c = fadd <6 x half> %a, %b
+ store <6 x half> %c, ptr %x
+ ret void
+}
+
 define void @fadd_v4f32(ptr %x, ptr %y) {
 ; CHECK-LABEL: fadd_v4f32:
 ; CHECK: # %bb.0:
@@ -72,6 +107,41 @@
 ret void
 }
 
+define void @fsub_v6f16(ptr %x, ptr %y) {
+; LMULMAX1-RV32-LABEL: fsub_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vfsub.vv v8, v8, v9
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fsub_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV64-NEXT: vfsub.vv v8, v8, v9
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = load <6 x half>, ptr %y
+ %c = fsub <6 x half> %a, %b
+ store <6 x half> %c, ptr %x
+ ret void
+}
+
 define void @fsub_v4f32(ptr %x, ptr %y) {
 ; CHECK-LABEL: fsub_v4f32:
 ; CHECK: # %bb.0:
@@ -120,6 +190,41 @@
 ret void
 }
 
+define void @fmul_v6f16(ptr %x, ptr %y) {
+; LMULMAX1-RV32-LABEL: fmul_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vfmul.vv v8, v8, v9
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fmul_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV64-NEXT: vfmul.vv v8, v8, v9
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = load <6 x half>, ptr %y
+ %c = fmul <6 x half> %a, %b
+ store <6 x half> %c, ptr %x
+ ret void
+}
+
 define void @fmul_v4f32(ptr %x, ptr %y) {
 ; CHECK-LABEL: fmul_v4f32:
 ; CHECK: # %bb.0:
@@ -168,6 +273,41 @@
 ret void
 }
 
+define void @fdiv_v6f16(ptr %x, ptr %y) {
+; LMULMAX1-RV32-LABEL: fdiv_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vfdiv.vv v8, v8, v9
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fdiv_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV64-NEXT: vfdiv.vv v8, v8, v9
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = load <6 x half>, ptr %y
+ %c = fdiv <6 x half> %a, %b
+ store <6 x half> %c, ptr %x
+ ret void
+}
+
 define void @fdiv_v4f32(ptr %x, ptr %y) {
 ; CHECK-LABEL: fdiv_v4f32:
 ; CHECK: # %bb.0:
@@ -214,6 +354,38 @@
 ret void
 }
 
+define void @fneg_v6f16(ptr %x) {
+; LMULMAX1-RV32-LABEL: fneg_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vfneg.v v8, v8
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fneg_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vfneg.v v8, v8
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = fneg <6 x half> %a
+ store <6 x half> %b, ptr %x
+ ret void
+}
+
 define void @fneg_v4f32(ptr %x) {
 ; CHECK-LABEL: fneg_v4f32:
 ; CHECK: # %bb.0:
@@ -257,6 +429,39 @@
 }
 declare <8 x half> @llvm.fabs.v8f16(<8 x half>)
 
+define void @fabs_v6f16(ptr %x) {
+; LMULMAX1-RV32-LABEL: fabs_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vfabs.v v8, v8
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fabs_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vfabs.v v8, v8
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = call <6 x half> @llvm.fabs.v6f16(<6 x half> %a)
+ store <6 x half> %b, ptr %x
+ ret void
+}
+declare <6 x half> @llvm.fabs.v6f16(<6 x half>)
+
 define void @fabs_v4f32(ptr %x) {
 ; CHECK-LABEL: fabs_v4f32:
 ; CHECK: # %bb.0:
@@ -304,6 +509,42 @@
 }
 declare <8 x half> @llvm.copysign.v8f16(<8 x half>, <8 x half>)
 
+define void @copysign_v6f16(ptr %x, ptr %y) {
+; LMULMAX1-RV32-LABEL: copysign_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vfsgnj.vv v8, v8, v9
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: copysign_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV64-NEXT: vfsgnj.vv v8, v8, v9
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = load <6 x half>, ptr %y
+ %c = call <6 x half> @llvm.copysign.v6f16(<6 x half> %a, <6 x half> %b)
+ store <6 x half> %c, ptr %x
+ ret void
+}
+declare <6 x half> @llvm.copysign.v6f16(<6 x half>, <6 x half>)
+
 define void @copysign_v4f32(ptr %x, ptr %y) {
 ; CHECK-LABEL: copysign_v4f32:
 ; CHECK: # %bb.0:
@@ -354,6 +595,40 @@
 ret void
 }
 
+define void @copysign_vf_v6f16(ptr %x, half %y) {
+; LMULMAX1-RV32-LABEL: copysign_vf_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vfsgnj.vf v8, v8, fa0
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: copysign_vf_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vfsgnj.vf v8, v8, fa0
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = insertelement <6 x half> poison, half %y, i32 0
+ %c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
+ %d = call <6 x half> @llvm.copysign.v6f16(<6 x half> %a, <6 x half> %c)
+ store <6 x half> %d, ptr %x
+ ret void
+}
+
 define void @copysign_vf_v4f32(ptr %x, float %y) {
 ; CHECK-LABEL: copysign_vf_v4f32:
 ; CHECK: # %bb.0:
@@ -403,6 +678,42 @@
 ret void
 }
 
+define void @copysign_neg_v6f16(ptr %x, ptr %y) {
+; LMULMAX1-RV32-LABEL: copysign_neg_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vfsgnjn.vv v8, v8, v9
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: copysign_neg_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV64-NEXT: vfsgnjn.vv v8, v8, v9
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = load <6 x half>, ptr %y
+ %c = fneg <6 x half> %b
+ %d = call <6 x half> @llvm.copysign.v6f16(<6 x half> %a, <6 x half> %c)
+ store <6 x half> %d, ptr %x
+ ret void
+}
+
 define void @copysign_neg_v4f32(ptr %x, ptr %y) {
 ; CHECK-LABEL: copysign_neg_v4f32:
 ; CHECK: # %bb.0:
@@ -457,6 +768,47 @@
 }
 declare <4 x half> @llvm.copysign.v4f16(<4 x half>, <4 x half>)
 
+define void @copysign_neg_trunc_v3f16_v3f32(ptr %x, ptr %y) {
+; LMULMAX1-RV32-LABEL: copysign_neg_trunc_v3f16_v3f32:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vle32.v v8, (a1)
+; LMULMAX1-RV32-NEXT: vle16.v v9, (a0)
+; LMULMAX1-RV32-NEXT: vfncvt.f.f.w v10, v8
+; LMULMAX1-RV32-NEXT: vfsgnjn.vv v8, v9, v10
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 4
+; LMULMAX1-RV32-NEXT: vse16.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: copysign_neg_trunc_v3f16_v3f32:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV64-NEXT: vle32.v v9, (a1)
+; LMULMAX1-RV64-NEXT: vfncvt.f.f.w v10, v9
+; LMULMAX1-RV64-NEXT: vfsgnjn.vv v8, v8, v10
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV64-NEXT: addi a1, a0, 4
+; LMULMAX1-RV64-NEXT: vse16.v v9, (a1)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <3 x half>, ptr %x
+ %b = load <3 x float>, ptr %y
+ %c = fneg <3 x float> %b
+ %d = fptrunc <3 x float> %c to <3 x half>
+ %e = call <3 x half> @llvm.copysign.v3f16(<3 x half> %a, <3 x half> %d)
+ store <3 x half> %e, ptr %x
+ ret void
+}
+declare <3 x half> @llvm.copysign.v3f16(<3 x half>, <3 x half>)
+
 define void @copysign_neg_ext_v2f64_v2f32(ptr %x, ptr %y) {
 ; CHECK-LABEL: copysign_neg_ext_v2f64_v2f32:
 ; CHECK: # %bb.0:
@@ -492,6 +844,39 @@
 }
 declare <8 x half> @llvm.sqrt.v8f16(<8 x half>)
 
+define void @sqrt_v6f16(ptr %x) {
+; LMULMAX1-RV32-LABEL: sqrt_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vfsqrt.v v8, v8
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: sqrt_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vfsqrt.v v8, v8
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = call <6 x half> @llvm.sqrt.v6f16(<6 x half> %a)
+ store <6 x half> %b, ptr %x
+ ret void
+}
+declare <6 x half> @llvm.sqrt.v6f16(<6 x half>)
+
 define void @sqrt_v4f32(ptr %x) {
 ; CHECK-LABEL: sqrt_v4f32:
 ; CHECK: # %bb.0:
@@ -541,6 +926,45 @@
 }
 declare <8 x half> @llvm.fma.v8f16(<8 x half>, <8 x half>, <8 x half>)
 
+define void @fma_v6f16(ptr %x, ptr %y, ptr %z) {
+; LMULMAX1-RV32-LABEL: fma_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vle16.v v10, (a2)
+; LMULMAX1-RV32-NEXT: vfmacc.vv v10, v8, v9
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v8, v10, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v8, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v10, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fma_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
+; LMULMAX1-RV64-NEXT: vfmacc.vv v10, v8, v9
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v10, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v10, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = load <6 x half>, ptr %y
+ %c = load <6 x half>, ptr %z
+ %d = call <6 x half> @llvm.fma.v6f16(<6 x half> %a, <6 x half> %b, <6 x half> %c)
+ store <6 x half> %d, ptr %x
+ ret void
+}
+declare <6 x half> @llvm.fma.v6f16(<6 x half>, <6 x half>, <6 x half>)
+
 define void @fma_v4f32(ptr %x, ptr %y, ptr %z) {
 ; CHECK-LABEL: fma_v4f32:
 ; CHECK: # %bb.0:
@@ -598,6 +1022,45 @@
 ret void
 }
 
+define void @fmsub_v6f16(ptr %x, ptr %y, ptr %z) {
+; LMULMAX1-RV32-LABEL: fmsub_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vle16.v v10, (a2)
+; LMULMAX1-RV32-NEXT: vfmsac.vv v10, v8, v9
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v8, v10, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v8, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v10, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fmsub_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
+; LMULMAX1-RV64-NEXT: vfmsac.vv v10, v8, v9
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v10, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v10, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = load <6 x half>, ptr %y
+ %c = load <6 x half>, ptr %z
+ %neg = fneg <6 x half> %c
+ %d = call <6 x half> @llvm.fma.v6f16(<6 x half> %a, <6 x half> %b, <6 x half> %neg)
+ store <6 x half> %d, ptr %x
+ ret void
+}
+
 define void @fnmsub_v4f32(ptr %x, ptr %y, ptr %z) {
 ; CHECK-LABEL: fnmsub_v4f32:
 ; CHECK: # %bb.0:
@@ -1394,6 +1857,40 @@
 ret void
 }
 
+define void @fadd_vf_v6f16(ptr %x, half %y) {
+; LMULMAX1-RV32-LABEL: fadd_vf_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vfadd.vf v8, v8, fa0
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fadd_vf_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vfadd.vf v8, v8, fa0
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = insertelement <6 x half> poison, half %y, i32 0
+ %c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
+ %d = fadd <6 x half> %a, %c
+ store <6 x half> %d, ptr %x
+ ret void
+}
+
 define void @fadd_vf_v4f32(ptr %x, float %y) {
 ; CHECK-LABEL: fadd_vf_v4f32:
 ; CHECK: # %bb.0:
@@ -1442,6 +1939,40 @@
 ret void
 }
 
+define void @fadd_fv_v6f16(ptr %x, half %y) {
+; LMULMAX1-RV32-LABEL: fadd_fv_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vfadd.vf v8, v8, fa0
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fadd_fv_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vfadd.vf v8, v8, fa0
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = insertelement <6 x half> poison, half %y, i32 0
+ %c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
+ %d = fadd <6 x half> %c, %a
+ store <6 x half> %d, ptr %x
+ ret void
+}
+
 define void @fadd_fv_v4f32(ptr %x, float %y) {
 ; CHECK-LABEL: fadd_fv_v4f32:
 ; CHECK: # %bb.0:
@@ -1490,6 +2021,40 @@
 ret void
 }
 
+define void @fsub_vf_v6f16(ptr %x, half %y) {
+; LMULMAX1-RV32-LABEL: fsub_vf_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vfsub.vf v8, v8, fa0
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fsub_vf_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vfsub.vf v8, v8, fa0
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = insertelement <6 x half> poison, half %y, i32 0
+ %c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
+ %d = fsub <6 x half> %a, %c
+ store <6 x half> %d, ptr %x
+ ret void
+}
+
 define void @fsub_vf_v4f32(ptr %x, float %y) {
 ; CHECK-LABEL: fsub_vf_v4f32:
 ; CHECK: # %bb.0:
@@ -1538,6 +2103,40 @@
 ret void
 }
 
+define void @fsub_fv_v6f16(ptr %x, half %y) {
+; LMULMAX1-RV32-LABEL: fsub_fv_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vfrsub.vf v8, v8, fa0
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fsub_fv_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vfrsub.vf v8, v8, fa0
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = insertelement <6 x half> poison, half %y, i32 0
+ %c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
+ %d = fsub <6 x half> %c, %a
+ store <6 x half> %d, ptr %x
+ ret void
+}
+
 define void @fsub_fv_v4f32(ptr %x, float %y) {
 ; CHECK-LABEL: fsub_fv_v4f32:
 ; CHECK: # %bb.0:
@@ -1586,6 +2185,40 @@
 ret void
 }
 
+define void @fmul_vf_v6f16(ptr %x, half %y) {
+; LMULMAX1-RV32-LABEL: fmul_vf_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vfmul.vf v8, v8, fa0
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fmul_vf_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vfmul.vf v8, v8, fa0
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = insertelement <6 x half> poison, half %y, i32 0
+ %c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
+ %d = fmul <6 x half> %a, %c
+ store <6 x half> %d, ptr %x
+ ret void
+}
+
 define void @fmul_vf_v4f32(ptr %x, float %y) {
 ; CHECK-LABEL: fmul_vf_v4f32:
 ; CHECK: # %bb.0:
@@ -1634,6 +2267,40 @@
 ret void
 }
 
+define void @fmul_fv_v6f16(ptr %x, half %y) {
+; LMULMAX1-RV32-LABEL: fmul_fv_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vfmul.vf v8, v8, fa0
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fmul_fv_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vfmul.vf v8, v8, fa0
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = insertelement <6 x half> poison, half %y, i32 0
+ %c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
+ %d = fmul <6 x half> %c, %a
+ store <6 x half> %d, ptr %x
+ ret void
+}
+
 define void @fmul_fv_v4f32(ptr %x, float %y) {
 ; CHECK-LABEL: fmul_fv_v4f32:
 ; CHECK: # %bb.0:
@@ -1682,6 +2349,40 @@
 ret void
 }
 
+define void @fdiv_vf_v6f16(ptr %x, half %y) {
+; LMULMAX1-RV32-LABEL: fdiv_vf_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vfdiv.vf v8, v8, fa0
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fdiv_vf_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vfdiv.vf v8, v8, fa0
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = insertelement <6 x half> poison, half %y, i32 0
+ %c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
+ %d = fdiv <6 x half> %a, %c
+ store <6 x half> %d, ptr %x
+ ret void
+}
+
 define void @fdiv_vf_v4f32(ptr %x, float %y) {
 ; CHECK-LABEL: fdiv_vf_v4f32:
 ; CHECK: # %bb.0:
@@ -1730,6 +2431,40 @@
 ret void
 }
 
+define void @fdiv_fv_v6f16(ptr %x, half %y) {
+; LMULMAX1-RV32-LABEL: fdiv_fv_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vfrdiv.vf v8, v8, fa0
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fdiv_fv_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vfrdiv.vf v8, v8, fa0
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = insertelement <6 x half> poison, half %y, i32 0
+ %c = shufflevector <6 x half> %b, <6 x half> poison, <6 x i32> zeroinitializer
+ %d = fdiv <6 x half> %c, %a
+ store <6 x half> %d, ptr %x
+ ret void
+}
+
 define void @fdiv_fv_v4f32(ptr %x, float %y) {
 ; CHECK-LABEL: fdiv_fv_v4f32:
 ; CHECK: # %bb.0:
@@ -1780,6 +2515,43 @@
 ret void
 }
 
+define void @fma_vf_v6f16(ptr %x, ptr %y, half %z) {
+; LMULMAX1-RV32-LABEL: fma_vf_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vfmacc.vf v9, fa0, v8
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v8, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v9, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fma_vf_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV64-NEXT: vfmacc.vf v9, fa0, v8
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v9, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = load <6 x half>, ptr %y
+ %c = insertelement <6 x half> poison, half %z, i32 0
+ %d = shufflevector <6 x half> %c, <6 x half> poison, <6 x i32> zeroinitializer
+ %e = call <6 x half> @llvm.fma.v6f16(<6 x half> %a, <6 x half> %d, <6 x half> %b)
+ store <6 x half> %e, ptr %x
+ ret void
+}
+
 define void @fma_vf_v4f32(ptr %x, ptr %y, float %z) {
 ; CHECK-LABEL: fma_vf_v4f32:
 ; CHECK: # %bb.0:
@@ -1834,6 +2606,43 @@
 ret void
 }
 
+define void @fma_fv_v6f16(ptr %x, ptr %y, half %z) {
+; LMULMAX1-RV32-LABEL: fma_fv_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vfmacc.vf v9, fa0, v8
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v8, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v9, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fma_fv_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV64-NEXT: vfmacc.vf v9, fa0, v8
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v9, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = load <6 x half>, ptr %y
+ %c = insertelement <6 x half> poison, half %z, i32 0
+ %d = shufflevector <6 x half> %c, <6 x half> poison, <6 x i32> zeroinitializer
+ %e = call <6 x half> @llvm.fma.v6f16(<6 x half> %d, <6 x half> %a, <6 x half> %b)
+ store <6 x half> %e, ptr %x
+ ret void
+}
+
 define void @fma_fv_v4f32(ptr %x, ptr %y, float %z) {
 ; CHECK-LABEL: fma_fv_v4f32:
 ; CHECK: # %bb.0:
@@ -1889,6 +2698,44 @@
 ret void
 }
 
+define void @fmsub_vf_v6f16(ptr %x, ptr %y, half %z) {
+; LMULMAX1-RV32-LABEL: fmsub_vf_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vfmsac.vf v9, fa0, v8
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v8, v9, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v8, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v9, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fmsub_vf_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV64-NEXT: vfmsac.vf v9, fa0, v8
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v9, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v9, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = load <6 x half>, ptr %y
+ %c = insertelement <6 x half> poison, half %z, i32 0
+ %d = shufflevector <6 x half> %c, <6 x half> poison, <6 x i32> zeroinitializer
+ %neg = fneg <6 x half> %b
+ %e = call <6 x half> @llvm.fma.v6f16(<6 x half> %a, <6 x half> %d, <6 x half> %neg)
+ store <6 x half> %e, ptr %x
+ ret void
+}
+
 define void @fnmsub_vf_v4f32(ptr %x, ptr %y, float %z) {
 ; CHECK-LABEL: fnmsub_vf_v4f32:
 ; CHECK: # %bb.0:
@@ -1972,8 +2819,8 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI91_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI91_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI115_0)
+; CHECK-NEXT: flh fa5, %lo(.LCPI115_0)(a1)
 ; CHECK-NEXT: vfabs.v v9, v8
 ; CHECK-NEXT: vmflt.vf v0, v9, fa5
 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
@@ -1989,6 +2836,53 @@
 }
 declare <8 x half> @llvm.trunc.v8f16(<8 x half>)
 
+define void @trunc_v6f16(ptr %x) {
+; LMULMAX1-RV32-LABEL: trunc_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: lui a1, %hi(.LCPI116_0)
+; LMULMAX1-RV32-NEXT: flh fa5, %lo(.LCPI116_0)(a1)
+; LMULMAX1-RV32-NEXT: vfabs.v v9, v8
+; LMULMAX1-RV32-NEXT: vmflt.vf v0, v9, fa5
+; LMULMAX1-RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; LMULMAX1-RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; LMULMAX1-RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: trunc_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI116_0)
+; LMULMAX1-RV64-NEXT: flh fa5, %lo(.LCPI116_0)(a1)
+; LMULMAX1-RV64-NEXT: vfabs.v v9, v8
+; LMULMAX1-RV64-NEXT: vmflt.vf v0, v9, fa5
+; LMULMAX1-RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; LMULMAX1-RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; LMULMAX1-RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = call <6 x half> @llvm.trunc.v6f16(<6 x half> %a)
+ store <6 x half> %b, ptr %x
+ ret void
+}
+declare <6 x half> @llvm.trunc.v6f16(<6 x half>)
+
 define void @trunc_v4f32(ptr %x) {
 ; CHECK-LABEL: trunc_v4f32:
 ; CHECK: # %bb.0:
@@ -2016,8 +2910,8 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI93_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI93_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI118_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI118_0)(a1)
 ; CHECK-NEXT: vfabs.v v9, v8
 ; CHECK-NEXT: vmflt.vf v0, v9, fa5
 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
@@ -2038,8 +2932,8 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI94_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI94_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI119_0)
+; CHECK-NEXT: flh fa5, %lo(.LCPI119_0)(a1)
 ; CHECK-NEXT: vfabs.v v9, v8
 ; CHECK-NEXT: vmflt.vf v0, v9, fa5
 ; CHECK-NEXT: fsrmi a1, 3
@@ -2057,6 +2951,57 @@
 }
 declare <8 x half> @llvm.ceil.v8f16(<8 x half>)
 
+define void @ceil_v6f16(ptr %x) {
+; LMULMAX1-RV32-LABEL: ceil_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: lui a1, %hi(.LCPI120_0)
+; LMULMAX1-RV32-NEXT: flh fa5, %lo(.LCPI120_0)(a1)
+; LMULMAX1-RV32-NEXT: vfabs.v v9, v8
+; LMULMAX1-RV32-NEXT: vmflt.vf v0, v9, fa5
+; LMULMAX1-RV32-NEXT: fsrmi a1, 3
+; LMULMAX1-RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; LMULMAX1-RV32-NEXT: fsrm a1
+; LMULMAX1-RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; LMULMAX1-RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: ceil_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI120_0)
+; LMULMAX1-RV64-NEXT: flh fa5, %lo(.LCPI120_0)(a1)
+; LMULMAX1-RV64-NEXT: vfabs.v v9, v8
+; LMULMAX1-RV64-NEXT: vmflt.vf v0, v9, fa5
+; LMULMAX1-RV64-NEXT: fsrmi a1, 3
+; LMULMAX1-RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; LMULMAX1-RV64-NEXT: fsrm a1
+; LMULMAX1-RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; LMULMAX1-RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = call <6 x half> @llvm.ceil.v6f16(<6 x half> %a)
+ store <6 x half> %b, ptr %x
+ ret void
+}
+declare <6 x half> @llvm.ceil.v6f16(<6 x half>)
+
 define void @ceil_v4f32(ptr %x) {
 ; CHECK-LABEL: ceil_v4f32:
 ; CHECK: # %bb.0:
@@ -2086,8 +3031,8 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI96_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI96_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI122_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI122_0)(a1)
 ; CHECK-NEXT: vfabs.v v9, v8
 ; CHECK-NEXT: vmflt.vf v0, v9, fa5
 ; CHECK-NEXT: fsrmi a1, 3
@@ -2110,8 +3055,8 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI97_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI97_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI123_0)
+; CHECK-NEXT: flh fa5, %lo(.LCPI123_0)(a1)
 ; CHECK-NEXT: vfabs.v v9, v8
 ; CHECK-NEXT: vmflt.vf v0, v9, fa5
 ; CHECK-NEXT: fsrmi a1, 2
@@ -2129,6 +3074,57 @@
 }
 declare <8 x half> @llvm.floor.v8f16(<8 x half>)
 
+define void @floor_v6f16(ptr %x) {
+; LMULMAX1-RV32-LABEL: floor_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: lui a1, %hi(.LCPI124_0)
+; LMULMAX1-RV32-NEXT: flh fa5, %lo(.LCPI124_0)(a1)
+; LMULMAX1-RV32-NEXT: vfabs.v v9, v8
+; LMULMAX1-RV32-NEXT: vmflt.vf v0, v9, fa5
+; LMULMAX1-RV32-NEXT: fsrmi a1, 2
+; LMULMAX1-RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; LMULMAX1-RV32-NEXT: fsrm a1
+; LMULMAX1-RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; LMULMAX1-RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: floor_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI124_0)
+; LMULMAX1-RV64-NEXT: flh fa5, %lo(.LCPI124_0)(a1)
+; LMULMAX1-RV64-NEXT: vfabs.v v9, v8
+; LMULMAX1-RV64-NEXT: vmflt.vf v0, v9, fa5
+; LMULMAX1-RV64-NEXT: fsrmi a1, 2
+; LMULMAX1-RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; LMULMAX1-RV64-NEXT: fsrm a1
+; LMULMAX1-RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; LMULMAX1-RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = call <6 x half> @llvm.floor.v6f16(<6 x half> %a)
+ store <6 x half> %b, ptr %x
+ ret void
+}
+declare <6 x half> @llvm.floor.v6f16(<6 x half>)
+
 define void @floor_v4f32(ptr %x) {
 ; CHECK-LABEL: floor_v4f32:
 ; CHECK: # %bb.0:
@@ -2158,8 +3154,8 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI99_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI99_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI126_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI126_0)(a1)
 ; CHECK-NEXT: vfabs.v v9, v8
 ; CHECK-NEXT: vmflt.vf v0, v9, fa5
 ; CHECK-NEXT: fsrmi a1, 2
@@ -2182,8 +3178,8 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI100_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI100_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI127_0)
+; CHECK-NEXT: flh fa5, %lo(.LCPI127_0)(a1)
 ; CHECK-NEXT: vfabs.v v9, v8
 ; CHECK-NEXT: vmflt.vf v0, v9, fa5
 ; CHECK-NEXT: fsrmi a1, 4
@@ -2201,6 +3197,57 @@
 }
 declare <8 x half> @llvm.round.v8f16(<8 x half>)
 
+define void @round_v6f16(ptr %x) {
+; LMULMAX1-RV32-LABEL: round_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: lui a1, %hi(.LCPI128_0)
+; LMULMAX1-RV32-NEXT: flh fa5, %lo(.LCPI128_0)(a1)
+; LMULMAX1-RV32-NEXT: vfabs.v v9, v8
+; LMULMAX1-RV32-NEXT: vmflt.vf v0, v9, fa5
+; LMULMAX1-RV32-NEXT: fsrmi a1, 4
+; LMULMAX1-RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; LMULMAX1-RV32-NEXT: fsrm a1
+; LMULMAX1-RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; LMULMAX1-RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: round_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI128_0)
+; LMULMAX1-RV64-NEXT: flh fa5, %lo(.LCPI128_0)(a1)
+; LMULMAX1-RV64-NEXT: vfabs.v v9, v8
+; LMULMAX1-RV64-NEXT: vmflt.vf v0, v9, fa5
+; LMULMAX1-RV64-NEXT: fsrmi a1, 4
+; LMULMAX1-RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; LMULMAX1-RV64-NEXT: fsrm a1
+; LMULMAX1-RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
+; LMULMAX1-RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = call <6 x half> @llvm.round.v6f16(<6 x half> %a)
+ store <6 x half> %b, ptr %x
+ ret void
+}
+declare <6 x half> @llvm.round.v6f16(<6 x half>)
+
 define void @round_v4f32(ptr %x) {
 ; CHECK-LABEL: round_v4f32:
 ; CHECK: # %bb.0:
@@ -2230,8 +3277,8 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI102_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI102_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI130_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI130_0)(a1)
 ; CHECK-NEXT: vfabs.v v9, v8
 ; CHECK-NEXT: vmflt.vf v0, v9, fa5
 ; CHECK-NEXT: fsrmi a1, 4
@@ -2406,6 +3453,45 @@
 }
 declare <8 x half> @llvm.fmuladd.v8f16(<8 x half>, <8 x half>, <8 x half>)
 
+define void @fmuladd_v6f16(ptr %x, ptr %y, ptr %z) {
+; LMULMAX1-RV32-LABEL: fmuladd_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vle16.v v10, (a2)
+; LMULMAX1-RV32-NEXT: vfmacc.vv v10, v8, v9
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v8, v10, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v8, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v10, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fmuladd_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
+; LMULMAX1-RV64-NEXT: vfmacc.vv v10, v8, v9
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v10, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v10, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = load <6 x half>, ptr %y
+ %c = load <6 x half>, ptr %z
+ %d = call <6 x half> @llvm.fmuladd.v6f16(<6 x half> %a, <6 x half> %b, <6 x half> %c)
+ store <6 x half> %d, ptr %x
+ ret void
+}
+declare <6 x half> @llvm.fmuladd.v6f16(<6 x half>, <6 x half>, <6 x half>)
+
 define void @fmuladd_v4f32(ptr %x, ptr %y, ptr %z) {
 ; CHECK-LABEL: fmuladd_v4f32:
 ; CHECK: # %bb.0:
@@ -2463,6 +3549,45 @@
 ret void
 }
 
+define void @fmsub_fmuladd_v6f16(ptr %x, ptr %y, ptr %z) {
+; LMULMAX1-RV32-LABEL: fmsub_fmuladd_v6f16:
+; LMULMAX1-RV32: # %bb.0:
+; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV32-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV32-NEXT: vle16.v v10, (a2)
+; LMULMAX1-RV32-NEXT: vfmsac.vv v10, v8, v9
+; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vslidedown.vi v8, v10, 2
+; LMULMAX1-RV32-NEXT: addi a1, a0, 8
+; LMULMAX1-RV32-NEXT: vse32.v v8, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; LMULMAX1-RV32-NEXT: vse16.v v10, (a0)
+; LMULMAX1-RV32-NEXT: ret
+;
+; LMULMAX1-RV64-LABEL: fmsub_fmuladd_v6f16:
+; LMULMAX1-RV64: # %bb.0:
+; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
+; LMULMAX1-RV64-NEXT: vle16.v v9, (a1)
+; LMULMAX1-RV64-NEXT: vle16.v v10, (a2)
+; LMULMAX1-RV64-NEXT: vfmsac.vv v10, v8, v9
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vse64.v v10, (a0)
+; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v10, 2
+; LMULMAX1-RV64-NEXT: addi a0, a0, 8
+; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
+; LMULMAX1-RV64-NEXT: ret
+ %a = load <6 x half>, ptr %x
+ %b = load <6 x half>, ptr %y
+ %c = load <6 x half>, ptr %z
+ %neg = fneg <6 x half> %c
+ %d = call <6 x half> @llvm.fmuladd.v6f16(<6 x half> %a, <6 x half> %b, <6 x half> %neg)
+ store <6 x half> %d, ptr %x
+ ret void
+}
+
 define void @fnmsub_fmuladd_v4f32(ptr %x, ptr %y, ptr %z) {
 ; CHECK-LABEL: fnmsub_fmuladd_v4f32:
 ; CHECK: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV64
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV64
 
 define void @fp2si_v2f32_v2i32(ptr %x, ptr %y) {
 ; CHECK-LABEL: fp2si_v2f32_v2i32:
 ; CHECK: # %bb.0:
@@ -78,6 +78,348 @@
 ret <2 x i1> %z
 }
 
+define void @fp2si_v3f32_v3i32(ptr %x, ptr %y) {
+; LMULMAX8RV32-LABEL: fp2si_v3f32_v3i32:
+; LMULMAX8RV32: # %bb.0:
+; LMULMAX8RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX8RV32-NEXT: vle32.v v8, (a0)
+; LMULMAX8RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
+; LMULMAX8RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX8RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX8RV32-NEXT: addi a0, a1, 8
+; LMULMAX8RV32-NEXT: vse32.v v9, (a0)
+; LMULMAX8RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX8RV32-NEXT: vse32.v v8, (a1)
+; LMULMAX8RV32-NEXT: ret
+;
+; LMULMAX8RV64-LABEL: fp2si_v3f32_v3i32:
+; LMULMAX8RV64: # %bb.0:
+; LMULMAX8RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX8RV64-NEXT: vle32.v v8, (a0)
+; LMULMAX8RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
+; LMULMAX8RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX8RV64-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX8RV64-NEXT: addi a0, a1, 8
+; LMULMAX8RV64-NEXT: vse32.v v9, (a0)
+; LMULMAX8RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX8RV64-NEXT: vse64.v v8, (a1)
+; LMULMAX8RV64-NEXT: ret
+;
+; LMULMAX1RV32-LABEL: fp2si_v3f32_v3i32:
+; LMULMAX1RV32: # %bb.0:
+; LMULMAX1RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1RV32-NEXT: vle32.v v8, (a0)
+; LMULMAX1RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
+; LMULMAX1RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1RV32-NEXT: addi a0, a1, 8
+; LMULMAX1RV32-NEXT: vse32.v v9, (a0)
+; LMULMAX1RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1RV32-NEXT: vse32.v v8, (a1)
+; LMULMAX1RV32-NEXT: ret
+;
+; LMULMAX1RV64-LABEL: fp2si_v3f32_v3i32:
+; LMULMAX1RV64: # %bb.0:
+; LMULMAX1RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1RV64-NEXT: vle32.v v8, (a0)
+; LMULMAX1RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
+; LMULMAX1RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1RV64-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1RV64-NEXT: addi a0, a1, 8
+; LMULMAX1RV64-NEXT: vse32.v v9, (a0)
+; LMULMAX1RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1RV64-NEXT: vse64.v v8, (a1)
+; LMULMAX1RV64-NEXT: ret
+ %a = load <3 x float>, ptr %x
+ %d = fptosi <3 x float> %a to <3 x i32>
+ store <3 x i32> %d, ptr %y
+ ret void
+}
+
+define void @fp2ui_v3f32_v3i32(ptr %x, ptr %y) {
+; LMULMAX8RV32-LABEL: fp2ui_v3f32_v3i32:
+; LMULMAX8RV32: # %bb.0:
+; LMULMAX8RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX8RV32-NEXT: vle32.v v8, (a0)
+; LMULMAX8RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; LMULMAX8RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX8RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX8RV32-NEXT: addi a0, a1, 8
+; LMULMAX8RV32-NEXT: vse32.v v9, (a0)
+; LMULMAX8RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX8RV32-NEXT: vse32.v v8, (a1)
+; LMULMAX8RV32-NEXT: ret
+;
+; LMULMAX8RV64-LABEL: fp2ui_v3f32_v3i32:
+; LMULMAX8RV64: # %bb.0:
+; LMULMAX8RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX8RV64-NEXT: vle32.v v8, (a0)
+; LMULMAX8RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; LMULMAX8RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX8RV64-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX8RV64-NEXT: addi a0, a1, 8
+; LMULMAX8RV64-NEXT: vse32.v v9, (a0)
+; LMULMAX8RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX8RV64-NEXT: vse64.v v8, (a1)
+; LMULMAX8RV64-NEXT: ret
+;
+; LMULMAX1RV32-LABEL: fp2ui_v3f32_v3i32:
+; LMULMAX1RV32: # %bb.0:
+; LMULMAX1RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1RV32-NEXT: vle32.v v8, (a0)
+; LMULMAX1RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; LMULMAX1RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1RV32-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1RV32-NEXT: addi a0, a1, 8
+; LMULMAX1RV32-NEXT: vse32.v v9, (a0)
+; LMULMAX1RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1RV32-NEXT: vse32.v v8, (a1)
+; LMULMAX1RV32-NEXT: ret
+;
+; LMULMAX1RV64-LABEL: fp2ui_v3f32_v3i32:
+; LMULMAX1RV64: # %bb.0:
+; LMULMAX1RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1RV64-NEXT: vle32.v v8, (a0)
+; LMULMAX1RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
+; LMULMAX1RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; LMULMAX1RV64-NEXT: vslidedown.vi v9, v8, 2
+; LMULMAX1RV64-NEXT: addi a0, a1, 8
+; LMULMAX1RV64-NEXT: vse32.v v9, (a0)
+; LMULMAX1RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; LMULMAX1RV64-NEXT: vse64.v v8, (a1)
+; LMULMAX1RV64-NEXT: ret
+ %a = load <3 x float>, ptr %x
+ %d = fptoui <3 x float> %a to <3 x i32>
+ store <3 x i32> %d, ptr %y
+ ret void
+}
+
+define <3 x i1> @fp2si_v3f32_v3i1(<3 x float> %x) {
+; CHECK-LABEL: fp2si_v3f32_v3i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
+; CHECK-NEXT: vand.vi v8, v9, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %z = fptosi <3 x float> %x to <3 x i1>
+ ret <3 x i1> %z
+}
+
+; FIXME: This is expanded when it could be widened + promoted
+define <3 x i15> @fp2si_v3f32_v3i15(<3 x float> %x) {
LMULMAX8RV32-LABEL: fp2si_v3f32_v3i15: +; LMULMAX8RV32: # %bb.0: +; LMULMAX8RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; LMULMAX8RV32-NEXT: vfncvt.rtz.x.f.w v9, v8 +; LMULMAX8RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; LMULMAX8RV32-NEXT: vslidedown.vi v8, v9, 2 +; LMULMAX8RV32-NEXT: vmv.x.s a1, v8 +; LMULMAX8RV32-NEXT: slli a2, a1, 17 +; LMULMAX8RV32-NEXT: srli a2, a2, 19 +; LMULMAX8RV32-NEXT: sh a2, 4(a0) +; LMULMAX8RV32-NEXT: vmv.x.s a2, v9 +; LMULMAX8RV32-NEXT: lui a3, 8 +; LMULMAX8RV32-NEXT: addi a3, a3, -1 +; LMULMAX8RV32-NEXT: and a2, a2, a3 +; LMULMAX8RV32-NEXT: vslidedown.vi v8, v9, 1 +; LMULMAX8RV32-NEXT: vmv.x.s a4, v8 +; LMULMAX8RV32-NEXT: and a3, a4, a3 +; LMULMAX8RV32-NEXT: slli a3, a3, 15 +; LMULMAX8RV32-NEXT: slli a1, a1, 30 +; LMULMAX8RV32-NEXT: or a1, a2, a1 +; LMULMAX8RV32-NEXT: or a1, a1, a3 +; LMULMAX8RV32-NEXT: sw a1, 0(a0) +; LMULMAX8RV32-NEXT: ret +; +; LMULMAX8RV64-LABEL: fp2si_v3f32_v3i15: +; LMULMAX8RV64: # %bb.0: +; LMULMAX8RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; LMULMAX8RV64-NEXT: vfncvt.rtz.x.f.w v9, v8 +; LMULMAX8RV64-NEXT: vmv.x.s a1, v9 +; LMULMAX8RV64-NEXT: lui a2, 8 +; LMULMAX8RV64-NEXT: addiw a2, a2, -1 +; LMULMAX8RV64-NEXT: and a1, a1, a2 +; LMULMAX8RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; LMULMAX8RV64-NEXT: vslidedown.vi v8, v9, 1 +; LMULMAX8RV64-NEXT: vmv.x.s a3, v8 +; LMULMAX8RV64-NEXT: and a2, a3, a2 +; LMULMAX8RV64-NEXT: slli a2, a2, 15 +; LMULMAX8RV64-NEXT: vslidedown.vi v8, v9, 2 +; LMULMAX8RV64-NEXT: vmv.x.s a3, v8 +; LMULMAX8RV64-NEXT: slli a3, a3, 30 +; LMULMAX8RV64-NEXT: or a1, a1, a3 +; LMULMAX8RV64-NEXT: or a1, a1, a2 +; LMULMAX8RV64-NEXT: sw a1, 0(a0) +; LMULMAX8RV64-NEXT: slli a1, a1, 19 +; LMULMAX8RV64-NEXT: srli a1, a1, 51 +; LMULMAX8RV64-NEXT: sh a1, 4(a0) +; LMULMAX8RV64-NEXT: ret +; +; LMULMAX1RV32-LABEL: fp2si_v3f32_v3i15: +; LMULMAX1RV32: # %bb.0: +; LMULMAX1RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; LMULMAX1RV32-NEXT: vfncvt.rtz.x.f.w v9, v8 +; LMULMAX1RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; LMULMAX1RV32-NEXT: vslidedown.vi v8, v9, 2 +; LMULMAX1RV32-NEXT: vmv.x.s a1, v8 +; LMULMAX1RV32-NEXT: slli a2, a1, 17 +; LMULMAX1RV32-NEXT: srli a2, a2, 19 +; LMULMAX1RV32-NEXT: sh a2, 4(a0) +; LMULMAX1RV32-NEXT: vmv.x.s a2, v9 +; LMULMAX1RV32-NEXT: lui a3, 8 +; LMULMAX1RV32-NEXT: addi a3, a3, -1 +; LMULMAX1RV32-NEXT: and a2, a2, a3 +; LMULMAX1RV32-NEXT: vslidedown.vi v8, v9, 1 +; LMULMAX1RV32-NEXT: vmv.x.s a4, v8 +; LMULMAX1RV32-NEXT: and a3, a4, a3 +; LMULMAX1RV32-NEXT: slli a3, a3, 15 +; LMULMAX1RV32-NEXT: slli a1, a1, 30 +; LMULMAX1RV32-NEXT: or a1, a2, a1 +; LMULMAX1RV32-NEXT: or a1, a1, a3 +; LMULMAX1RV32-NEXT: sw a1, 0(a0) +; LMULMAX1RV32-NEXT: ret +; +; LMULMAX1RV64-LABEL: fp2si_v3f32_v3i15: +; LMULMAX1RV64: # %bb.0: +; LMULMAX1RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; LMULMAX1RV64-NEXT: vfncvt.rtz.x.f.w v9, v8 +; LMULMAX1RV64-NEXT: vmv.x.s a1, v9 +; LMULMAX1RV64-NEXT: lui a2, 8 +; LMULMAX1RV64-NEXT: addiw a2, a2, -1 +; LMULMAX1RV64-NEXT: and a1, a1, a2 +; LMULMAX1RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; LMULMAX1RV64-NEXT: vslidedown.vi v8, v9, 1 +; LMULMAX1RV64-NEXT: vmv.x.s a3, v8 +; LMULMAX1RV64-NEXT: and a2, a3, a2 +; LMULMAX1RV64-NEXT: slli a2, a2, 15 +; LMULMAX1RV64-NEXT: vslidedown.vi v8, v9, 2 +; LMULMAX1RV64-NEXT: vmv.x.s a3, v8 +; LMULMAX1RV64-NEXT: slli a3, a3, 30 +; LMULMAX1RV64-NEXT: or a1, a1, a3 +; LMULMAX1RV64-NEXT: or a1, a1, a2 +; LMULMAX1RV64-NEXT: sw a1, 0(a0) +; LMULMAX1RV64-NEXT: slli a1, a1, 19 +; LMULMAX1RV64-NEXT: srli a1, a1, 51 +; LMULMAX1RV64-NEXT: sh 
a1, 4(a0) +; LMULMAX1RV64-NEXT: ret + %z = fptosi <3 x float> %x to <3 x i15> + ret <3 x i15> %z +} + +; FIXME: This is expanded when they could be widened + promoted +define <3 x i15> @fp2ui_v3f32_v3i15(<3 x float> %x) { +; LMULMAX8RV32-LABEL: fp2ui_v3f32_v3i15: +; LMULMAX8RV32: # %bb.0: +; LMULMAX8RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; LMULMAX8RV32-NEXT: vfncvt.rtz.x.f.w v9, v8 +; LMULMAX8RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; LMULMAX8RV32-NEXT: vslidedown.vi v8, v9, 2 +; LMULMAX8RV32-NEXT: vmv.x.s a1, v8 +; LMULMAX8RV32-NEXT: slli a2, a1, 17 +; LMULMAX8RV32-NEXT: srli a2, a2, 19 +; LMULMAX8RV32-NEXT: sh a2, 4(a0) +; LMULMAX8RV32-NEXT: vmv.x.s a2, v9 +; LMULMAX8RV32-NEXT: lui a3, 16 +; LMULMAX8RV32-NEXT: addi a3, a3, -1 +; LMULMAX8RV32-NEXT: and a2, a2, a3 +; LMULMAX8RV32-NEXT: vslidedown.vi v8, v9, 1 +; LMULMAX8RV32-NEXT: vmv.x.s a4, v8 +; LMULMAX8RV32-NEXT: and a3, a4, a3 +; LMULMAX8RV32-NEXT: slli a3, a3, 15 +; LMULMAX8RV32-NEXT: slli a1, a1, 30 +; LMULMAX8RV32-NEXT: or a1, a2, a1 +; LMULMAX8RV32-NEXT: or a1, a1, a3 +; LMULMAX8RV32-NEXT: sw a1, 0(a0) +; LMULMAX8RV32-NEXT: ret +; +; LMULMAX8RV64-LABEL: fp2ui_v3f32_v3i15: +; LMULMAX8RV64: # %bb.0: +; LMULMAX8RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; LMULMAX8RV64-NEXT: vfncvt.rtz.x.f.w v9, v8 +; LMULMAX8RV64-NEXT: vmv.x.s a1, v9 +; LMULMAX8RV64-NEXT: lui a2, 16 +; LMULMAX8RV64-NEXT: addiw a2, a2, -1 +; LMULMAX8RV64-NEXT: and a1, a1, a2 +; LMULMAX8RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; LMULMAX8RV64-NEXT: vslidedown.vi v8, v9, 1 +; LMULMAX8RV64-NEXT: vmv.x.s a3, v8 +; LMULMAX8RV64-NEXT: and a2, a3, a2 +; LMULMAX8RV64-NEXT: slli a2, a2, 15 +; LMULMAX8RV64-NEXT: vslidedown.vi v8, v9, 2 +; LMULMAX8RV64-NEXT: vmv.x.s a3, v8 +; LMULMAX8RV64-NEXT: slli a3, a3, 30 +; LMULMAX8RV64-NEXT: or a1, a1, a3 +; LMULMAX8RV64-NEXT: or a1, a1, a2 +; LMULMAX8RV64-NEXT: sw a1, 0(a0) +; LMULMAX8RV64-NEXT: slli a1, a1, 19 +; LMULMAX8RV64-NEXT: srli a1, a1, 51 +; LMULMAX8RV64-NEXT: sh a1, 4(a0) +; LMULMAX8RV64-NEXT: ret +; +; LMULMAX1RV32-LABEL: fp2ui_v3f32_v3i15: +; LMULMAX1RV32: # %bb.0: +; LMULMAX1RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; LMULMAX1RV32-NEXT: vfncvt.rtz.x.f.w v9, v8 +; LMULMAX1RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; LMULMAX1RV32-NEXT: vslidedown.vi v8, v9, 2 +; LMULMAX1RV32-NEXT: vmv.x.s a1, v8 +; LMULMAX1RV32-NEXT: slli a2, a1, 17 +; LMULMAX1RV32-NEXT: srli a2, a2, 19 +; LMULMAX1RV32-NEXT: sh a2, 4(a0) +; LMULMAX1RV32-NEXT: vmv.x.s a2, v9 +; LMULMAX1RV32-NEXT: lui a3, 16 +; LMULMAX1RV32-NEXT: addi a3, a3, -1 +; LMULMAX1RV32-NEXT: and a2, a2, a3 +; LMULMAX1RV32-NEXT: vslidedown.vi v8, v9, 1 +; LMULMAX1RV32-NEXT: vmv.x.s a4, v8 +; LMULMAX1RV32-NEXT: and a3, a4, a3 +; LMULMAX1RV32-NEXT: slli a3, a3, 15 +; LMULMAX1RV32-NEXT: slli a1, a1, 30 +; LMULMAX1RV32-NEXT: or a1, a2, a1 +; LMULMAX1RV32-NEXT: or a1, a1, a3 +; LMULMAX1RV32-NEXT: sw a1, 0(a0) +; LMULMAX1RV32-NEXT: ret +; +; LMULMAX1RV64-LABEL: fp2ui_v3f32_v3i15: +; LMULMAX1RV64: # %bb.0: +; LMULMAX1RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; LMULMAX1RV64-NEXT: vfncvt.rtz.x.f.w v9, v8 +; LMULMAX1RV64-NEXT: vmv.x.s a1, v9 +; LMULMAX1RV64-NEXT: lui a2, 16 +; LMULMAX1RV64-NEXT: addiw a2, a2, -1 +; LMULMAX1RV64-NEXT: and a1, a1, a2 +; LMULMAX1RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma +; LMULMAX1RV64-NEXT: vslidedown.vi v8, v9, 1 +; LMULMAX1RV64-NEXT: vmv.x.s a3, v8 +; LMULMAX1RV64-NEXT: and a2, a3, a2 +; LMULMAX1RV64-NEXT: slli a2, a2, 15 +; LMULMAX1RV64-NEXT: vslidedown.vi v8, v9, 2 +; LMULMAX1RV64-NEXT: vmv.x.s a3, v8 +; 
LMULMAX1RV64-NEXT: slli a3, a3, 30 +; LMULMAX1RV64-NEXT: or a1, a1, a3 +; LMULMAX1RV64-NEXT: or a1, a1, a2 +; LMULMAX1RV64-NEXT: sw a1, 0(a0) +; LMULMAX1RV64-NEXT: slli a1, a1, 19 +; LMULMAX1RV64-NEXT: srli a1, a1, 51 +; LMULMAX1RV64-NEXT: sh a1, 4(a0) +; LMULMAX1RV64-NEXT: ret + %z = fptoui <3 x float> %x to <3 x i15> + ret <3 x i15> %z +} + +define <3 x i1> @fp2ui_v3f32_v3i1(<3 x float> %x) { +; CHECK-LABEL: fp2ui_v3f32_v3i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 +; CHECK-NEXT: vand.vi v8, v9, 1 +; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: ret + %z = fptoui <3 x float> %x to <3 x i1> + ret <3 x i1> %z +} + define void @fp2si_v8f32_v8i32(ptr %x, ptr %y) { ; LMULMAX8-LABEL: fp2si_v8f32_v8i32: ; LMULMAX8: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8 -; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8 -; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1 -; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1 +; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV32 +; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV64 +; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV32 +; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV64 define void @si2fp_v2i32_v2f32(ptr %x, ptr %y) { ; CHECK-LABEL: si2fp_v2i32_v2f32: @@ -84,6 +84,324 @@ ret <2 x float> %z } +define void @si2fp_v3i32_v3f32(ptr %x, ptr %y) { +; LMULMAX8RV32-LABEL: si2fp_v3i32_v3f32: +; LMULMAX8RV32: # %bb.0: +; LMULMAX8RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; LMULMAX8RV32-NEXT: vle32.v v8, (a0) +; LMULMAX8RV32-NEXT: vfcvt.f.x.v v8, v8 +; LMULMAX8RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; LMULMAX8RV32-NEXT: vslidedown.vi v9, v8, 2 +; LMULMAX8RV32-NEXT: addi a0, a1, 8 +; LMULMAX8RV32-NEXT: 
vse32.v v9, (a0) +; LMULMAX8RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; LMULMAX8RV32-NEXT: vse32.v v8, (a1) +; LMULMAX8RV32-NEXT: ret +; +; LMULMAX8RV64-LABEL: si2fp_v3i32_v3f32: +; LMULMAX8RV64: # %bb.0: +; LMULMAX8RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; LMULMAX8RV64-NEXT: vle32.v v8, (a0) +; LMULMAX8RV64-NEXT: vfcvt.f.x.v v8, v8 +; LMULMAX8RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; LMULMAX8RV64-NEXT: vslidedown.vi v9, v8, 2 +; LMULMAX8RV64-NEXT: addi a0, a1, 8 +; LMULMAX8RV64-NEXT: vse32.v v9, (a0) +; LMULMAX8RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; LMULMAX8RV64-NEXT: vse64.v v8, (a1) +; LMULMAX8RV64-NEXT: ret +; +; LMULMAX1RV32-LABEL: si2fp_v3i32_v3f32: +; LMULMAX1RV32: # %bb.0: +; LMULMAX1RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; LMULMAX1RV32-NEXT: vle32.v v8, (a0) +; LMULMAX1RV32-NEXT: vfcvt.f.x.v v8, v8 +; LMULMAX1RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; LMULMAX1RV32-NEXT: vslidedown.vi v9, v8, 2 +; LMULMAX1RV32-NEXT: addi a0, a1, 8 +; LMULMAX1RV32-NEXT: vse32.v v9, (a0) +; LMULMAX1RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; LMULMAX1RV32-NEXT: vse32.v v8, (a1) +; LMULMAX1RV32-NEXT: ret +; +; LMULMAX1RV64-LABEL: si2fp_v3i32_v3f32: +; LMULMAX1RV64: # %bb.0: +; LMULMAX1RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; LMULMAX1RV64-NEXT: vle32.v v8, (a0) +; LMULMAX1RV64-NEXT: vfcvt.f.x.v v8, v8 +; LMULMAX1RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; LMULMAX1RV64-NEXT: vslidedown.vi v9, v8, 2 +; LMULMAX1RV64-NEXT: addi a0, a1, 8 +; LMULMAX1RV64-NEXT: vse32.v v9, (a0) +; LMULMAX1RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; LMULMAX1RV64-NEXT: vse64.v v8, (a1) +; LMULMAX1RV64-NEXT: ret + %a = load <3 x i32>, ptr %x + %d = sitofp <3 x i32> %a to <3 x float> + store <3 x float> %d, ptr %y + ret void +} + +define void @ui2fp_v3i32_v3f32(ptr %x, ptr %y) { +; LMULMAX8RV32-LABEL: ui2fp_v3i32_v3f32: +; LMULMAX8RV32: # %bb.0: +; LMULMAX8RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; LMULMAX8RV32-NEXT: vle32.v v8, (a0) +; LMULMAX8RV32-NEXT: vfcvt.f.xu.v v8, v8 +; LMULMAX8RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; LMULMAX8RV32-NEXT: vslidedown.vi v9, v8, 2 +; LMULMAX8RV32-NEXT: addi a0, a1, 8 +; LMULMAX8RV32-NEXT: vse32.v v9, (a0) +; LMULMAX8RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; LMULMAX8RV32-NEXT: vse32.v v8, (a1) +; LMULMAX8RV32-NEXT: ret +; +; LMULMAX8RV64-LABEL: ui2fp_v3i32_v3f32: +; LMULMAX8RV64: # %bb.0: +; LMULMAX8RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; LMULMAX8RV64-NEXT: vle32.v v8, (a0) +; LMULMAX8RV64-NEXT: vfcvt.f.xu.v v8, v8 +; LMULMAX8RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; LMULMAX8RV64-NEXT: vslidedown.vi v9, v8, 2 +; LMULMAX8RV64-NEXT: addi a0, a1, 8 +; LMULMAX8RV64-NEXT: vse32.v v9, (a0) +; LMULMAX8RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; LMULMAX8RV64-NEXT: vse64.v v8, (a1) +; LMULMAX8RV64-NEXT: ret +; +; LMULMAX1RV32-LABEL: ui2fp_v3i32_v3f32: +; LMULMAX1RV32: # %bb.0: +; LMULMAX1RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; LMULMAX1RV32-NEXT: vle32.v v8, (a0) +; LMULMAX1RV32-NEXT: vfcvt.f.xu.v v8, v8 +; LMULMAX1RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; LMULMAX1RV32-NEXT: vslidedown.vi v9, v8, 2 +; LMULMAX1RV32-NEXT: addi a0, a1, 8 +; LMULMAX1RV32-NEXT: vse32.v v9, (a0) +; LMULMAX1RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; LMULMAX1RV32-NEXT: vse32.v v8, (a1) +; LMULMAX1RV32-NEXT: ret +; +; LMULMAX1RV64-LABEL: ui2fp_v3i32_v3f32: +; LMULMAX1RV64: # %bb.0: +; LMULMAX1RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; LMULMAX1RV64-NEXT: vle32.v v8, (a0) +; LMULMAX1RV64-NEXT: vfcvt.f.xu.v v8, v8 
+; LMULMAX1RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; LMULMAX1RV64-NEXT: vslidedown.vi v9, v8, 2 +; LMULMAX1RV64-NEXT: addi a0, a1, 8 +; LMULMAX1RV64-NEXT: vse32.v v9, (a0) +; LMULMAX1RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; LMULMAX1RV64-NEXT: vse64.v v8, (a1) +; LMULMAX1RV64-NEXT: ret + %a = load <3 x i32>, ptr %x + %d = uitofp <3 x i32> %a to <3 x float> + store <3 x float> %d, ptr %y + ret void +} + +define <3 x float> @si2fp_v3i1_v3f32(<3 x i1> %x) { +; CHECK-LABEL: si2fp_v3i1_v3f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vmerge.vim v9, v8, -1, v0 +; CHECK-NEXT: vfwcvt.f.x.v v8, v9 +; CHECK-NEXT: ret + %z = sitofp <3 x i1> %x to <3 x float> + ret <3 x float> %z +} + +; FIXME: This gets expanded instead of widened + promoted +define <3 x float> @si2fp_v3i7_v3f32(<3 x i7> %x) { +; LMULMAX8RV32-LABEL: si2fp_v3i7_v3f32: +; LMULMAX8RV32: # %bb.0: +; LMULMAX8RV32-NEXT: addi sp, sp, -16 +; LMULMAX8RV32-NEXT: .cfi_def_cfa_offset 16 +; LMULMAX8RV32-NEXT: lw a1, 8(a0) +; LMULMAX8RV32-NEXT: sb a1, 14(sp) +; LMULMAX8RV32-NEXT: lw a1, 4(a0) +; LMULMAX8RV32-NEXT: sb a1, 13(sp) +; LMULMAX8RV32-NEXT: lw a0, 0(a0) +; LMULMAX8RV32-NEXT: sb a0, 12(sp) +; LMULMAX8RV32-NEXT: addi a0, sp, 12 +; LMULMAX8RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; LMULMAX8RV32-NEXT: vle8.v v8, (a0) +; LMULMAX8RV32-NEXT: vadd.vv v8, v8, v8 +; LMULMAX8RV32-NEXT: vsra.vi v8, v8, 1 +; LMULMAX8RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; LMULMAX8RV32-NEXT: vsext.vf2 v9, v8 +; LMULMAX8RV32-NEXT: vfwcvt.f.x.v v8, v9 +; LMULMAX8RV32-NEXT: addi sp, sp, 16 +; LMULMAX8RV32-NEXT: ret +; +; LMULMAX8RV64-LABEL: si2fp_v3i7_v3f32: +; LMULMAX8RV64: # %bb.0: +; LMULMAX8RV64-NEXT: addi sp, sp, -16 +; LMULMAX8RV64-NEXT: .cfi_def_cfa_offset 16 +; LMULMAX8RV64-NEXT: ld a1, 16(a0) +; LMULMAX8RV64-NEXT: sb a1, 14(sp) +; LMULMAX8RV64-NEXT: ld a1, 8(a0) +; LMULMAX8RV64-NEXT: sb a1, 13(sp) +; LMULMAX8RV64-NEXT: ld a0, 0(a0) +; LMULMAX8RV64-NEXT: sb a0, 12(sp) +; LMULMAX8RV64-NEXT: addi a0, sp, 12 +; LMULMAX8RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; LMULMAX8RV64-NEXT: vle8.v v8, (a0) +; LMULMAX8RV64-NEXT: vadd.vv v8, v8, v8 +; LMULMAX8RV64-NEXT: vsra.vi v8, v8, 1 +; LMULMAX8RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; LMULMAX8RV64-NEXT: vsext.vf2 v9, v8 +; LMULMAX8RV64-NEXT: vfwcvt.f.x.v v8, v9 +; LMULMAX8RV64-NEXT: addi sp, sp, 16 +; LMULMAX8RV64-NEXT: ret +; +; LMULMAX1RV32-LABEL: si2fp_v3i7_v3f32: +; LMULMAX1RV32: # %bb.0: +; LMULMAX1RV32-NEXT: addi sp, sp, -16 +; LMULMAX1RV32-NEXT: .cfi_def_cfa_offset 16 +; LMULMAX1RV32-NEXT: lw a1, 8(a0) +; LMULMAX1RV32-NEXT: sb a1, 14(sp) +; LMULMAX1RV32-NEXT: lw a1, 4(a0) +; LMULMAX1RV32-NEXT: sb a1, 13(sp) +; LMULMAX1RV32-NEXT: lw a0, 0(a0) +; LMULMAX1RV32-NEXT: sb a0, 12(sp) +; LMULMAX1RV32-NEXT: addi a0, sp, 12 +; LMULMAX1RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; LMULMAX1RV32-NEXT: vle8.v v8, (a0) +; LMULMAX1RV32-NEXT: vadd.vv v8, v8, v8 +; LMULMAX1RV32-NEXT: vsra.vi v8, v8, 1 +; LMULMAX1RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; LMULMAX1RV32-NEXT: vsext.vf2 v9, v8 +; LMULMAX1RV32-NEXT: vfwcvt.f.x.v v8, v9 +; LMULMAX1RV32-NEXT: addi sp, sp, 16 +; LMULMAX1RV32-NEXT: ret +; +; LMULMAX1RV64-LABEL: si2fp_v3i7_v3f32: +; LMULMAX1RV64: # %bb.0: +; LMULMAX1RV64-NEXT: addi sp, sp, -16 +; LMULMAX1RV64-NEXT: .cfi_def_cfa_offset 16 +; LMULMAX1RV64-NEXT: ld a1, 16(a0) +; LMULMAX1RV64-NEXT: sb a1, 14(sp) +; LMULMAX1RV64-NEXT: ld a1, 8(a0) +; LMULMAX1RV64-NEXT: sb a1, 13(sp) +; LMULMAX1RV64-NEXT: ld a0, 
0(a0) +; LMULMAX1RV64-NEXT: sb a0, 12(sp) +; LMULMAX1RV64-NEXT: addi a0, sp, 12 +; LMULMAX1RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; LMULMAX1RV64-NEXT: vle8.v v8, (a0) +; LMULMAX1RV64-NEXT: vadd.vv v8, v8, v8 +; LMULMAX1RV64-NEXT: vsra.vi v8, v8, 1 +; LMULMAX1RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; LMULMAX1RV64-NEXT: vsext.vf2 v9, v8 +; LMULMAX1RV64-NEXT: vfwcvt.f.x.v v8, v9 +; LMULMAX1RV64-NEXT: addi sp, sp, 16 +; LMULMAX1RV64-NEXT: ret + %z = sitofp <3 x i7> %x to <3 x float> + ret <3 x float> %z +} + +; FIXME: This gets expanded instead of widened + promoted +define <3 x float> @ui2fp_v3i7_v3f32(<3 x i7> %x) { +; LMULMAX8RV32-LABEL: ui2fp_v3i7_v3f32: +; LMULMAX8RV32: # %bb.0: +; LMULMAX8RV32-NEXT: addi sp, sp, -16 +; LMULMAX8RV32-NEXT: .cfi_def_cfa_offset 16 +; LMULMAX8RV32-NEXT: lw a1, 8(a0) +; LMULMAX8RV32-NEXT: sb a1, 14(sp) +; LMULMAX8RV32-NEXT: lw a1, 4(a0) +; LMULMAX8RV32-NEXT: sb a1, 13(sp) +; LMULMAX8RV32-NEXT: lw a0, 0(a0) +; LMULMAX8RV32-NEXT: sb a0, 12(sp) +; LMULMAX8RV32-NEXT: addi a0, sp, 12 +; LMULMAX8RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; LMULMAX8RV32-NEXT: vle8.v v8, (a0) +; LMULMAX8RV32-NEXT: li a0, 127 +; LMULMAX8RV32-NEXT: vand.vx v8, v8, a0 +; LMULMAX8RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; LMULMAX8RV32-NEXT: vzext.vf2 v9, v8 +; LMULMAX8RV32-NEXT: vfwcvt.f.xu.v v8, v9 +; LMULMAX8RV32-NEXT: addi sp, sp, 16 +; LMULMAX8RV32-NEXT: ret +; +; LMULMAX8RV64-LABEL: ui2fp_v3i7_v3f32: +; LMULMAX8RV64: # %bb.0: +; LMULMAX8RV64-NEXT: addi sp, sp, -16 +; LMULMAX8RV64-NEXT: .cfi_def_cfa_offset 16 +; LMULMAX8RV64-NEXT: ld a1, 16(a0) +; LMULMAX8RV64-NEXT: sb a1, 14(sp) +; LMULMAX8RV64-NEXT: ld a1, 8(a0) +; LMULMAX8RV64-NEXT: sb a1, 13(sp) +; LMULMAX8RV64-NEXT: ld a0, 0(a0) +; LMULMAX8RV64-NEXT: sb a0, 12(sp) +; LMULMAX8RV64-NEXT: addi a0, sp, 12 +; LMULMAX8RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; LMULMAX8RV64-NEXT: vle8.v v8, (a0) +; LMULMAX8RV64-NEXT: li a0, 127 +; LMULMAX8RV64-NEXT: vand.vx v8, v8, a0 +; LMULMAX8RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; LMULMAX8RV64-NEXT: vzext.vf2 v9, v8 +; LMULMAX8RV64-NEXT: vfwcvt.f.xu.v v8, v9 +; LMULMAX8RV64-NEXT: addi sp, sp, 16 +; LMULMAX8RV64-NEXT: ret +; +; LMULMAX1RV32-LABEL: ui2fp_v3i7_v3f32: +; LMULMAX1RV32: # %bb.0: +; LMULMAX1RV32-NEXT: addi sp, sp, -16 +; LMULMAX1RV32-NEXT: .cfi_def_cfa_offset 16 +; LMULMAX1RV32-NEXT: lw a1, 8(a0) +; LMULMAX1RV32-NEXT: sb a1, 14(sp) +; LMULMAX1RV32-NEXT: lw a1, 4(a0) +; LMULMAX1RV32-NEXT: sb a1, 13(sp) +; LMULMAX1RV32-NEXT: lw a0, 0(a0) +; LMULMAX1RV32-NEXT: sb a0, 12(sp) +; LMULMAX1RV32-NEXT: addi a0, sp, 12 +; LMULMAX1RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; LMULMAX1RV32-NEXT: vle8.v v8, (a0) +; LMULMAX1RV32-NEXT: li a0, 127 +; LMULMAX1RV32-NEXT: vand.vx v8, v8, a0 +; LMULMAX1RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; LMULMAX1RV32-NEXT: vzext.vf2 v9, v8 +; LMULMAX1RV32-NEXT: vfwcvt.f.xu.v v8, v9 +; LMULMAX1RV32-NEXT: addi sp, sp, 16 +; LMULMAX1RV32-NEXT: ret +; +; LMULMAX1RV64-LABEL: ui2fp_v3i7_v3f32: +; LMULMAX1RV64: # %bb.0: +; LMULMAX1RV64-NEXT: addi sp, sp, -16 +; LMULMAX1RV64-NEXT: .cfi_def_cfa_offset 16 +; LMULMAX1RV64-NEXT: ld a1, 16(a0) +; LMULMAX1RV64-NEXT: sb a1, 14(sp) +; LMULMAX1RV64-NEXT: ld a1, 8(a0) +; LMULMAX1RV64-NEXT: sb a1, 13(sp) +; LMULMAX1RV64-NEXT: ld a0, 0(a0) +; LMULMAX1RV64-NEXT: sb a0, 12(sp) +; LMULMAX1RV64-NEXT: addi a0, sp, 12 +; LMULMAX1RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; LMULMAX1RV64-NEXT: vle8.v v8, (a0) +; LMULMAX1RV64-NEXT: li a0, 127 +; LMULMAX1RV64-NEXT: vand.vx v8, v8, a0 +; 
LMULMAX1RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma +; LMULMAX1RV64-NEXT: vzext.vf2 v9, v8 +; LMULMAX1RV64-NEXT: vfwcvt.f.xu.v v8, v9 +; LMULMAX1RV64-NEXT: addi sp, sp, 16 +; LMULMAX1RV64-NEXT: ret + %z = uitofp <3 x i7> %x to <3 x float> + ret <3 x float> %z +} + +define <3 x float> @ui2fp_v3i1_v3f32(<3 x i1> %x) { +; CHECK-LABEL: ui2fp_v3i1_v3f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vmerge.vim v9, v8, 1, v0 +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret + %z = uitofp <3 x i1> %x to <3 x float> + ret <3 x float> %z +} + define void @si2fp_v8i32_v8f32(ptr %x, ptr %y) { ; LMULMAX8-LABEL: si2fp_v8i32_v8f32: ; LMULMAX8: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll @@ -38,6 +38,41 @@ ret void } +define void @add_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: add_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vle16.v v9, (a1) +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: add_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vle16.v v9, (a1) +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %c = add <6 x i16> %a, %b + store <6 x i16> %c, ptr %x + ret void +} + define void @add_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: add_v4i32: ; CHECK: # %bb.0: @@ -102,6 +137,41 @@ ret void } +define void @sub_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: sub_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vle16.v v9, (a1) +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: sub_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vle16.v v9, (a1) +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %c = sub <6 x i16> %a, %b + store <6 x i16> %c, ptr %x + ret void +} + define void @sub_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: sub_v4i32: ; CHECK: # %bb.0: @@ -166,6 +236,41 @@ ret void } +define void @mul_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: mul_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vle16.v v9, (a1) +; RV32-NEXT: vmul.vv v8, v8, v9 +; RV32-NEXT: vsetivli zero, 1, e32, 
m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: mul_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vle16.v v9, (a1) +; RV64-NEXT: vmul.vv v8, v8, v9 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %c = mul <6 x i16> %a, %b + store <6 x i16> %c, ptr %x + ret void +} + define void @mul_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: mul_v4i32: ; CHECK: # %bb.0: @@ -230,6 +335,41 @@ ret void } +define void @and_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: and_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vle16.v v9, (a1) +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: and_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vle16.v v9, (a1) +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %c = and <6 x i16> %a, %b + store <6 x i16> %c, ptr %x + ret void +} + define void @and_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: and_v4i32: ; CHECK: # %bb.0: @@ -294,6 +434,41 @@ ret void } +define void @or_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: or_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vle16.v v9, (a1) +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: or_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vle16.v v9, (a1) +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %c = or <6 x i16> %a, %b + store <6 x i16> %c, ptr %x + ret void +} + define void @or_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: or_v4i32: ; CHECK: # %bb.0: @@ -358,6 +533,41 @@ ret void } +define void @xor_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: xor_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vle16.v v9, (a1) +; RV32-NEXT: vxor.vv v8, v8, v9 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: 
vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: xor_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vle16.v v9, (a1) +; RV64-NEXT: vxor.vv v8, v8, v9 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %c = xor <6 x i16> %a, %b + store <6 x i16> %c, ptr %x + ret void +} + define void @xor_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: xor_v4i32: ; CHECK: # %bb.0: @@ -422,6 +632,41 @@ ret void } +define void @lshr_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: lshr_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vle16.v v9, (a1) +; RV32-NEXT: vsrl.vv v8, v8, v9 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: lshr_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vle16.v v9, (a1) +; RV64-NEXT: vsrl.vv v8, v8, v9 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %c = lshr <6 x i16> %a, %b + store <6 x i16> %c, ptr %x + ret void +} + define void @lshr_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: lshr_v4i32: ; CHECK: # %bb.0: @@ -486,6 +731,41 @@ ret void } +define void @ashr_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: ashr_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vle16.v v9, (a1) +; RV32-NEXT: vsra.vv v8, v8, v9 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: ashr_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vle16.v v9, (a1) +; RV64-NEXT: vsra.vv v8, v8, v9 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %c = ashr <6 x i16> %a, %b + store <6 x i16> %c, ptr %x + ret void +} + define void @ashr_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: ashr_v4i32: ; CHECK: # %bb.0: @@ -550,6 +830,41 @@ ret void } +define void @shl_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: shl_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vle16.v v9, (a1) +; RV32-NEXT: vsll.vv v8, v8, v9 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; 
RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: shl_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vle16.v v9, (a1) +; RV64-NEXT: vsll.vv v8, v8, v9 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %c = shl <6 x i16> %a, %b + store <6 x i16> %c, ptr %x + ret void +} + define void @shl_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: shl_v4i32: ; CHECK: # %bb.0: @@ -614,6 +929,56 @@ ret void } +define void @sdiv_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: sdiv_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a1) +; RV32-NEXT: vle16.v v9, (a0) +; RV32-NEXT: vsetivli zero, 2, e16, m1, ta, ma +; RV32-NEXT: vslidedown.vi v10, v8, 4 +; RV32-NEXT: vslidedown.vi v11, v9, 4 +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; RV32-NEXT: vdiv.vv v10, v11, v10 +; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma +; RV32-NEXT: vslideup.vi v11, v10, 4 +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vdiv.vv v8, v9, v8 +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v11, 2 +; RV32-NEXT: addi a0, a0, 8 +; RV32-NEXT: vse32.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: sdiv_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a1) +; RV64-NEXT: vle16.v v9, (a0) +; RV64-NEXT: vsetivli zero, 2, e16, m1, ta, ma +; RV64-NEXT: vslidedown.vi v10, v8, 4 +; RV64-NEXT: vslidedown.vi v11, v9, 4 +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; RV64-NEXT: vdiv.vv v10, v11, v10 +; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV64-NEXT: vdiv.vv v8, v9, v8 +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vslideup.vi v8, v10, 4 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %c = sdiv <6 x i16> %a, %b + store <6 x i16> %c, ptr %x + ret void +} + define void @sdiv_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: sdiv_v4i32: ; CHECK: # %bb.0: @@ -678,6 +1043,56 @@ ret void } +define void @srem_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: srem_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a1) +; RV32-NEXT: vle16.v v9, (a0) +; RV32-NEXT: vsetivli zero, 2, e16, m1, ta, ma +; RV32-NEXT: vslidedown.vi v10, v8, 4 +; RV32-NEXT: vslidedown.vi v11, v9, 4 +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; RV32-NEXT: vrem.vv v10, v11, v10 +; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma +; RV32-NEXT: vslideup.vi v11, v10, 4 +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vrem.vv v8, v9, v8 +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v11, 2 +; RV32-NEXT: addi a0, a0, 8 +; RV32-NEXT: vse32.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: srem_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a1) +; RV64-NEXT: vle16.v v9, (a0) +; RV64-NEXT: vsetivli zero, 2, e16, m1, ta, ma +; RV64-NEXT: vslidedown.vi v10, v8, 4 
+; RV64-NEXT: vslidedown.vi v11, v9, 4 +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; RV64-NEXT: vrem.vv v10, v11, v10 +; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV64-NEXT: vrem.vv v8, v9, v8 +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vslideup.vi v8, v10, 4 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %c = srem <6 x i16> %a, %b + store <6 x i16> %c, ptr %x + ret void +} + define void @srem_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: srem_v4i32: ; CHECK: # %bb.0: @@ -742,6 +1157,56 @@ ret void } +define void @udiv_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: udiv_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a1) +; RV32-NEXT: vle16.v v9, (a0) +; RV32-NEXT: vsetivli zero, 2, e16, m1, ta, ma +; RV32-NEXT: vslidedown.vi v10, v8, 4 +; RV32-NEXT: vslidedown.vi v11, v9, 4 +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; RV32-NEXT: vdivu.vv v10, v11, v10 +; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma +; RV32-NEXT: vslideup.vi v11, v10, 4 +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vdivu.vv v8, v9, v8 +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v11, 2 +; RV32-NEXT: addi a0, a0, 8 +; RV32-NEXT: vse32.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: udiv_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a1) +; RV64-NEXT: vle16.v v9, (a0) +; RV64-NEXT: vsetivli zero, 2, e16, m1, ta, ma +; RV64-NEXT: vslidedown.vi v10, v8, 4 +; RV64-NEXT: vslidedown.vi v11, v9, 4 +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; RV64-NEXT: vdivu.vv v10, v11, v10 +; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV64-NEXT: vdivu.vv v8, v9, v8 +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vslideup.vi v8, v10, 4 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %c = udiv <6 x i16> %a, %b + store <6 x i16> %c, ptr %x + ret void +} + define void @udiv_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: udiv_v4i32: ; CHECK: # %bb.0: @@ -806,6 +1271,56 @@ ret void } +define void @urem_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: urem_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a1) +; RV32-NEXT: vle16.v v9, (a0) +; RV32-NEXT: vsetivli zero, 2, e16, m1, ta, ma +; RV32-NEXT: vslidedown.vi v10, v8, 4 +; RV32-NEXT: vslidedown.vi v11, v9, 4 +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; RV32-NEXT: vremu.vv v10, v11, v10 +; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma +; RV32-NEXT: vslideup.vi v11, v10, 4 +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vremu.vv v8, v9, v8 +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v11, 2 +; RV32-NEXT: addi a0, a0, 8 +; RV32-NEXT: vse32.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: urem_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a1) +; RV64-NEXT: vle16.v v9, (a0) +; RV64-NEXT: vsetivli 
zero, 2, e16, m1, ta, ma +; RV64-NEXT: vslidedown.vi v10, v8, 4 +; RV64-NEXT: vslidedown.vi v11, v9, 4 +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; RV64-NEXT: vremu.vv v10, v11, v10 +; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV64-NEXT: vremu.vv v8, v9, v8 +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vslideup.vi v8, v10, 4 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %c = urem <6 x i16> %a, %b + store <6 x i16> %c, ptr %x + ret void +} + define void @urem_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: urem_v4i32: ; CHECK: # %bb.0: @@ -873,8 +1388,8 @@ ; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32-NEXT: vmv.s.x v0, a1 ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; RV32-NEXT: lui a1, %hi(.LCPI52_0) -; RV32-NEXT: addi a1, a1, %lo(.LCPI52_0) +; RV32-NEXT: lui a1, %hi(.LCPI65_0) +; RV32-NEXT: addi a1, a1, %lo(.LCPI65_0) ; RV32-NEXT: vle8.v v12, (a1) ; RV32-NEXT: vmerge.vim v10, v10, 1, v0 ; RV32-NEXT: vsrl.vv v10, v8, v10 @@ -920,8 +1435,8 @@ ; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vmv.s.x v0, a1 ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma -; RV64-NEXT: lui a1, %hi(.LCPI52_0) -; RV64-NEXT: addi a1, a1, %lo(.LCPI52_0) +; RV64-NEXT: lui a1, %hi(.LCPI65_0) +; RV64-NEXT: addi a1, a1, %lo(.LCPI65_0) ; RV64-NEXT: vle8.v v12, (a1) ; RV64-NEXT: vmerge.vim v10, v10, 1, v0 ; RV64-NEXT: vsrl.vv v10, v8, v10 @@ -953,8 +1468,8 @@ ; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v9, v11, 6 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; CHECK-NEXT: lui a1, %hi(.LCPI53_0) -; CHECK-NEXT: addi a1, a1, %lo(.LCPI53_0) +; CHECK-NEXT: lui a1, %hi(.LCPI66_0) +; CHECK-NEXT: addi a1, a1, %lo(.LCPI66_0) ; CHECK-NEXT: vle16.v v12, (a1) ; CHECK-NEXT: vsrl.vv v9, v8, v9 ; CHECK-NEXT: vmulhu.vv v9, v9, v12 @@ -977,6 +1492,61 @@ ret void } +define void @mulhu_v6i16(ptr %x) { +; RV32-LABEL: mulhu_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vsetivli zero, 2, e16, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 4 +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; RV32-NEXT: vid.v v10 +; RV32-NEXT: vadd.vi v10, v10, 12 +; RV32-NEXT: vdivu.vv v9, v9, v10 +; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma +; RV32-NEXT: vslideup.vi v10, v9, 4 +; RV32-NEXT: lui a1, %hi(.LCPI67_0) +; RV32-NEXT: addi a1, a1, %lo(.LCPI67_0) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vle16.v v9, (a1) +; RV32-NEXT: vdivu.vv v8, v8, v9 +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v10, 2 +; RV32-NEXT: addi a0, a0, 8 +; RV32-NEXT: vse32.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: mulhu_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: lui a1, %hi(.LCPI67_0) +; RV64-NEXT: addi a1, a1, %lo(.LCPI67_0) +; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV64-NEXT: vle16.v v9, (a1) +; RV64-NEXT: vdivu.vv v9, v8, v9 +; RV64-NEXT: vsetivli zero, 2, e16, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 4 +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; RV64-NEXT: vid.v v10 +; RV64-NEXT: vadd.vi v10, v10, 12 +; RV64-NEXT: vdivu.vv v8, v8, v10 +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; 
RV64-NEXT: vslideup.vi v9, v8, 4 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v9, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v9, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = udiv <6 x i16> %a, <i16 7, i16 9, i16 10, i16 11, i16 12, i16 13> + store <6 x i16> %b, ptr %x + ret void +} + define void @mulhu_v4i32(ptr %x) { ; CHECK-LABEL: mulhu_v4i32: ; CHECK: # %bb.0: @@ -987,8 +1557,8 @@ ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v10, v9, 2 -; CHECK-NEXT: lui a1, %hi(.LCPI54_0) -; CHECK-NEXT: addi a1, a1, %lo(.LCPI54_0) +; CHECK-NEXT: lui a1, %hi(.LCPI68_0) +; CHECK-NEXT: addi a1, a1, %lo(.LCPI68_0) ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmulhu.vv v9, v8, v9 @@ -1012,14 +1582,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: lui a1, %hi(.LCPI55_0) -; RV32-NEXT: addi a1, a1, %lo(.LCPI55_0) +; RV32-NEXT: lui a1, %hi(.LCPI69_0) +; RV32-NEXT: addi a1, a1, %lo(.LCPI69_0) ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v9, (a1) ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vmulhu.vv v8, v8, v9 -; RV32-NEXT: lui a1, %hi(.LCPI55_1) -; RV32-NEXT: addi a1, a1, %lo(.LCPI55_1) +; RV32-NEXT: lui a1, %hi(.LCPI69_1) +; RV32-NEXT: addi a1, a1, %lo(.LCPI69_1) ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v9, (a1) ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma @@ -1030,11 +1600,11 @@ ; RV64-LABEL: mulhu_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; RV64-NEXT: lui a1, %hi(.LCPI55_0) -; RV64-NEXT: addi a1, a1, %lo(.LCPI55_0) +; RV64-NEXT: lui a1, %hi(.LCPI69_0) +; RV64-NEXT: addi a1, a1, %lo(.LCPI69_0) ; RV64-NEXT: vlse64.v v8, (a1), zero -; RV64-NEXT: lui a1, %hi(.LCPI55_1) -; RV64-NEXT: ld a1, %lo(.LCPI55_1)(a1) +; RV64-NEXT: lui a1, %hi(.LCPI69_1) +; RV64-NEXT: ld a1, %lo(.LCPI69_1)(a1) ; RV64-NEXT: vle64.v v9, (a0) ; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; RV64-NEXT: vmv.s.x v8, a1 @@ -1141,6 +1711,69 @@ ret void } +define void @mulhs_v6i16(ptr %x) { +; RV32-LABEL: mulhs_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; RV32-NEXT: vmv.v.i v9, 7 +; RV32-NEXT: vid.v v10 +; RV32-NEXT: li a1, -14 +; RV32-NEXT: vmadd.vx v10, a1, v9 +; RV32-NEXT: vsetivli zero, 2, e16, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 4 +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; RV32-NEXT: vdiv.vv v9, v9, v10 +; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma +; RV32-NEXT: vslideup.vi v10, v9, 4 +; RV32-NEXT: li a1, 6 +; RV32-NEXT: vmv.s.x v0, a1 +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vmv.v.i v9, -7 +; RV32-NEXT: vmerge.vim v9, v9, 7, v0 +; RV32-NEXT: vdiv.vv v8, v8, v9 +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v8, v10, 2 +; RV32-NEXT: addi a0, a0, 8 +; RV32-NEXT: vse32.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: mulhs_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; RV64-NEXT: vmv.v.i v9, 7 +; RV64-NEXT: vid.v v10 +; RV64-NEXT: li a1, -14 +; RV64-NEXT: vmadd.vx v10, a1, v9 +; RV64-NEXT: vsetivli zero, 2, e16, m1, ta, ma +; RV64-NEXT: vslidedown.vi v9, v8, 4 +; RV64-NEXT: 
vsetivli zero, 2, e16, mf4, ta, ma +; RV64-NEXT: vdiv.vv v9, v9, v10 +; RV64-NEXT: li a1, 6 +; RV64-NEXT: vmv.s.x v0, a1 +; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV64-NEXT: vmv.v.i v10, -7 +; RV64-NEXT: vmerge.vim v10, v10, 7, v0 +; RV64-NEXT: vdiv.vv v8, v8, v10 +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vslideup.vi v8, v9, 4 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = sdiv <6 x i16> %a, <i16 -7, i16 7, i16 7, i16 -7, i16 7, i16 -7> + store <6 x i16> %b, ptr %x + ret void +} + define void @mulhs_v4i32(ptr %x) { ; RV32-LABEL: mulhs_v4i32: ; RV32: # %bb.0: @@ -1165,8 +1798,8 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vle32.v v8, (a0) -; RV64-NEXT: lui a1, %hi(.LCPI58_0) -; RV64-NEXT: addi a1, a1, %lo(.LCPI58_0) +; RV64-NEXT: lui a1, %hi(.LCPI73_0) +; RV64-NEXT: addi a1, a1, %lo(.LCPI73_0) ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vlse64.v v9, (a1), zero ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma @@ -1218,11 +1851,11 @@ ; RV64-LABEL: mulhs_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma -; RV64-NEXT: lui a1, %hi(.LCPI59_0) -; RV64-NEXT: addi a1, a1, %lo(.LCPI59_0) +; RV64-NEXT: lui a1, %hi(.LCPI74_0) +; RV64-NEXT: addi a1, a1, %lo(.LCPI74_0) ; RV64-NEXT: vlse64.v v8, (a1), zero -; RV64-NEXT: lui a1, %hi(.LCPI59_1) -; RV64-NEXT: ld a1, %lo(.LCPI59_1)(a1) +; RV64-NEXT: lui a1, %hi(.LCPI74_1) +; RV64-NEXT: ld a1, %lo(.LCPI74_1)(a1) ; RV64-NEXT: vle64.v v9, (a0) ; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; RV64-NEXT: vmv.s.x v8, a1 @@ -1277,6 +1910,42 @@ ret void } +define void @smin_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: smin_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vle16.v v9, (a1) +; RV32-NEXT: vmin.vv v8, v8, v9 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: smin_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vle16.v v9, (a1) +; RV64-NEXT: vmin.vv v8, v8, v9 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %cc = icmp slt <6 x i16> %a, %b + %c = select <6 x i1> %cc, <6 x i16> %a, <6 x i16> %b + store <6 x i16> %c, ptr %x + ret void +} + define void @smin_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: smin_v4i32: ; CHECK: # %bb.0: @@ -1345,6 +2014,41 @@ } declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>) +define void @smin_vx_v6i16(ptr %x, i16 %y) { +; RV32-LABEL: smin_vx_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vmin.vx v8, v8, a1 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: smin_vx_v6i16: +; 
RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vmin.vx v8, v8, a1 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = insertelement <6 x i16> poison, i16 %y, i32 0 + %c = shufflevector <6 x i16> %b, <6 x i16> poison, <6 x i32> zeroinitializer + %d = call <6 x i16> @llvm.smin.v6i16(<6 x i16> %a, <6 x i16> %c) + store <6 x i16> %d, ptr %x + ret void +} +declare <6 x i16> @llvm.smin.v6i16(<6 x i16>, <6 x i16>) + define void @smin_vx_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: smin_vx_v4i32: ; CHECK: # %bb.0: @@ -1394,6 +2098,40 @@ ret void } +define void @smin_xv_v6i16(ptr %x, i16 %y) { +; RV32-LABEL: smin_xv_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vmin.vx v8, v8, a1 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: smin_xv_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vmin.vx v8, v8, a1 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = insertelement <6 x i16> poison, i16 %y, i32 0 + %c = shufflevector <6 x i16> %b, <6 x i16> poison, <6 x i32> zeroinitializer + %d = call <6 x i16> @llvm.smin.v6i16(<6 x i16> %c, <6 x i16> %a) + store <6 x i16> %d, ptr %x + ret void +} + define void @smin_xv_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: smin_xv_v4i32: ; CHECK: # %bb.0: @@ -1444,6 +2182,42 @@ ret void } +define void @smax_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: smax_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vle16.v v9, (a1) +; RV32-NEXT: vmax.vv v8, v8, v9 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: smax_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vle16.v v9, (a1) +; RV64-NEXT: vmax.vv v8, v8, v9 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %cc = icmp sgt <6 x i16> %a, %b + %c = select <6 x i1> %cc, <6 x i16> %a, <6 x i16> %b + store <6 x i16> %c, ptr %x + ret void +} + define void @smax_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: smax_v4i32: ; CHECK: # %bb.0: @@ -1512,6 +2286,41 @@ } declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>) +define void @smax_vx_v6i16(ptr %x, i16 %y) { +; RV32-LABEL: smax_vx_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vmax.vx 
v8, v8, a1 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: smax_vx_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vmax.vx v8, v8, a1 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = insertelement <6 x i16> poison, i16 %y, i32 0 + %c = shufflevector <6 x i16> %b, <6 x i16> poison, <6 x i32> zeroinitializer + %d = call <6 x i16> @llvm.smax.v6i16(<6 x i16> %a, <6 x i16> %c) + store <6 x i16> %d, ptr %x + ret void +} +declare <6 x i16> @llvm.smax.v6i16(<6 x i16>, <6 x i16>) + define void @smax_vx_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: smax_vx_v4i32: ; CHECK: # %bb.0: @@ -1561,6 +2370,40 @@ ret void } +define void @smax_xv_v6i16(ptr %x, i16 %y) { +; RV32-LABEL: smax_xv_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vmax.vx v8, v8, a1 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: smax_xv_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vmax.vx v8, v8, a1 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = insertelement <6 x i16> poison, i16 %y, i32 0 + %c = shufflevector <6 x i16> %b, <6 x i16> poison, <6 x i32> zeroinitializer + %d = call <6 x i16> @llvm.smax.v6i16(<6 x i16> %c, <6 x i16> %a) + store <6 x i16> %d, ptr %x + ret void +} + define void @smax_xv_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: smax_xv_v4i32: ; CHECK: # %bb.0: @@ -1611,6 +2454,42 @@ ret void } +define void @umin_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: umin_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vle16.v v9, (a1) +; RV32-NEXT: vminu.vv v8, v8, v9 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: umin_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vle16.v v9, (a1) +; RV64-NEXT: vminu.vv v8, v8, v9 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %cc = icmp ult <6 x i16> %a, %b + %c = select <6 x i1> %cc, <6 x i16> %a, <6 x i16> %b + store <6 x i16> %c, ptr %x + ret void +} + define void @umin_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: umin_v4i32: ; 
CHECK: # %bb.0: @@ -1679,6 +2558,41 @@ } declare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>) +define void @umin_vx_v6i16(ptr %x, i16 %y) { +; RV32-LABEL: umin_vx_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vminu.vx v8, v8, a1 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: umin_vx_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vminu.vx v8, v8, a1 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = insertelement <6 x i16> poison, i16 %y, i32 0 + %c = shufflevector <6 x i16> %b, <6 x i16> poison, <6 x i32> zeroinitializer + %d = call <6 x i16> @llvm.umin.v6i16(<6 x i16> %a, <6 x i16> %c) + store <6 x i16> %d, ptr %x + ret void +} +declare <6 x i16> @llvm.umin.v6i16(<6 x i16>, <6 x i16>) + define void @umin_vx_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: umin_vx_v4i32: ; CHECK: # %bb.0: @@ -1728,6 +2642,40 @@ ret void } +define void @umin_xv_v6i16(ptr %x, i16 %y) { +; RV32-LABEL: umin_xv_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vminu.vx v8, v8, a1 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: umin_xv_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vminu.vx v8, v8, a1 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = insertelement <6 x i16> poison, i16 %y, i32 0 + %c = shufflevector <6 x i16> %b, <6 x i16> poison, <6 x i32> zeroinitializer + %d = call <6 x i16> @llvm.umin.v6i16(<6 x i16> %c, <6 x i16> %a) + store <6 x i16> %d, ptr %x + ret void +} + define void @umin_xv_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: umin_xv_v4i32: ; CHECK: # %bb.0: @@ -1778,6 +2726,42 @@ ret void } +define void @umax_v6i16(ptr %x, ptr %y) { +; RV32-LABEL: umax_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vle16.v v9, (a1) +; RV32-NEXT: vmaxu.vv v8, v8, v9 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: umax_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vle16.v v9, (a1) +; RV64-NEXT: vmaxu.vv v8, v8, v9 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: 
vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = load <6 x i16>, ptr %y + %cc = icmp ugt <6 x i16> %a, %b + %c = select <6 x i1> %cc, <6 x i16> %a, <6 x i16> %b + store <6 x i16> %c, ptr %x + ret void +} + define void @umax_v4i32(ptr %x, ptr %y) { ; CHECK-LABEL: umax_v4i32: ; CHECK: # %bb.0: @@ -1846,6 +2830,41 @@ } declare <8 x i16> @llvm.umax.v8i16(<8 x i16>, <8 x i16>) +define void @umax_vx_v6i16(ptr %x, i16 %y) { +; RV32-LABEL: umax_vx_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vmaxu.vx v8, v8, a1 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: umax_vx_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vmaxu.vx v8, v8, a1 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = insertelement <6 x i16> poison, i16 %y, i32 0 + %c = shufflevector <6 x i16> %b, <6 x i16> poison, <6 x i32> zeroinitializer + %d = call <6 x i16> @llvm.umax.v6i16(<6 x i16> %a, <6 x i16> %c) + store <6 x i16> %d, ptr %x + ret void +} +declare <6 x i16> @llvm.umax.v6i16(<6 x i16>, <6 x i16>) + define void @umax_vx_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: umax_vx_v4i32: ; CHECK: # %bb.0: @@ -1895,6 +2914,40 @@ ret void } +define void @umax_xv_v6i16(ptr %x, i16 %y) { +; RV32-LABEL: umax_xv_v6i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV32-NEXT: vle16.v v8, (a0) +; RV32-NEXT: vmaxu.vx v8, v8, a1 +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV32-NEXT: vslidedown.vi v9, v8, 2 +; RV32-NEXT: addi a1, a0, 8 +; RV32-NEXT: vse32.v v9, (a1) +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; RV32-NEXT: vse16.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: umax_xv_v6i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; RV64-NEXT: vle16.v v8, (a0) +; RV64-NEXT: vmaxu.vx v8, v8, a1 +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; RV64-NEXT: vse64.v v8, (a0) +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma +; RV64-NEXT: vslidedown.vi v8, v8, 2 +; RV64-NEXT: addi a0, a0, 8 +; RV64-NEXT: vse32.v v8, (a0) +; RV64-NEXT: ret + %a = load <6 x i16>, ptr %x + %b = insertelement <6 x i16> poison, i16 %y, i32 0 + %c = shufflevector <6 x i16> %b, <6 x i16> poison, <6 x i32> zeroinitializer + %d = call <6 x i16> @llvm.umax.v6i16(<6 x i16> %c, <6 x i16> %a) + store <6 x i16> %d, ptr %x + ret void +} + define void @umax_xv_v4i32(ptr %x, i32 %y) { ; CHECK-LABEL: umax_xv_v4i32: ; CHECK: # %bb.0: @@ -2050,6 +3103,74 @@ ret void } +define void @add_v6i32(ptr %x, ptr %y) { +; LMULMAX2-RV32-LABEL: add_v6i32: +; LMULMAX2-RV32: # %bb.0: +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; LMULMAX2-RV32-NEXT: vle32.v v8, (a0) +; LMULMAX2-RV32-NEXT: vle32.v v10, (a1) +; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v10 +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma +; LMULMAX2-RV32-NEXT: vslidedown.vi v10, v8, 4 +; LMULMAX2-RV32-NEXT: addi a1, a0, 16 +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; LMULMAX2-RV32-NEXT: vse32.v v10, (a1) +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, 
ta, ma +; LMULMAX2-RV32-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV32-NEXT: ret +; +; LMULMAX2-RV64-LABEL: add_v6i32: +; LMULMAX2-RV64: # %bb.0: +; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; LMULMAX2-RV64-NEXT: vle32.v v8, (a0) +; LMULMAX2-RV64-NEXT: vle32.v v10, (a1) +; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v10 +; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; LMULMAX2-RV64-NEXT: vslidedown.vi v10, v8, 2 +; LMULMAX2-RV64-NEXT: addi a1, a0, 16 +; LMULMAX2-RV64-NEXT: vse64.v v10, (a1) +; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; LMULMAX2-RV64-NEXT: vse32.v v8, (a0) +; LMULMAX2-RV64-NEXT: ret +; +; LMULMAX1-RV32-LABEL: add_v6i32: +; LMULMAX1-RV32: # %bb.0: +; LMULMAX1-RV32-NEXT: addi a2, a0, 16 +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) +; LMULMAX1-RV32-NEXT: vle32.v v9, (a1) +; LMULMAX1-RV32-NEXT: vle32.v v10, (a2) +; LMULMAX1-RV32-NEXT: addi a1, a1, 16 +; LMULMAX1-RV32-NEXT: vle32.v v11, (a1) +; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v9 +; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; LMULMAX1-RV32-NEXT: vadd.vv v8, v10, v11 +; LMULMAX1-RV32-NEXT: vse32.v v8, (a2) +; LMULMAX1-RV32-NEXT: ret +; +; LMULMAX1-RV64-LABEL: add_v6i32: +; LMULMAX1-RV64: # %bb.0: +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) +; LMULMAX1-RV64-NEXT: addi a2, a1, 16 +; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) +; LMULMAX1-RV64-NEXT: addi a2, a0, 16 +; LMULMAX1-RV64-NEXT: vle32.v v10, (a2) +; LMULMAX1-RV64-NEXT: vle32.v v11, (a1) +; LMULMAX1-RV64-NEXT: vadd.vv v9, v10, v9 +; LMULMAX1-RV64-NEXT: vadd.vv v8, v8, v11 +; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) +; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; LMULMAX1-RV64-NEXT: vse64.v v9, (a2) +; LMULMAX1-RV64-NEXT: ret + %a = load <6 x i32>, ptr %x + %b = load <6 x i32>, ptr %y + %c = add <6 x i32> %a, %b + store <6 x i32> %c, ptr %x + ret void +} + define void @add_v4i64(ptr %x, ptr %y) { ; LMULMAX2-LABEL: add_v4i64: ; LMULMAX2: # %bb.0: @@ -4360,8 +5481,8 @@ ; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a2 ; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; LMULMAX2-RV32-NEXT: lui a2, %hi(.LCPI153_0) -; LMULMAX2-RV32-NEXT: addi a2, a2, %lo(.LCPI153_0) +; LMULMAX2-RV32-NEXT: lui a2, %hi(.LCPI181_0) +; LMULMAX2-RV32-NEXT: addi a2, a2, %lo(.LCPI181_0) ; LMULMAX2-RV32-NEXT: vle8.v v10, (a2) ; LMULMAX2-RV32-NEXT: vmv.v.i v12, 0 ; LMULMAX2-RV32-NEXT: vmerge.vim v14, v12, 1, v0 @@ -4410,8 +5531,8 @@ ; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; LMULMAX2-RV64-NEXT: vmv.s.x v0, a2 ; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; LMULMAX2-RV64-NEXT: lui a2, %hi(.LCPI153_0) -; LMULMAX2-RV64-NEXT: addi a2, a2, %lo(.LCPI153_0) +; LMULMAX2-RV64-NEXT: lui a2, %hi(.LCPI181_0) +; LMULMAX2-RV64-NEXT: addi a2, a2, %lo(.LCPI181_0) ; LMULMAX2-RV64-NEXT: vle8.v v10, (a2) ; LMULMAX2-RV64-NEXT: vmv.v.i v12, 0 ; LMULMAX2-RV64-NEXT: vmerge.vim v14, v12, 1, v0 @@ -4455,8 +5576,8 @@ ; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle8.v v8, (a1) -; LMULMAX1-NEXT: lui a2, %hi(.LCPI153_0) -; LMULMAX1-NEXT: addi a2, a2, %lo(.LCPI153_0) +; LMULMAX1-NEXT: lui a2, %hi(.LCPI181_0) +; LMULMAX1-NEXT: addi a2, a2, %lo(.LCPI181_0) ; LMULMAX1-NEXT: vle8.v v9, (a2) ; LMULMAX1-NEXT: vle8.v v10, (a0) ; LMULMAX1-NEXT: vdivu.vv v8, v8, v9 @@ -4488,8 +5609,8 @@ ; LMULMAX2-RV32-NEXT: li a1, 257 ; 
LMULMAX2-RV32-NEXT: vmv.s.x v0, a1 ; LMULMAX2-RV32-NEXT: vmv.v.i v14, 0 -; LMULMAX2-RV32-NEXT: lui a1, %hi(.LCPI154_0) -; LMULMAX2-RV32-NEXT: addi a1, a1, %lo(.LCPI154_0) +; LMULMAX2-RV32-NEXT: lui a1, %hi(.LCPI182_0) +; LMULMAX2-RV32-NEXT: addi a1, a1, %lo(.LCPI182_0) ; LMULMAX2-RV32-NEXT: vle16.v v16, (a1) ; LMULMAX2-RV32-NEXT: lui a1, 1048568 ; LMULMAX2-RV32-NEXT: vmerge.vxm v18, v14, a1, v0 @@ -4521,8 +5642,8 @@ ; LMULMAX2-RV64-NEXT: li a1, 257 ; LMULMAX2-RV64-NEXT: vmv.s.x v0, a1 ; LMULMAX2-RV64-NEXT: vmv.v.i v14, 0 -; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI154_0) -; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI154_0) +; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI182_0) +; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI182_0) ; LMULMAX2-RV64-NEXT: vle16.v v16, (a1) ; LMULMAX2-RV64-NEXT: lui a1, 1048568 ; LMULMAX2-RV64-NEXT: vmerge.vxm v18, v14, a1, v0 @@ -4542,8 +5663,8 @@ ; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle16.v v8, (a1) -; LMULMAX1-NEXT: lui a2, %hi(.LCPI154_0) -; LMULMAX1-NEXT: addi a2, a2, %lo(.LCPI154_0) +; LMULMAX1-NEXT: lui a2, %hi(.LCPI182_0) +; LMULMAX1-NEXT: addi a2, a2, %lo(.LCPI182_0) ; LMULMAX1-NEXT: vle16.v v9, (a2) ; LMULMAX1-NEXT: vle16.v v10, (a0) ; LMULMAX1-NEXT: vdivu.vv v8, v8, v9 @@ -4564,8 +5685,8 @@ ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: li a1, 68 ; LMULMAX2-NEXT: vmv.s.x v0, a1 -; LMULMAX2-NEXT: lui a1, %hi(.LCPI155_0) -; LMULMAX2-NEXT: addi a1, a1, %lo(.LCPI155_0) +; LMULMAX2-NEXT: lui a1, %hi(.LCPI183_0) +; LMULMAX2-NEXT: addi a1, a1, %lo(.LCPI183_0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vmv.v.i v12, 0 ; LMULMAX2-NEXT: lui a1, 524288 @@ -4593,8 +5714,8 @@ ; LMULMAX1-RV32-NEXT: vmv.v.i v11, 0 ; LMULMAX1-RV32-NEXT: vsetivli zero, 3, e32, m1, tu, ma ; LMULMAX1-RV32-NEXT: vslideup.vi v11, v10, 2 -; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI155_0) -; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI155_0) +; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI183_0) +; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI183_0) ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v10, (a2) ; LMULMAX1-RV32-NEXT: vmulhu.vv v12, v9, v10 @@ -4619,8 +5740,8 @@ ; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle32.v v8, (a1) -; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI155_0) -; LMULMAX1-RV64-NEXT: addi a2, a2, %lo(.LCPI155_0) +; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI183_0) +; LMULMAX1-RV64-NEXT: addi a2, a2, %lo(.LCPI183_0) ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) ; LMULMAX1-RV64-NEXT: vle32.v v10, (a0) ; LMULMAX1-RV64-NEXT: vdivu.vv v8, v8, v9 @@ -4639,8 +5760,8 @@ ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) -; LMULMAX2-RV32-NEXT: lui a1, %hi(.LCPI156_0) -; LMULMAX2-RV32-NEXT: addi a1, a1, %lo(.LCPI156_0) +; LMULMAX2-RV32-NEXT: lui a1, %hi(.LCPI184_0) +; LMULMAX2-RV32-NEXT: addi a1, a1, %lo(.LCPI184_0) ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle32.v v10, (a1) ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma @@ -4655,8 +5776,8 @@ ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmulhu.vv v8, v8, v14 ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v10 -; LMULMAX2-RV32-NEXT: lui a1, %hi(.LCPI156_1) -; LMULMAX2-RV32-NEXT: addi a1, a1, %lo(.LCPI156_1) +; LMULMAX2-RV32-NEXT: lui a1, %hi(.LCPI184_1) +; LMULMAX2-RV32-NEXT: addi a1, a1, %lo(.LCPI184_1) ; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma 
; LMULMAX2-RV32-NEXT: vle32.v v10, (a1) ; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma @@ -4674,13 +5795,13 @@ ; LMULMAX2-RV64-NEXT: vmv.v.i v12, 0 ; LMULMAX2-RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma ; LMULMAX2-RV64-NEXT: vslideup.vi v12, v10, 2 -; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI156_0) -; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI156_0) +; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI184_0) +; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI184_0) ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle64.v v10, (a1) ; LMULMAX2-RV64-NEXT: vmulhu.vv v10, v8, v10 -; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI156_1) -; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI156_1) +; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI184_1) +; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI184_1) ; LMULMAX2-RV64-NEXT: vle64.v v14, (a1) ; LMULMAX2-RV64-NEXT: vsub.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vmulhu.vv v8, v8, v12 @@ -4695,14 +5816,14 @@ ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a1) -; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI156_0) -; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI156_0) +; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI184_0) +; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI184_0) ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v10, (a2) ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vdivu.vv v9, v9, v10 -; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI156_1) -; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI156_1) +; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI184_1) +; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI184_1) ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v10, (a2) ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma @@ -4722,12 +5843,12 @@ ; LMULMAX1-RV64-NEXT: slli a2, a2, 63 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; LMULMAX1-RV64-NEXT: vmv.s.x v10, a2 -; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI156_0) -; LMULMAX1-RV64-NEXT: addi a2, a2, %lo(.LCPI156_0) +; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI184_0) +; LMULMAX1-RV64-NEXT: addi a2, a2, %lo(.LCPI184_0) ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vlse64.v v11, (a2), zero -; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI156_1) -; LMULMAX1-RV64-NEXT: ld a2, %lo(.LCPI156_1)(a2) +; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI184_1) +; LMULMAX1-RV64-NEXT: ld a2, %lo(.LCPI184_1)(a2) ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; LMULMAX1-RV64-NEXT: vmv.s.x v11, a2 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma @@ -4736,11 +5857,11 @@ ; LMULMAX1-RV64-NEXT: vmulhu.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vadd.vv v9, v9, v11 ; LMULMAX1-RV64-NEXT: vid.v v10 -; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI156_2) -; LMULMAX1-RV64-NEXT: addi a2, a2, %lo(.LCPI156_2) +; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI184_2) +; LMULMAX1-RV64-NEXT: addi a2, a2, %lo(.LCPI184_2) ; LMULMAX1-RV64-NEXT: vlse64.v v11, (a2), zero -; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI156_3) -; LMULMAX1-RV64-NEXT: ld a2, %lo(.LCPI156_3)(a2) +; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI184_3) +; LMULMAX1-RV64-NEXT: ld a2, %lo(.LCPI184_3)(a2) ; LMULMAX1-RV64-NEXT: vadd.vi v12, v10, 2 ; LMULMAX1-RV64-NEXT: vsrl.vv v9, v9, v12 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma @@ -4930,8 +6051,8 @@ ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle32.v v8, (a0) -; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI159_0) -; LMULMAX2-RV64-NEXT: addi 
a1, a1, %lo(.LCPI159_0)
+; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI187_0)
+; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI187_0)
 ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
 ; LMULMAX2-RV64-NEXT: vlse64.v v10, (a1), zero
 ; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
@@ -5031,11 +6152,11 @@
 ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
 ; LMULMAX2-RV64-NEXT: li a1, 5
 ; LMULMAX2-RV64-NEXT: vmv.s.x v0, a1
-; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI160_0)
-; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI160_0)
+; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI188_0)
+; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI188_0)
 ; LMULMAX2-RV64-NEXT: vlse64.v v8, (a1), zero
-; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI160_1)
-; LMULMAX2-RV64-NEXT: ld a1, %lo(.LCPI160_1)(a1)
+; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI188_1)
+; LMULMAX2-RV64-NEXT: ld a1, %lo(.LCPI188_1)(a1)
 ; LMULMAX2-RV64-NEXT: vle64.v v10, (a0)
 ; LMULMAX2-RV64-NEXT: vmv.v.i v12, -1
 ; LMULMAX2-RV64-NEXT: vmerge.vim v12, v12, 0, v0
@@ -5057,8 +6178,8 @@
 ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
 ; LMULMAX1-RV32-NEXT: addi a1, a0, 16
 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a1)
-; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI160_0)
-; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI160_0)
+; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI188_0)
+; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI188_0)
 ; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; LMULMAX1-RV32-NEXT: vle32.v v10, (a2)
 ; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
@@ -5072,11 +6193,11 @@
 ; LMULMAX1-RV64: # %bb.0:
 ; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0)
-; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI160_0)
-; LMULMAX1-RV64-NEXT: addi a1, a1, %lo(.LCPI160_0)
+; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI188_0)
+; LMULMAX1-RV64-NEXT: addi a1, a1, %lo(.LCPI188_0)
 ; LMULMAX1-RV64-NEXT: vlse64.v v9, (a1), zero
-; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI160_1)
-; LMULMAX1-RV64-NEXT: ld a1, %lo(.LCPI160_1)(a1)
+; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI188_1)
+; LMULMAX1-RV64-NEXT: ld a1, %lo(.LCPI188_1)(a1)
 ; LMULMAX1-RV64-NEXT: addi a2, a0, 16
 ; LMULMAX1-RV64-NEXT: vle64.v v10, (a2)
 ; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma
@@ -7710,8 +8831,8 @@
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: lui a1, %hi(.LCPI289_0)
-; RV64-NEXT: ld a1, %lo(.LCPI289_0)(a1)
+; RV64-NEXT: lui a1, %hi(.LCPI317_0)
+; RV64-NEXT: ld a1, %lo(.LCPI317_0)(a1)
 ; RV64-NEXT: vmulhu.vx v8, v8, a1
 ; RV64-NEXT: vsrl.vi v8, v8, 1
 ; RV64-NEXT: vse64.v v8, (a0)
@@ -7828,8 +8949,8 @@
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: lui a1, %hi(.LCPI293_0)
-; RV64-NEXT: ld a1, %lo(.LCPI293_0)(a1)
+; RV64-NEXT: lui a1, %hi(.LCPI321_0)
+; RV64-NEXT: ld a1, %lo(.LCPI321_0)(a1)
 ; RV64-NEXT: vmulh.vx v8, v8, a1
 ; RV64-NEXT: li a1, 63
 ; RV64-NEXT: vsrl.vx v9, v8, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
@@ -1,6 +1,521 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64
+
+define void @vselect_vv_v6i32(ptr %a, ptr %b, ptr %cc, ptr %z) {
+; RV32-LABEL: vselect_vv_v6i32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT: lbu a2, 0(a2)
+; RV32-NEXT: vle32.v v8, (a1)
+; RV32-NEXT: srli a1, a2, 5
+; RV32-NEXT: sb a1, 13(sp)
+; RV32-NEXT: andi a1, a2, 1
+; RV32-NEXT: sb a1, 8(sp)
+; RV32-NEXT: slli a1, a2, 27
+; RV32-NEXT: srli a1, a1, 31
+; RV32-NEXT: sb a1, 12(sp)
+; RV32-NEXT: slli a1, a2, 28
+; RV32-NEXT: srli a1, a1, 31
+; RV32-NEXT: sb a1, 11(sp)
+; RV32-NEXT: slli a1, a2, 29
+; RV32-NEXT: srli a1, a1, 31
+; RV32-NEXT: sb a1, 10(sp)
+; RV32-NEXT: slli a2, a2, 30
+; RV32-NEXT: srli a2, a2, 31
+; RV32-NEXT: sb a2, 9(sp)
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: vle8.v v10, (a1)
+; RV32-NEXT: vand.vi v10, v10, 1
+; RV32-NEXT: vmsne.vi v0, v10, 0
+; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV32-NEXT: vle32.v v8, (a0), v0.t
+; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v10, v8, 4
+; RV32-NEXT: addi a0, a3, 16
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT: vse32.v v10, (a0)
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vse32.v v8, (a3)
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vselect_vv_v6i32:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64-NEXT: lbu a2, 0(a2)
+; RV64-NEXT: vle32.v v8, (a1)
+; RV64-NEXT: srli a1, a2, 5
+; RV64-NEXT: sb a1, 13(sp)
+; RV64-NEXT: andi a1, a2, 1
+; RV64-NEXT: sb a1, 8(sp)
+; RV64-NEXT: slli a1, a2, 59
+; RV64-NEXT: srli a1, a1, 63
+; RV64-NEXT: sb a1, 12(sp)
+; RV64-NEXT: slli a1, a2, 60
+; RV64-NEXT: srli a1, a1, 63
+; RV64-NEXT: sb a1, 11(sp)
+; RV64-NEXT: slli a1, a2, 61
+; RV64-NEXT: srli a1, a1, 63
+; RV64-NEXT: sb a1, 10(sp)
+; RV64-NEXT: slli a2, a2, 62
+; RV64-NEXT: srli a2, a2, 63
+; RV64-NEXT: sb a2, 9(sp)
+; RV64-NEXT: addi a1, sp, 8
+; RV64-NEXT: vle8.v v10, (a1)
+; RV64-NEXT: vand.vi v10, v10, 1
+; RV64-NEXT: vmsne.vi v0, v10, 0
+; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; RV64-NEXT: vle32.v v8, (a0), v0.t
+; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-NEXT: addi a0, a3, 16
+; RV64-NEXT: vse64.v v10, (a0)
+; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT: vse32.v v8, (a3)
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+ %va = load <6 x i32>, ptr %a
+ %vb = load <6 x i32>, ptr %b
+ %vcc = load <6 x i1>, ptr %cc
+ %vsel = select <6 x i1> %vcc, <6 x i32> %va, <6 x i32> %vb
+ store <6 x i32> %vsel, ptr %z
+ ret void
+}
+
+define void @vselect_vx_v6i32(i32 %a, ptr %b, ptr %cc, ptr %z) {
+; RV32-LABEL: vselect_vx_v6i32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT: lbu a2, 0(a2)
+; RV32-NEXT: vle32.v v8, (a1)
+; RV32-NEXT: srli a1, a2, 5
+; RV32-NEXT: sb a1, 13(sp)
+; RV32-NEXT: andi a1, a2, 1
+; RV32-NEXT: sb a1, 8(sp)
+; RV32-NEXT: slli a1, a2, 27
+; RV32-NEXT: srli 
a1, a1, 31 +; RV32-NEXT: sb a1, 12(sp) +; RV32-NEXT: slli a1, a2, 28 +; RV32-NEXT: srli a1, a1, 31 +; RV32-NEXT: sb a1, 11(sp) +; RV32-NEXT: slli a1, a2, 29 +; RV32-NEXT: srli a1, a1, 31 +; RV32-NEXT: sb a1, 10(sp) +; RV32-NEXT: slli a2, a2, 30 +; RV32-NEXT: srli a2, a2, 31 +; RV32-NEXT: sb a2, 9(sp) +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vle8.v v10, (a1) +; RV32-NEXT: vand.vi v10, v10, 1 +; RV32-NEXT: vmsne.vi v0, v10, 0 +; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; RV32-NEXT: vmerge.vxm v8, v8, a0, v0 +; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma +; RV32-NEXT: vslidedown.vi v10, v8, 4 +; RV32-NEXT: addi a0, a3, 16 +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV32-NEXT: vse32.v v10, (a0) +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vse32.v v8, (a3) +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vselect_vx_v6i32: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV64-NEXT: lbu a2, 0(a2) +; RV64-NEXT: vle32.v v8, (a1) +; RV64-NEXT: srli a1, a2, 5 +; RV64-NEXT: sb a1, 13(sp) +; RV64-NEXT: andi a1, a2, 1 +; RV64-NEXT: sb a1, 8(sp) +; RV64-NEXT: slli a1, a2, 59 +; RV64-NEXT: srli a1, a1, 63 +; RV64-NEXT: sb a1, 12(sp) +; RV64-NEXT: slli a1, a2, 60 +; RV64-NEXT: srli a1, a1, 63 +; RV64-NEXT: sb a1, 11(sp) +; RV64-NEXT: slli a1, a2, 61 +; RV64-NEXT: srli a1, a1, 63 +; RV64-NEXT: sb a1, 10(sp) +; RV64-NEXT: slli a2, a2, 62 +; RV64-NEXT: srli a2, a2, 63 +; RV64-NEXT: sb a2, 9(sp) +; RV64-NEXT: addi a1, sp, 8 +; RV64-NEXT: vle8.v v10, (a1) +; RV64-NEXT: vand.vi v10, v10, 1 +; RV64-NEXT: vmsne.vi v0, v10, 0 +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 +; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV64-NEXT: vslidedown.vi v10, v8, 2 +; RV64-NEXT: addi a0, a3, 16 +; RV64-NEXT: vse64.v v10, (a0) +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV64-NEXT: vse32.v v8, (a3) +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret + %vb = load <6 x i32>, ptr %b + %ahead = insertelement <6 x i32> poison, i32 %a, i32 0 + %va = shufflevector <6 x i32> %ahead, <6 x i32> poison, <6 x i32> zeroinitializer + %vcc = load <6 x i1>, ptr %cc + %vsel = select <6 x i1> %vcc, <6 x i32> %va, <6 x i32> %vb + store <6 x i32> %vsel, ptr %z + ret void +} + +define void @vselect_vi_v6i32(ptr %b, ptr %cc, ptr %z) { +; RV32-LABEL: vselect_vi_v6i32: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV32-NEXT: lbu a1, 0(a1) +; RV32-NEXT: vle32.v v8, (a0) +; RV32-NEXT: srli a0, a1, 5 +; RV32-NEXT: sb a0, 13(sp) +; RV32-NEXT: andi a0, a1, 1 +; RV32-NEXT: sb a0, 8(sp) +; RV32-NEXT: slli a0, a1, 27 +; RV32-NEXT: srli a0, a0, 31 +; RV32-NEXT: sb a0, 12(sp) +; RV32-NEXT: slli a0, a1, 28 +; RV32-NEXT: srli a0, a0, 31 +; RV32-NEXT: sb a0, 11(sp) +; RV32-NEXT: slli a0, a1, 29 +; RV32-NEXT: srli a0, a0, 31 +; RV32-NEXT: sb a0, 10(sp) +; RV32-NEXT: slli a1, a1, 30 +; RV32-NEXT: srli a1, a1, 31 +; RV32-NEXT: sb a1, 9(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vle8.v v10, (a0) +; RV32-NEXT: vand.vi v10, v10, 1 +; RV32-NEXT: vmsne.vi v0, v10, 0 +; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; RV32-NEXT: vmerge.vim v8, v8, -1, v0 +; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma +; RV32-NEXT: vslidedown.vi v10, v8, 4 +; RV32-NEXT: addi a0, a2, 16 +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV32-NEXT: vse32.v v10, (a0) +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, 
ma +; RV32-NEXT: vse32.v v8, (a2) +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vselect_vi_v6i32: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV64-NEXT: lbu a1, 0(a1) +; RV64-NEXT: vle32.v v8, (a0) +; RV64-NEXT: srli a0, a1, 5 +; RV64-NEXT: sb a0, 13(sp) +; RV64-NEXT: andi a0, a1, 1 +; RV64-NEXT: sb a0, 8(sp) +; RV64-NEXT: slli a0, a1, 59 +; RV64-NEXT: srli a0, a0, 63 +; RV64-NEXT: sb a0, 12(sp) +; RV64-NEXT: slli a0, a1, 60 +; RV64-NEXT: srli a0, a0, 63 +; RV64-NEXT: sb a0, 11(sp) +; RV64-NEXT: slli a0, a1, 61 +; RV64-NEXT: srli a0, a0, 63 +; RV64-NEXT: sb a0, 10(sp) +; RV64-NEXT: slli a1, a1, 62 +; RV64-NEXT: srli a1, a1, 63 +; RV64-NEXT: sb a1, 9(sp) +; RV64-NEXT: addi a0, sp, 8 +; RV64-NEXT: vle8.v v10, (a0) +; RV64-NEXT: vand.vi v10, v10, 1 +; RV64-NEXT: vmsne.vi v0, v10, 0 +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; RV64-NEXT: vmerge.vim v8, v8, -1, v0 +; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV64-NEXT: vslidedown.vi v10, v8, 2 +; RV64-NEXT: addi a0, a2, 16 +; RV64-NEXT: vse64.v v10, (a0) +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV64-NEXT: vse32.v v8, (a2) +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret + %vb = load <6 x i32>, ptr %b + %a = insertelement <6 x i32> poison, i32 -1, i32 0 + %va = shufflevector <6 x i32> %a, <6 x i32> poison, <6 x i32> zeroinitializer + %vcc = load <6 x i1>, ptr %cc + %vsel = select <6 x i1> %vcc, <6 x i32> %va, <6 x i32> %vb + store <6 x i32> %vsel, ptr %z + ret void +} + + +define void @vselect_vv_v6f32(ptr %a, ptr %b, ptr %cc, ptr %z) { +; RV32-LABEL: vselect_vv_v6f32: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV32-NEXT: lbu a2, 0(a2) +; RV32-NEXT: vle32.v v8, (a1) +; RV32-NEXT: srli a1, a2, 5 +; RV32-NEXT: sb a1, 13(sp) +; RV32-NEXT: andi a1, a2, 1 +; RV32-NEXT: sb a1, 8(sp) +; RV32-NEXT: slli a1, a2, 27 +; RV32-NEXT: srli a1, a1, 31 +; RV32-NEXT: sb a1, 12(sp) +; RV32-NEXT: slli a1, a2, 28 +; RV32-NEXT: srli a1, a1, 31 +; RV32-NEXT: sb a1, 11(sp) +; RV32-NEXT: slli a1, a2, 29 +; RV32-NEXT: srli a1, a1, 31 +; RV32-NEXT: sb a1, 10(sp) +; RV32-NEXT: slli a2, a2, 30 +; RV32-NEXT: srli a2, a2, 31 +; RV32-NEXT: sb a2, 9(sp) +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vle8.v v10, (a1) +; RV32-NEXT: vand.vi v10, v10, 1 +; RV32-NEXT: vmsne.vi v0, v10, 0 +; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV32-NEXT: vle32.v v8, (a0), v0.t +; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma +; RV32-NEXT: vslidedown.vi v10, v8, 4 +; RV32-NEXT: addi a0, a3, 16 +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV32-NEXT: vse32.v v10, (a0) +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vse32.v v8, (a3) +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vselect_vv_v6f32: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV64-NEXT: lbu a2, 0(a2) +; RV64-NEXT: vle32.v v8, (a1) +; RV64-NEXT: srli a1, a2, 5 +; RV64-NEXT: sb a1, 13(sp) +; RV64-NEXT: andi a1, a2, 1 +; RV64-NEXT: sb a1, 8(sp) +; RV64-NEXT: slli a1, a2, 59 +; RV64-NEXT: srli a1, a1, 63 +; RV64-NEXT: sb a1, 12(sp) +; RV64-NEXT: slli a1, a2, 60 +; RV64-NEXT: srli a1, a1, 63 +; RV64-NEXT: sb a1, 11(sp) +; RV64-NEXT: slli a1, a2, 61 +; RV64-NEXT: srli a1, a1, 63 +; RV64-NEXT: sb a1, 10(sp) +; RV64-NEXT: slli a2, a2, 62 +; RV64-NEXT: srli a2, a2, 63 +; RV64-NEXT: sb 
a2, 9(sp) +; RV64-NEXT: addi a1, sp, 8 +; RV64-NEXT: vle8.v v10, (a1) +; RV64-NEXT: vand.vi v10, v10, 1 +; RV64-NEXT: vmsne.vi v0, v10, 0 +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vle32.v v8, (a0), v0.t +; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV64-NEXT: vslidedown.vi v10, v8, 2 +; RV64-NEXT: addi a0, a3, 16 +; RV64-NEXT: vse64.v v10, (a0) +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV64-NEXT: vse32.v v8, (a3) +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret + %va = load <6 x float>, ptr %a + %vb = load <6 x float>, ptr %b + %vcc = load <6 x i1>, ptr %cc + %vsel = select <6 x i1> %vcc, <6 x float> %va, <6 x float> %vb + store <6 x float> %vsel, ptr %z + ret void +} + +define void @vselect_vx_v6f32(float %a, ptr %b, ptr %cc, ptr %z) { +; RV32-LABEL: vselect_vx_v6f32: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV32-NEXT: lbu a1, 0(a1) +; RV32-NEXT: vle32.v v8, (a0) +; RV32-NEXT: srli a0, a1, 5 +; RV32-NEXT: sb a0, 13(sp) +; RV32-NEXT: andi a0, a1, 1 +; RV32-NEXT: sb a0, 8(sp) +; RV32-NEXT: slli a0, a1, 27 +; RV32-NEXT: srli a0, a0, 31 +; RV32-NEXT: sb a0, 12(sp) +; RV32-NEXT: slli a0, a1, 28 +; RV32-NEXT: srli a0, a0, 31 +; RV32-NEXT: sb a0, 11(sp) +; RV32-NEXT: slli a0, a1, 29 +; RV32-NEXT: srli a0, a0, 31 +; RV32-NEXT: sb a0, 10(sp) +; RV32-NEXT: slli a1, a1, 30 +; RV32-NEXT: srli a1, a1, 31 +; RV32-NEXT: sb a1, 9(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vle8.v v10, (a0) +; RV32-NEXT: vand.vi v10, v10, 1 +; RV32-NEXT: vmsne.vi v0, v10, 0 +; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; RV32-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma +; RV32-NEXT: vslidedown.vi v10, v8, 4 +; RV32-NEXT: addi a0, a2, 16 +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV32-NEXT: vse32.v v10, (a0) +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vse32.v v8, (a2) +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vselect_vx_v6f32: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV64-NEXT: lbu a1, 0(a1) +; RV64-NEXT: vle32.v v8, (a0) +; RV64-NEXT: srli a0, a1, 5 +; RV64-NEXT: sb a0, 13(sp) +; RV64-NEXT: andi a0, a1, 1 +; RV64-NEXT: sb a0, 8(sp) +; RV64-NEXT: slli a0, a1, 59 +; RV64-NEXT: srli a0, a0, 63 +; RV64-NEXT: sb a0, 12(sp) +; RV64-NEXT: slli a0, a1, 60 +; RV64-NEXT: srli a0, a0, 63 +; RV64-NEXT: sb a0, 11(sp) +; RV64-NEXT: slli a0, a1, 61 +; RV64-NEXT: srli a0, a0, 63 +; RV64-NEXT: sb a0, 10(sp) +; RV64-NEXT: slli a1, a1, 62 +; RV64-NEXT: srli a1, a1, 63 +; RV64-NEXT: sb a1, 9(sp) +; RV64-NEXT: addi a0, sp, 8 +; RV64-NEXT: vle8.v v10, (a0) +; RV64-NEXT: vand.vi v10, v10, 1 +; RV64-NEXT: vmsne.vi v0, v10, 0 +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; RV64-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV64-NEXT: vslidedown.vi v10, v8, 2 +; RV64-NEXT: addi a0, a2, 16 +; RV64-NEXT: vse64.v v10, (a0) +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV64-NEXT: vse32.v v8, (a2) +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret + %vb = load <6 x float>, ptr %b + %ahead = insertelement <6 x float> poison, float %a, i32 0 + %va = shufflevector <6 x float> %ahead, <6 x float> poison, <6 x i32> zeroinitializer + %vcc = load <6 x i1>, ptr %cc + %vsel = select <6 x i1> %vcc, <6 x float> %va, <6 x float> %vb + store <6 x float> %vsel, ptr %z + ret void +} + +define void 
@vselect_vfpzero_v6f32(ptr %b, ptr %cc, ptr %z) { +; RV32-LABEL: vselect_vfpzero_v6f32: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV32-NEXT: lbu a1, 0(a1) +; RV32-NEXT: vle32.v v8, (a0) +; RV32-NEXT: srli a0, a1, 5 +; RV32-NEXT: sb a0, 13(sp) +; RV32-NEXT: andi a0, a1, 1 +; RV32-NEXT: sb a0, 8(sp) +; RV32-NEXT: slli a0, a1, 27 +; RV32-NEXT: srli a0, a0, 31 +; RV32-NEXT: sb a0, 12(sp) +; RV32-NEXT: slli a0, a1, 28 +; RV32-NEXT: srli a0, a0, 31 +; RV32-NEXT: sb a0, 11(sp) +; RV32-NEXT: slli a0, a1, 29 +; RV32-NEXT: srli a0, a0, 31 +; RV32-NEXT: sb a0, 10(sp) +; RV32-NEXT: slli a1, a1, 30 +; RV32-NEXT: srli a1, a1, 31 +; RV32-NEXT: sb a1, 9(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vle8.v v10, (a0) +; RV32-NEXT: vand.vi v10, v10, 1 +; RV32-NEXT: vmsne.vi v0, v10, 0 +; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; RV32-NEXT: vmerge.vim v8, v8, 0, v0 +; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma +; RV32-NEXT: vslidedown.vi v10, v8, 4 +; RV32-NEXT: addi a0, a2, 16 +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; RV32-NEXT: vse32.v v10, (a0) +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vse32.v v8, (a2) +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vselect_vfpzero_v6f32: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV64-NEXT: lbu a1, 0(a1) +; RV64-NEXT: vle32.v v8, (a0) +; RV64-NEXT: srli a0, a1, 5 +; RV64-NEXT: sb a0, 13(sp) +; RV64-NEXT: andi a0, a1, 1 +; RV64-NEXT: sb a0, 8(sp) +; RV64-NEXT: slli a0, a1, 59 +; RV64-NEXT: srli a0, a0, 63 +; RV64-NEXT: sb a0, 12(sp) +; RV64-NEXT: slli a0, a1, 60 +; RV64-NEXT: srli a0, a0, 63 +; RV64-NEXT: sb a0, 11(sp) +; RV64-NEXT: slli a0, a1, 61 +; RV64-NEXT: srli a0, a0, 63 +; RV64-NEXT: sb a0, 10(sp) +; RV64-NEXT: slli a1, a1, 62 +; RV64-NEXT: srli a1, a1, 63 +; RV64-NEXT: sb a1, 9(sp) +; RV64-NEXT: addi a0, sp, 8 +; RV64-NEXT: vle8.v v10, (a0) +; RV64-NEXT: vand.vi v10, v10, 1 +; RV64-NEXT: vmsne.vi v0, v10, 0 +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; RV64-NEXT: vmerge.vim v8, v8, 0, v0 +; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma +; RV64-NEXT: vslidedown.vi v10, v8, 2 +; RV64-NEXT: addi a0, a2, 16 +; RV64-NEXT: vse64.v v10, (a0) +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV64-NEXT: vse32.v v8, (a2) +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret + %vb = load <6 x float>, ptr %b + %a = insertelement <6 x float> poison, float 0.0, i32 0 + %va = shufflevector <6 x float> %a, <6 x float> poison, <6 x i32> zeroinitializer + %vcc = load <6 x i1>, ptr %cc + %vsel = select <6 x i1> %vcc, <6 x float> %va, <6 x float> %vb + store <6 x float> %vsel, ptr %z + ret void +} define void @vselect_vv_v8i32(ptr %a, ptr %b, ptr %cc, ptr %z) { ; CHECK-LABEL: vselect_vv_v8i32: