diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -611,3 +611,177 @@
   store double %b, double* %p
   ret void
 }
+
+define i32 @ext_add_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: ext_add_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vadd.vi v8, v8, 3
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 2
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %bo = add <4 x i32> %x, <i32 1, i32 2, i32 3, i32 42>
+  %ext = extractelement <4 x i32> %bo, i32 2
+  ret i32 %ext
+}
+
+define i32 @ext_sub_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: ext_sub_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vrsub.vi v8, v8, 2
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %bo = sub <4 x i32> <i32 1, i32 2, i32 3, i32 42>, %x
+  %ext = extractelement <4 x i32> %bo, i32 1
+  ret i32 %ext
+}
+
+define i32 @ext_mul_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: ext_mul_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 42
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vmul.vx v8, v8, a0
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %bo = mul <4 x i32> %x, <i32 1, i32 2, i32 3, i32 42>
+  %ext = extractelement <4 x i32> %bo, i32 3
+  ret i32 %ext
+}
+
+define i32 @ext_sdiv_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: ext_sdiv_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %bo = sdiv <4 x i32> %x, <i32 1, i32 2, i32 3, i32 42>
+  %ext = extractelement <4 x i32> %bo, i32 0
+  ret i32 %ext
+}
+
+define i32 @ext_sdiv_v4i32_constant_op0(<4 x i32> %x) {
+; CHECK-LABEL: ext_sdiv_v4i32_constant_op0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI39_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI39_0)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v9, (a0)
+; CHECK-NEXT:    vdiv.vv v8, v9, v8
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %bo = sdiv <4 x i32> <i32 1, i32 2, i32 3, i32 42>, %x
+  %ext = extractelement <4 x i32> %bo, i32 1
+  ret i32 %ext
+}
+
+define i32 @ext_udiv_v4i32(<4 x i32> %x) {
+; CHECK-LABEL: ext_udiv_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %bo = sdiv <4 x i32> %x, <i32 1, i32 2, i32 3, i32 42>
+  %ext = extractelement <4 x i32> %bo, i32 0
+  ret i32 %ext
+}
+
+define i32 @ext_udiv_v4i32_constant_op0(<4 x i32> %x) {
+; CHECK-LABEL: ext_udiv_v4i32_constant_op0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI41_0)
+; CHECK-NEXT:    addi a0, a0, %lo(.LCPI41_0)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v9, (a0)
+; CHECK-NEXT:    vdiv.vv v8, v9, v8
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+  %bo = sdiv <4 x i32> <i32 1, i32 2, i32 3, i32 42>, %x
+  %ext = extractelement <4 x i32> %bo, i32 1
+  ret i32 %ext
+}
+
+define float @ext_fadd_v4f32(<4 x float> %x) {
+; CHECK-LABEL: ext_fadd_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI42_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI42_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vfadd.vf v8, v8, ft0
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 2
+; CHECK-NEXT:    vfmv.f.s fa0, v8
+; CHECK-NEXT:    ret
+  %bo = fadd <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 42.0>
+  %ext = extractelement <4 x float> %bo, i32 2
+  ret float %ext
+}
+
+define float @ext_fsub_v4f32(<4 x float> %x) {
+; CHECK-LABEL: ext_fsub_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI43_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI43_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vfrsub.vf v8, v8, ft0
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vfmv.f.s fa0, v8
+; CHECK-NEXT:    ret
+  %bo = fsub <4 x float> <float 1.0, float 2.0, float 3.0, float 42.0>, %x
+  %ext = extractelement <4 x float> %bo, i32 1
+  ret float %ext
+}
+
+define float @ext_fmul_v4f32(<4 x float> %x) {
+; CHECK-LABEL: ext_fmul_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI44_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI44_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vfmul.vf v8, v8, ft0
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
+; CHECK-NEXT:    vfmv.f.s fa0, v8
+; CHECK-NEXT:    ret
+  %bo = fmul <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 42.0>
+  %ext = extractelement <4 x float> %bo, i32 3
+  ret float %ext
+}
+
+define float @ext_fdiv_v4f32(<4 x float> %x) {
+; CHECK-LABEL: ext_fdiv_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
+; CHECK-NEXT:    vfmv.f.s fa0, v8
+; CHECK-NEXT:    ret
+  %bo = fdiv <4 x float> %x, <float 1.0, float 2.0, float 3.0, float 42.0>
+  %ext = extractelement <4 x float> %bo, i32 0
+  ret float %ext
+}
+
+define float @ext_fdiv_v4f32_constant_op0(<4 x float> %x) {
+; CHECK-LABEL: ext_fdiv_v4f32_constant_op0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, %hi(.LCPI46_0)
+; CHECK-NEXT:    flw ft0, %lo(.LCPI46_0)(a0)
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vfrdiv.vf v8, v8, ft0
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vfmv.f.s fa0, v8
+; CHECK-NEXT:    ret
+  %bo = fdiv <4 x float> <float 1.0, float 2.0, float 3.0, float 42.0>, %x
+  %ext = extractelement <4 x float> %bo, i32 1
+  ret float %ext
+}