diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -1019,6 +1019,30 @@
                   (!cast<Instruction>(NAME # _D) ZPR64:$Zd, FPR64asZPR:$Dn, 0), 2>;
   def : InstAlias<"mov $Zd, $Qn",
                   (!cast<Instruction>(NAME # _Q) ZPR128:$Zd, FPR128asZPR:$Qn, 0), 2>;
+
+  // Duplicate extracted element of vector into all vector elements
+  def : Pat<(nxv16i8 (AArch64dup (i32 (vector_extract (nxv16i8 ZPR:$vec), sve_elm_idx_extdup_b:$index)))),
+            (!cast<Instruction>(NAME # _B) ZPR:$vec, sve_elm_idx_extdup_b:$index)>;
+  def : Pat<(nxv8i16 (AArch64dup (i32 (vector_extract (nxv8i16 ZPR:$vec), sve_elm_idx_extdup_h:$index)))),
+            (!cast<Instruction>(NAME # _H) ZPR:$vec, sve_elm_idx_extdup_h:$index)>;
+  def : Pat<(nxv4i32 (AArch64dup (i32 (vector_extract (nxv4i32 ZPR:$vec), sve_elm_idx_extdup_s:$index)))),
+            (!cast<Instruction>(NAME # _S) ZPR:$vec, sve_elm_idx_extdup_s:$index)>;
+  def : Pat<(nxv2i64 (AArch64dup (i64 (vector_extract (nxv2i64 ZPR:$vec), sve_elm_idx_extdup_d:$index)))),
+            (!cast<Instruction>(NAME # _D) ZPR:$vec, sve_elm_idx_extdup_d:$index)>;
+  def : Pat<(nxv8f16 (AArch64dup (f16 (vector_extract (nxv8f16 ZPR:$vec), sve_elm_idx_extdup_h:$index)))),
+            (!cast<Instruction>(NAME # _H) ZPR:$vec, sve_elm_idx_extdup_h:$index)>;
+  def : Pat<(nxv8bf16 (AArch64dup (bf16 (vector_extract (nxv8bf16 ZPR:$vec), sve_elm_idx_extdup_h:$index)))),
+            (!cast<Instruction>(NAME # _H) ZPR:$vec, sve_elm_idx_extdup_h:$index)>;
+  def : Pat<(nxv4f16 (AArch64dup (f16 (vector_extract (nxv4f16 ZPR:$vec), sve_elm_idx_extdup_s:$index)))),
+            (!cast<Instruction>(NAME # _S) ZPR:$vec, sve_elm_idx_extdup_s:$index)>;
+  def : Pat<(nxv2f16 (AArch64dup (f16 (vector_extract (nxv2f16 ZPR:$vec), sve_elm_idx_extdup_d:$index)))),
+            (!cast<Instruction>(NAME # _D) ZPR:$vec, sve_elm_idx_extdup_d:$index)>;
+  def : Pat<(nxv4f32 (AArch64dup (f32 (vector_extract (nxv4f32 ZPR:$vec), sve_elm_idx_extdup_s:$index)))),
+            (!cast<Instruction>(NAME # _S) ZPR:$vec, sve_elm_idx_extdup_s:$index)>;
+  def : Pat<(nxv2f32 (AArch64dup (f32 (vector_extract (nxv2f32 ZPR:$vec), sve_elm_idx_extdup_d:$index)))),
+            (!cast<Instruction>(NAME # _D) ZPR:$vec, sve_elm_idx_extdup_d:$index)>;
+  def : Pat<(nxv2f64 (AArch64dup (f64 (vector_extract (nxv2f64 ZPR:$vec), sve_elm_idx_extdup_d:$index)))),
+            (!cast<Instruction>(NAME # _D) ZPR:$vec, sve_elm_idx_extdup_d:$index)>;
 }
 
 class sve_int_perm_tbl<bits<2> sz8_64, bits<2> opc, string asm, ZPRRegOp zprty,
diff --git a/llvm/test/CodeGen/AArch64/aarch64-dup-extract-scalable.ll b/llvm/test/CodeGen/AArch64/aarch64-dup-extract-scalable.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/aarch64-dup-extract-scalable.ll
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple aarch64-none-linux-gnu -mattr=+sve | FileCheck %s
+
+define <vscale x 16 x i8> @dup_extract_i8(<vscale x 16 x i8> %data) {
+; CHECK-LABEL: dup_extract_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.b, z0.b[1]
+; CHECK-NEXT:    ret
+  %1 = extractelement <vscale x 16 x i8> %data, i8 1
+  %.splatinsert = insertelement <vscale x 16 x i8> poison, i8 %1, i32 0
+  %.splat = shufflevector <vscale x 16 x i8> %.splatinsert, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+  ret <vscale x 16 x i8> %.splat
+}
+
+define <vscale x 8 x i16> @dup_extract_i16(<vscale x 8 x i16> %data) {
+; CHECK-LABEL: dup_extract_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.h, z0.h[1]
+; CHECK-NEXT:    ret
+  %1 = extractelement <vscale x 8 x i16> %data, i16 1
+  %.splatinsert = insertelement <vscale x 8 x i16> poison, i16 %1, i32 0
+  %.splat = shufflevector <vscale x 8 x i16> %.splatinsert, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i16> %.splat
+}
+
+define <vscale x 4 x i32> @dup_extract_i32(<vscale x 4 x i32> %data) {
+; CHECK-LABEL: dup_extract_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    ret
+  %1 = extractelement <vscale x 4 x i32> %data, i32 1
+  %.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %1, i32 0
+  %.splat = shufflevector <vscale x 4 x i32> %.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i32> %.splat
+}
+
+define <vscale x 2 x i64> @dup_extract_i64(<vscale x 2 x i64> %data) {
+; CHECK-LABEL: dup_extract_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.d, z0.d[1]
+; CHECK-NEXT:    ret
+  %1 = extractelement <vscale x 2 x i64> %data, i64 1
+  %.splatinsert = insertelement <vscale x 2 x i64> poison, i64 %1, i32 0
+  %.splat = shufflevector <vscale x 2 x i64> %.splatinsert, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x i64> %.splat
+}
+
+define <vscale x 8 x half> @dup_extract_f16(<vscale x 8 x half> %data) {
+; CHECK-LABEL: dup_extract_f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.h, z0.h[1]
+; CHECK-NEXT:    ret
+  %1 = extractelement <vscale x 8 x half> %data, i16 1
+  %.splatinsert = insertelement <vscale x 8 x half> poison, half %1, i32 0
+  %.splat = shufflevector <vscale x 8 x half> %.splatinsert, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x half> %.splat
+}
+
+define <vscale x 4 x half> @dup_extract_f16_4(<vscale x 4 x half> %data) {
+; CHECK-LABEL: dup_extract_f16_4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    ret
+  %1 = extractelement <vscale x 4 x half> %data, i16 1
+  %.splatinsert = insertelement <vscale x 4 x half> poison, half %1, i32 0
+  %.splat = shufflevector <vscale x 4 x half> %.splatinsert, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x half> %.splat
+}
+
+define <vscale x 2 x half> @dup_extract_f16_2(<vscale x 2 x half> %data) {
+; CHECK-LABEL: dup_extract_f16_2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.d, z0.d[1]
+; CHECK-NEXT:    ret
+  %1 = extractelement <vscale x 2 x half> %data, i16 1
+  %.splatinsert = insertelement <vscale x 2 x half> poison, half %1, i32 0
+  %.splat = shufflevector <vscale x 2 x half> %.splatinsert, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x half> %.splat
+}
+
+define <vscale x 8 x bfloat> @dup_extract_bf16(<vscale x 8 x bfloat> %data) #0 {
+; CHECK-LABEL: dup_extract_bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.h, z0.h[1]
+; CHECK-NEXT:    ret
+  %1 = extractelement <vscale x 8 x bfloat> %data, i16 1
+  %.splatinsert = insertelement <vscale x 8 x bfloat> poison, bfloat %1, i32 0
+  %.splat = shufflevector <vscale x 8 x bfloat> %.splatinsert, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x bfloat> %.splat
+}
+
+define <vscale x 4 x float> @dup_extract_f32(<vscale x 4 x float> %data) {
+; CHECK-LABEL: dup_extract_f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    ret
+  %1 = extractelement <vscale x 4 x float> %data, i32 1
+  %.splatinsert = insertelement <vscale x 4 x float> poison, float %1, i32 0
+  %.splat = shufflevector <vscale x 4 x float> %.splatinsert, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x float> %.splat
+}
+
+define <vscale x 2 x float> @dup_extract_f32_2(<vscale x 2 x float> %data) {
+; CHECK-LABEL: dup_extract_f32_2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.d, z0.d[1]
+; CHECK-NEXT:    ret
+  %1 = extractelement <vscale x 2 x float> %data, i32 1
+  %.splatinsert = insertelement <vscale x 2 x float> poison, float %1, i32 0
+  %.splat = shufflevector <vscale x 2 x float> %.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x float> %.splat
+}
+
+define <vscale x 2 x double> @dup_extract_f64(<vscale x 2 x double> %data) {
+; CHECK-LABEL: dup_extract_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.d, z0.d[1]
+; CHECK-NEXT:    ret
+  %1 = extractelement <vscale x 2 x double> %data, i64 1
+  %.splatinsert = insertelement <vscale x 2 x double> poison, double %1, i32 0
+  %.splat = shufflevector <vscale x 2 x double> %.splatinsert, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x double> %.splat
+}
+
+; +bf16 is required for the bfloat version.
+attributes #0 = { "target-features"="+sve,+bf16" }
diff --git a/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll b/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
--- a/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld-post-inc.ll
@@ -29,8 +29,8 @@
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr d0, [x0]
 ; CHECK-NEXT:    add x8, x0, x2, lsl #3
-; CHECK-NEXT:    mov z0.d, d0
 ; CHECK-NEXT:    str x8, [x1]
+; CHECK-NEXT:    mov z0.d, d0
 ; CHECK-NEXT:    ret
   %load = load double, double* %a
   %dup = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %load)