Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1185,15 +1185,20 @@
       }
     }
 
-    for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32,
-                    MVT::nxv4f32, MVT::nxv2f64}) {
-      for (auto InnerVT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16,
-                           MVT::nxv2f32, MVT::nxv4f32, MVT::nxv2f64}) {
+    for (MVT VT : MVT::fp_scalable_vector_valuetypes()) {
+      for (MVT InnerVT : MVT::fp_scalable_vector_valuetypes()) {
         // Avoid marking truncating FP stores as legal to prevent the
         // DAGCombiner from creating unsupported truncating stores.
         setTruncStoreAction(VT, InnerVT, Expand);
+        // SVE does not have floating-point extending loads.
+        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
+        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
+        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
       }
+    }
 
+    for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32,
+                    MVT::nxv4f32, MVT::nxv2f64}) {
       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
       setOperationAction(ISD::MGATHER, VT, Custom);
Index: llvm/test/CodeGen/AArch64/sve-fpext-load.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-fpext-load.ll
@@ -0,0 +1,85 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; fpext <vscale x 2 x half> -> <vscale x 2 x double>
+define <vscale x 2 x double> @ext2_f16_f64(<vscale x 2 x half> *%ptr, i64 %index) {
+; CHECK-LABEL: ext2_f16_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    fcvt z0.d, p0/m, z0.h
+; CHECK-NEXT:    ret
+  %load = load <vscale x 2 x half>, <vscale x 2 x half>* %ptr, align 4
+  %load.ext = fpext <vscale x 2 x half> %load to <vscale x 2 x double>
+  ret <vscale x 2 x double> %load.ext
+}
+
+; fpext <vscale x 4 x half> -> <vscale x 4 x double>
+define <vscale x 4 x double> @ext4_f16_f64(<vscale x 4 x half> *%ptr, i64 %index) {
+; CHECK-LABEL: ext4_f16_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpklo z1.d, z0.s
+; CHECK-NEXT:    uunpkhi z2.d, z0.s
+; CHECK-NEXT:    fcvt z0.d, p0/m, z1.h
+; CHECK-NEXT:    fcvt z1.d, p0/m, z2.h
+; CHECK-NEXT:    ret
+  %load = load <vscale x 4 x half>, <vscale x 4 x half>* %ptr, align 4
+  %load.ext = fpext <vscale x 4 x half> %load to <vscale x 4 x double>
+  ret <vscale x 4 x double> %load.ext
+}
+
+; fpext <vscale x 8 x half> -> <vscale x 8 x double>
+define <vscale x 8 x double> @ext8_f16_f64(<vscale x 8 x half> *%ptr, i64 %index) {
+; CHECK-LABEL: ext8_f16_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpklo z1.s, z0.h
+; CHECK-NEXT:    uunpkhi z0.s, z0.h
+; CHECK-NEXT:    uunpklo z2.d, z1.s
+; CHECK-NEXT:    uunpkhi z1.d, z1.s
+; CHECK-NEXT:    uunpklo z3.d, z0.s
+; CHECK-NEXT:    uunpkhi z4.d, z0.s
+; CHECK-NEXT:    fcvt z0.d, p0/m, z2.h
+; CHECK-NEXT:    fcvt z1.d, p0/m, z1.h
+; CHECK-NEXT:    fcvt z2.d, p0/m, z3.h
+; CHECK-NEXT:    fcvt z3.d, p0/m, z4.h
+; CHECK-NEXT:    ret
+  %load = load <vscale x 8 x half>, <vscale x 8 x half>* %ptr, align 4
+  %load.ext = fpext <vscale x 8 x half> %load to <vscale x 8 x double>
+  ret <vscale x 8 x double> %load.ext
+}
+
+; fpext <vscale x 2 x float> -> <vscale x 2 x double>
+define <vscale x 2 x double> @ext2_f32_f64(<vscale x 2 x float> *%ptr, i64 %index) {
+; CHECK-LABEL: ext2_f32_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    fcvt z0.d, p0/m, z0.s
+; CHECK-NEXT:    ret
+  %load = load <vscale x 2 x float>, <vscale x 2 x float>* %ptr, align 4
+  %load.ext = fpext <vscale x 2 x float> %load to <vscale x 2 x double>
+  ret <vscale x 2 x double> %load.ext
+}
+
+; fpext <vscale x 4 x float> -> <vscale x 4 x double>
+define <vscale x 4 x double> @ext4_f32_f64(<vscale x 4 x float> *%ptr, i64 %index) {
+; CHECK-LABEL: ext4_f32_f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    uunpklo z1.d, z0.s
+; CHECK-NEXT:    uunpkhi z2.d, z0.s
+; CHECK-NEXT:    fcvt z0.d, p0/m, z1.s
+; CHECK-NEXT:    fcvt z1.d, p0/m, z2.s
+; CHECK-NEXT:    ret
+  %load = load <vscale x 4 x float>, <vscale x 4 x float>* %ptr, align 4
+  %load.ext = fpext <vscale x 4 x float> %load to <vscale x 4 x double>
+  ret <vscale x 4 x double> %load.ext
+}
Index: llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
+++ llvm/test/CodeGen/AArch64/sve-fptrunc-store.ll
@@ -60,3 +60,28 @@
   store <vscale x 2 x half> %1, <vscale x 2 x half>* %dst, align 2
   ret void
 }
+
+define void @fptrunc8_f64_f16(<vscale x 8 x half> *%dst, <vscale x 8 x double> *%src) {
+; CHECK-LABEL: fptrunc8_f64_f16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x1]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1, #1, mul vl]
+; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x1, #2, mul vl]
+; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x1, #3, mul vl]
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.d
+; CHECK-NEXT:    fcvt z1.h, p0/m, z1.d
+; CHECK-NEXT:    fcvt z2.h, p0/m, z2.d
+; CHECK-NEXT:    fcvt z3.h, p0/m, z3.d
+; CHECK-NEXT:    uzp1 z2.s, z2.s, z3.s
+; CHECK-NEXT:    uzp1 z0.s, z0.s, z1.s
+; CHECK-NEXT:    uzp1 z0.h, z0.h, z2.h
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    ret
+entry:
+  %0 = load <vscale x 8 x double>, <vscale x 8 x double>* %src, align 8
+  %1 = fptrunc <vscale x 8 x double> %0 to <vscale x 8 x half>
+  store <vscale x 8 x half> %1, <vscale x 8 x half>* %dst, align 2
+  ret void
+}