diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -15268,11 +15268,7 @@ SDValue Op = N->getOperand(0); EVT VT = N->getValueType(0); - if (!VT.isFixedLengthVector()) - return SDValue(); - - if (DAG.getTargetLoweringInfo().isTypeLegal(VT) || - !Subtarget->useSVEForFixedLengthVectors()) + if (!VT.isFixedLengthVector() || !Subtarget->useSVEForFixedLengthVectors()) return SDValue(); // In cases where the result of the FP_EXTEND is not legal, it will be @@ -15286,6 +15282,10 @@ // As part of the lowering of FP_EXTEND for fixed length types uunpklo nodes // will be introduced which will then combine with the truncate introduced // after the load. + // + // In cases where the result of the FP_EXTEND is legal, it is still + // worthwhile to do this, as we will end up folding the integer extend + // produced by the FP_EXTEND lowering into the load. 
if (ISD::isNormalLoad(Op.getNode())) { LoadSDNode *LD = cast<LoadSDNode>(Op.getNode()); diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll @@ -48,9 +48,8 @@ define void @fcvt_v8f16_v8f32(<8 x half>* %a, <8 x float>* %b) #0 { ; CHECK-LABEL: fcvt_v8f16_v8f32: ; CHECK: // %bb.0: -; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ptrue p0.s, vl8 -; CHECK-NEXT: uunpklo z0.s, z0.h +; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0] ; CHECK-NEXT: fcvt z0.s, p0/m, z0.h ; CHECK-NEXT: st1w { z0.s }, p0, [x1] ; CHECK-NEXT: ret @@ -76,10 +75,8 @@ ; ; VBITS_GE_512-LABEL: fcvt_v16f16_v16f32: ; VBITS_GE_512: // %bb.0: -; VBITS_GE_512-NEXT: ptrue p0.h, vl16 -; VBITS_GE_512-NEXT: ld1h { z0.h }, p0/z, [x0] ; VBITS_GE_512-NEXT: ptrue p0.s, vl16 -; VBITS_GE_512-NEXT: uunpklo z0.s, z0.h +; VBITS_GE_512-NEXT: ld1h { z0.s }, p0/z, [x0] ; VBITS_GE_512-NEXT: fcvt z0.s, p0/m, z0.h ; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x1] ; VBITS_GE_512-NEXT: ret @@ -92,10 +89,8 @@ define void @fcvt_v32f16_v32f32(<32 x half>* %a, <32 x float>* %b) #0 { ; VBITS_GE_1024-LABEL: fcvt_v32f16_v32f32: ; VBITS_GE_1024: // %bb.0: -; VBITS_GE_1024-NEXT: ptrue p0.h, vl32 -; VBITS_GE_1024-NEXT: ld1h { z0.h }, p0/z, [x0] ; VBITS_GE_1024-NEXT: ptrue p0.s, vl32 -; VBITS_GE_1024-NEXT: uunpklo z0.s, z0.h +; VBITS_GE_1024-NEXT: ld1h { z0.s }, p0/z, [x0] ; VBITS_GE_1024-NEXT: fcvt z0.s, p0/m, z0.h ; VBITS_GE_1024-NEXT: st1w { z0.s }, p0, [x1] ; VBITS_GE_1024-NEXT: ret @@ -108,10 +103,8 @@ define void @fcvt_v64f16_v64f32(<64 x half>* %a, <64 x float>* %b) #0 { ; VBITS_GE_2048-LABEL: fcvt_v64f16_v64f32: ; VBITS_GE_2048: // %bb.0: -; VBITS_GE_2048-NEXT: ptrue p0.h, vl64 -; VBITS_GE_2048-NEXT: ld1h { z0.h }, p0/z, [x0] ; VBITS_GE_2048-NEXT: ptrue p0.s, vl64 -; VBITS_GE_2048-NEXT: uunpklo z0.s, z0.h +; VBITS_GE_2048-NEXT: ld1h { z0.s }, 
p0/z, [x0] ; VBITS_GE_2048-NEXT: fcvt z0.s, p0/m, z0.h ; VBITS_GE_2048-NEXT: st1w { z0.s }, p0, [x1] ; VBITS_GE_2048-NEXT: ret @@ -153,10 +146,8 @@ define void @fcvt_v4f16_v4f64(<4 x half>* %a, <4 x double>* %b) #0 { ; CHECK-LABEL: fcvt_v4f16_v4f64: ; CHECK: // %bb.0: -; CHECK-NEXT: ldr d0, [x0] ; CHECK-NEXT: ptrue p0.d, vl4 -; CHECK-NEXT: uunpklo z0.s, z0.h -; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0] ; CHECK-NEXT: fcvt z0.d, p0/m, z0.h ; CHECK-NEXT: st1d { z0.d }, p0, [x1] ; CHECK-NEXT: ret @@ -182,10 +173,8 @@ ; ; VBITS_GE_512-LABEL: fcvt_v8f16_v8f64: ; VBITS_GE_512: // %bb.0: -; VBITS_GE_512-NEXT: ldr q0, [x0] ; VBITS_GE_512-NEXT: ptrue p0.d, vl8 -; VBITS_GE_512-NEXT: uunpklo z0.s, z0.h -; VBITS_GE_512-NEXT: uunpklo z0.d, z0.s +; VBITS_GE_512-NEXT: ld1h { z0.d }, p0/z, [x0] ; VBITS_GE_512-NEXT: fcvt z0.d, p0/m, z0.h ; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x1] ; VBITS_GE_512-NEXT: ret @@ -198,11 +187,8 @@ define void @fcvt_v16f16_v16f64(<16 x half>* %a, <16 x double>* %b) #0 { ; VBITS_GE_1024-LABEL: fcvt_v16f16_v16f64: ; VBITS_GE_1024: // %bb.0: -; VBITS_GE_1024-NEXT: ptrue p0.h, vl16 -; VBITS_GE_1024-NEXT: ld1h { z0.h }, p0/z, [x0] ; VBITS_GE_1024-NEXT: ptrue p0.d, vl16 -; VBITS_GE_1024-NEXT: uunpklo z0.s, z0.h -; VBITS_GE_1024-NEXT: uunpklo z0.d, z0.s +; VBITS_GE_1024-NEXT: ld1h { z0.d }, p0/z, [x0] ; VBITS_GE_1024-NEXT: fcvt z0.d, p0/m, z0.h ; VBITS_GE_1024-NEXT: st1d { z0.d }, p0, [x1] ; VBITS_GE_1024-NEXT: ret @@ -215,11 +201,8 @@ define void @fcvt_v32f16_v32f64(<32 x half>* %a, <32 x double>* %b) #0 { ; VBITS_GE_2048-LABEL: fcvt_v32f16_v32f64: ; VBITS_GE_2048: // %bb.0: -; VBITS_GE_2048-NEXT: ptrue p0.h, vl32 -; VBITS_GE_2048-NEXT: ld1h { z0.h }, p0/z, [x0] ; VBITS_GE_2048-NEXT: ptrue p0.d, vl32 -; VBITS_GE_2048-NEXT: uunpklo z0.s, z0.h -; VBITS_GE_2048-NEXT: uunpklo z0.d, z0.s +; VBITS_GE_2048-NEXT: ld1h { z0.d }, p0/z, [x0] ; VBITS_GE_2048-NEXT: fcvt z0.d, p0/m, z0.h ; VBITS_GE_2048-NEXT: st1d { z0.d }, p0, [x1] ; 
VBITS_GE_2048-NEXT: ret @@ -257,9 +240,8 @@ define void @fcvt_v4f32_v4f64(<4 x float>* %a, <4 x double>* %b) #0 { ; CHECK-LABEL: fcvt_v4f32_v4f64: ; CHECK: // %bb.0: -; CHECK-NEXT: ldr q0, [x0] ; CHECK-NEXT: ptrue p0.d, vl4 -; CHECK-NEXT: uunpklo z0.d, z0.s +; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0] ; CHECK-NEXT: fcvt z0.d, p0/m, z0.s ; CHECK-NEXT: st1d { z0.d }, p0, [x1] ; CHECK-NEXT: ret @@ -285,10 +267,8 @@ ; ; VBITS_GE_512-LABEL: fcvt_v8f32_v8f64: ; VBITS_GE_512: // %bb.0: -; VBITS_GE_512-NEXT: ptrue p0.s, vl8 -; VBITS_GE_512-NEXT: ld1w { z0.s }, p0/z, [x0] ; VBITS_GE_512-NEXT: ptrue p0.d, vl8 -; VBITS_GE_512-NEXT: uunpklo z0.d, z0.s +; VBITS_GE_512-NEXT: ld1w { z0.d }, p0/z, [x0] ; VBITS_GE_512-NEXT: fcvt z0.d, p0/m, z0.s ; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x1] ; VBITS_GE_512-NEXT: ret @@ -301,10 +281,8 @@ define void @fcvt_v16f32_v16f64(<16 x float>* %a, <16 x double>* %b) #0 { ; VBITS_GE_1024-LABEL: fcvt_v16f32_v16f64: ; VBITS_GE_1024: // %bb.0: -; VBITS_GE_1024-NEXT: ptrue p0.s, vl16 -; VBITS_GE_1024-NEXT: ld1w { z0.s }, p0/z, [x0] ; VBITS_GE_1024-NEXT: ptrue p0.d, vl16 -; VBITS_GE_1024-NEXT: uunpklo z0.d, z0.s +; VBITS_GE_1024-NEXT: ld1w { z0.d }, p0/z, [x0] ; VBITS_GE_1024-NEXT: fcvt z0.d, p0/m, z0.s ; VBITS_GE_1024-NEXT: st1d { z0.d }, p0, [x1] ; VBITS_GE_1024-NEXT: ret @@ -317,10 +295,8 @@ define void @fcvt_v32f32_v32f64(<32 x float>* %a, <32 x double>* %b) #0 { ; VBITS_GE_2048-LABEL: fcvt_v32f32_v32f64: ; VBITS_GE_2048: // %bb.0: -; VBITS_GE_2048-NEXT: ptrue p0.s, vl32 -; VBITS_GE_2048-NEXT: ld1w { z0.s }, p0/z, [x0] ; VBITS_GE_2048-NEXT: ptrue p0.d, vl32 -; VBITS_GE_2048-NEXT: uunpklo z0.d, z0.s +; VBITS_GE_2048-NEXT: ld1w { z0.d }, p0/z, [x0] ; VBITS_GE_2048-NEXT: fcvt z0.d, p0/m, z0.s ; VBITS_GE_2048-NEXT: st1d { z0.d }, p0, [x1] ; VBITS_GE_2048-NEXT: ret