diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -15245,11 +15245,7 @@
   SDValue Op = N->getOperand(0);
   EVT VT = N->getValueType(0);
 
-  if (!VT.isFixedLengthVector())
-    return SDValue();
-
-  if (DAG.getTargetLoweringInfo().isTypeLegal(VT) ||
-      !Subtarget->useSVEForFixedLengthVectors())
+  if (!VT.isFixedLengthVector() || !Subtarget->useSVEForFixedLengthVectors())
     return SDValue();
 
   // In cases where the result of the FP_EXTEND is not legal, it will be
@@ -15263,6 +15259,10 @@
   // As part of the lowering of FP_EXTEND for fixed length types uunpklo nodes
   // will be introduced which will then combine with the truncate introduced
   // after the load.
+  //
+  // In cases where the result of the FP_EXTEND is legal, it is still worth
+  // while to do this, as we will end up folding the integer extend produced by
+  // the FP_EXTEND lowering into the load.
   if (ISD::isNormalLoad(Op.getNode())) {
     LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
@@ -44,10 +44,9 @@
 
 define void @fcvt_v8f16_v8f32(<8 x half>* %a, <8 x float>* %b) #0 {
 ; CHECK-LABEL: fcvt_v8f16_v8f32:
-; CHECK: ldr q[[OP:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].s, vl8
-; CHECK-NEXT: uunpklo [[UPK:z[0-9]+]].s, z[[OP]].h
-; CHECK-NEXT: fcvt [[RES:z[0-9]+]].s, [[PG]]/m, [[UPK]].h
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
+; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: fcvt [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].h
 ; CHECK-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
 ; CHECK-NEXT: ret
   %op1 = load <8 x half>, <8 x half>* %a
@@ -58,12 +57,10 @@
 
 define void @fcvt_v16f16_v16f32(<16 x half>* %a, <16 x float>* %b) #0 {
 ; CHECK-LABEL: fcvt_v16f16_v16f32:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].h, vl16
-; VBITS_GE_512-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_512-NEXT: fcvt [[RES:z[0-9]+]].s, [[PG2]]/m, [[UPK]].h
-; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG1]], [x1]
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16
+; VBITS_GE_512-NEXT: ld1h { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: fcvt [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].h
+; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
 ; VBITS_GE_512-NEXT: ret
 
 ; Ensure sensible type legalisation
@@ -83,12 +80,10 @@
 
 define void @fcvt_v32f16_v32f32(<32 x half>* %a, <32 x float>* %b) #0 {
 ; CHECK-LABEL: fcvt_v32f16_v32f32:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].h, vl32
-; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].s, vl32
-; VBITS_GE_1024-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_1024-NEXT: fcvt [[RES:z[0-9]+]].s, [[PG2]]/m, [[UPK]].h
-; VBITS_GE_1024-NEXT: st1w { [[RES]].s }, [[PG1]], [x1]
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl32
+; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: fcvt [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].h
+; VBITS_GE_1024-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
 ; VBITS_GE_1024-NEXT: ret
   %op1 = load <32 x half>, <32 x half>* %a
   %res = fpext <32 x half> %op1 to <32 x float>
@@ -98,12 +93,10 @@
 
 define void @fcvt_v64f16_v64f32(<64 x half>* %a, <64 x float>* %b) #0 {
 ; CHECK-LABEL: fcvt_v64f16_v64f32:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].h, vl64
-; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].s, vl64
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_2048-NEXT: fcvt [[RES:z[0-9]+]].s, [[PG2]]/m, [[UPK]].h
-; VBITS_GE_2048-NEXT: st1w { [[RES]].s }, [[PG1]], [x1]
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl64
+; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: fcvt [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].h
+; VBITS_GE_2048-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
 ; VBITS_GE_2048-NEXT: ret
   %op1 = load <64 x half>, <64 x half>* %a
   %res = fpext <64 x half> %op1 to <64 x float>
@@ -138,11 +131,9 @@
 
 define void @fcvt_v4f16_v4f64(<4 x half>* %a, <4 x double>* %b) #0 {
 ; CHECK-LABEL: fcvt_v4f16_v4f64:
-; CHECK: ldr d[[OP:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[OP]].h
-; CHECK-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; CHECK-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG]]/m, [[UPK2]].h
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].h
 ; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
 ; CHECK-NEXT: ret
   %op1 = load <4 x half>, <4 x half>* %a
@@ -153,11 +144,9 @@
 
 define void @fcvt_v8f16_v8f64(<8 x half>* %a, <8 x double>* %b) #0 {
 ; CHECK-LABEL: fcvt_v8f16_v8f64:
-; VBITS_GE_512: ldr q[[OP:[0-9]+]], [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: uunpklo [[UPK1:z[0-9]+]].s, z[[OP]].h
-; VBITS_GE_512-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_512-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG]]/m, [[UPK2]].h
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1h { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].h
 ; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
 ; VBITS_GE_512-NEXT: ret
 
@@ -178,13 +167,10 @@
 
 define void @fcvt_v16f16_v16f64(<16 x half>* %a, <16 x double>* %b) #0 {
 ; CHECK-LABEL: fcvt_v16f16_v16f64:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].h, vl16
-; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_1024-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK1]].s
-; VBITS_GE_1024-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK2]].h
-; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16
+; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].h
+; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
 ; VBITS_GE_1024-NEXT: ret
   %op1 = load <16 x half>, <16 x half>* %a
   %res = fpext <16 x half> %op1 to <16 x double>
@@ -194,13 +180,10 @@
 
 define void @fcvt_v32f16_v32f64(<32 x half>* %a, <32 x double>* %b) #0 {
 ; CHECK-LABEL: fcvt_v32f16_v32f64:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].h, vl32
-; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: uunpklo [[UPK1:z[0-9]+]].s, [[OP]].h
-; VBITS_GE_2048-NEXT: uunpklo [[UPK2:z[0-9]+]].d, [[UPK]].s
-; VBITS_GE_2048-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK2]].h
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].h
+; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
 ; VBITS_GE_2048-NEXT: ret
   %op1 = load <32 x half>, <32 x half>* %a
   %res = fpext <32 x half> %op1 to <32 x double>
@@ -232,10 +215,9 @@
 
 define void @fcvt_v4f32_v4f64(<4 x float>* %a, <4 x double>* %b) #0 {
 ; CHECK-LABEL: fcvt_v4f32_v4f64:
-; CHECK: ldr q[[OP:[0-9]+]], [x0]
-; CHECK-NEXT: ptrue [[PG:p[0-9]+]].d, vl4
-; CHECK-NEXT: uunpklo [[UPK:z[0-9]+]].d, z[[OP]].s
-; CHECK-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG]]/m, [[UPK]].s
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].s
 ; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
 ; CHECK-NEXT: ret
   %op1 = load <4 x float>, <4 x float>* %a
@@ -246,12 +228,10 @@
 
 define void @fcvt_v8f32_v8f64(<8 x float>* %a, <8 x double>* %b) #0 {
 ; CHECK-LABEL: fcvt_v8f32_v8f64:
-; VBITS_GE_512: ptrue [[PG1:p[0-9]+]].s, vl8
-; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_512-NEXT: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[OP]].s
-; VBITS_GE_512-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG1]]/m, [[UPK]].s
-; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].s
+; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
 ; VBITS_GE_512-NEXT: ret
 
 ; Ensure sensible type legalisation
@@ -271,12 +251,10 @@
 
 define void @fcvt_v16f32_v16f64(<16 x float>* %a, <16 x double>* %b) #0 {
 ; CHECK-LABEL: fcvt_v16f32_v16f64:
-; VBITS_GE_1024: ptrue [[PG1:p[0-9]+]].s, vl16
-; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_1024-NEXT: ptrue [[PG2:p[0-9]+]].d, vl16
-; VBITS_GE_1024-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[OP]].s
-; VBITS_GE_1024-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK]].s
-; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16
+; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].s
+; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
 ; VBITS_GE_1024-NEXT: ret
   %op1 = load <16 x float>, <16 x float>* %a
   %res = fpext <16 x float> %op1 to <16 x double>
@@ -286,12 +264,10 @@
 
 define void @fcvt_v32f32_v32f64(<32 x float>* %a, <32 x double>* %b) #0 {
 ; CHECK-LABEL: fcvt_v32f32_v32f64:
-; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].s, vl32
-; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].d, vl32
-; VBITS_GE_2048-NEXT: uunpklo [[UPK:z[0-9]+]].d, [[OP]].s
-; VBITS_GE_2048-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG2]]/m, [[UPK]].s
-; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: fcvt [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].s
+; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG]], [x1]
 ; VBITS_GE_2048-NEXT: ret
   %op1 = load <32 x float>, <32 x float>* %a
   %res = fpext <32 x float> %op1 to <32 x double>
@@ -386,7 +362,7 @@
 ; VBITS_GE_2048: ptrue [[PG1:p[0-9]+]].s, vl64
 ; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG1]]/z, [x0]
 ; VBITS_GE_2048-NEXT: ptrue [[PG2:p[0-9]+]].s
-; VBITS_GE_2048-NEXT: fcvt [[RES:z[0-9]+]].h, [[PG2]]/m, [[UPK]].s
+; VBITS_GE_2048-NEXT: fcvt [[RES:z[0-9]+]].h, [[PG2]]/m, [[OP]].s
 ; VBITS_GE_2048-NEXT: uzp1 [[RES:z[0-9]+]].h, [[CVT]].h, [[CVT]].h
 ; VBITS_GE_2048-NEXT: ptrue [[PG3:p[0-9]+]].h, vl64
 ; VBITS_GE_2048-NEXT: st1h { [[RES]].h }, [[PG3]], [x1]
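
Note: the new behaviour can be exercised in isolation with a reduced test like the sketch below. This is illustrative only and not part of the patch; the RUN line, function name and attribute group are assumptions modelled on the conventions of the sve-fixed-length tests, while the CHECK lines mirror the fcvt_v8f16_v8f32 expectations above. With 256-bit SVE registers, <8 x float> is already a legal fixed-length type, so before this change the combine bailed out and the fpext was selected as ldr + uunpklo + fcvt; with it, the integer extend folds into a single ld1h extending load.

; Illustrative reduced test (assumption, not taken from this patch).
; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; CHECK-LABEL: fcvt_fold:
; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
; CHECK-NEXT: fcvt [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].h
; CHECK-NEXT: st1w { [[RES]].s }, [[PG]], [x1]
; CHECK-NEXT: ret
define void @fcvt_fold(<8 x half>* %a, <8 x float>* %b) #0 {
  %op = load <8 x half>, <8 x half>* %a
  %ext = fpext <8 x half> %op to <8 x float>
  store <8 x float> %ext, <8 x float>* %b
  ret void
}

attributes #0 = { "target-features"="+sve" }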