diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -15733,6 +15733,22 @@
   return SDValue();
 }
 
+static SDValue performSunpkloCombine(SDNode *N, SelectionDAG &DAG) {
+  // sunpklo (mov z, p/z, -1) => mov z, (punpklo p), -1
+  if (N->getOperand(0).getOpcode() == ISD::SIGN_EXTEND &&
+      N->getOperand(0)->getOperand(0)->getValueType(0).getScalarType() ==
+          MVT::i1) {
+    SDValue CC = N->getOperand(0)->getOperand(0);
+    auto VT = CC->getValueType(0).getHalfNumVectorElementsVT(*DAG.getContext());
+    SDValue Unpk = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT, CC,
+                               DAG.getVectorIdxConstant(0, SDLoc(N)));
+    SDValue Sext = DAG.getSExtOrTrunc(Unpk, SDLoc(N), N->getValueType(0));
+    return Sext;
+  }
+
+  return SDValue();
+}
+
 /// Target-specific DAG combine function for post-increment LD1 (lane) and
 /// post-increment LD1R.
 static SDValue performPostLD1Combine(SDNode *N,
@@ -16410,6 +16426,22 @@
       LHS->getOperand(0)->getOperand(0) == Pred)
     return LHS->getOperand(0);
 
+  //    setcc_merge_zero pred
+  //      (sign_extend (punpklo (setcc_merge_zero ... pred ...))), 0, ne
+  //    => punpklo (inner setcc_merge_zero)
+  if (Cond == ISD::SETNE && isZerosVector(RHS.getNode()) &&
+      LHS->getOpcode() == ISD::SIGN_EXTEND &&
+      LHS->getOperand(0)->getValueType(0) == N->getValueType(0) &&
+      LHS->getOperand(0)->getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+      LHS->getOperand(0).getValueType().getScalarType() == MVT::i1 &&
+      LHS->getOperand(0)->getConstantOperandVal(1) == 0) {
+    auto OrigPred = LHS->getOperand(0)->getOperand(0)->getOperand(0);
+    if (Pred.getOpcode() == AArch64ISD::PTRUE &&
+        OrigPred.getOpcode() == AArch64ISD::PTRUE &&
+        Pred.getConstantOperandVal(0) == OrigPred.getConstantOperandVal(0))
+      return LHS->getOperand(0);
+  }
+
   return SDValue();
 }
 
@@ -17321,6 +17353,8 @@
   case AArch64ISD::VASHR:
   case AArch64ISD::VLSHR:
     return performVectorShiftCombine(N, *this, DCI);
+  case AArch64ISD::SUNPKLO:
+    return performSunpkloCombine(N, DAG);
   case ISD::INSERT_VECTOR_ELT:
     return performInsertVectorEltCombine(N, DCI);
   case ISD::EXTRACT_VECTOR_ELT:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
@@ -268,11 +268,9 @@
 ; VBITS_GE_512-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_512-NEXT:    ld1b { z0.b }, p0/z, [x1]
 ; VBITS_GE_512-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
-; VBITS_GE_512-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_512-NEXT:    ld1sb { z0.h }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
-; VBITS_GE_512-NEXT:    sunpklo z0.h, z0.b
-; VBITS_GE_512-NEXT:    cmpne p1.h, p0/z, z0.h, #0
-; VBITS_GE_512-NEXT:    ld1sb { z0.h }, p1/z, [x0]
 ; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x8]
 ; VBITS_GE_512-NEXT:    ret
   %b = load <32 x i8>, <32 x i8>* %bp
@@ -327,11 +325,9 @@
 ; VBITS_GE_512-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x1]
 ; VBITS_GE_512-NEXT:    cmpeq p0.h, p0/z, z0.h, #0
-; VBITS_GE_512-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_512-NEXT:    ld1sh { z0.s }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
-; VBITS_GE_512-NEXT:    sunpklo z0.s, z0.h
-; VBITS_GE_512-NEXT:    cmpne p1.s, p0/z, z0.s, #0
-; VBITS_GE_512-NEXT:    ld1sh { z0.s }, p1/z, [x0]
 ; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x8]
 ; VBITS_GE_512-NEXT:    ret
   %b = load <16 x i16>, <16 x i16>* %bp
@@ -366,11 +362,9 @@
 ; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x1]
 ; VBITS_GE_512-NEXT:    cmpeq p0.s, p0/z, z0.s, #0
-; VBITS_GE_512-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_512-NEXT:    ld1sw { z0.d }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
-; VBITS_GE_512-NEXT:    sunpklo z0.d, z0.s
-; VBITS_GE_512-NEXT:    cmpne p1.d, p0/z, z0.d, #0
-; VBITS_GE_512-NEXT:    ld1sw { z0.d }, p1/z, [x0]
 ; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
 ; VBITS_GE_512-NEXT:    ret
   %b = load <8 x i32>, <8 x i32>* %bp
@@ -386,11 +380,9 @@
 ; VBITS_GE_512-NEXT:    ptrue p0.b, vl32
 ; VBITS_GE_512-NEXT:    ld1b { z0.b }, p0/z, [x1]
 ; VBITS_GE_512-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
-; VBITS_GE_512-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_512-NEXT:    ld1b { z0.h }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
-; VBITS_GE_512-NEXT:    sunpklo z0.h, z0.b
-; VBITS_GE_512-NEXT:    cmpne p1.h, p0/z, z0.h, #0
-; VBITS_GE_512-NEXT:    ld1b { z0.h }, p1/z, [x0]
 ; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x8]
 ; VBITS_GE_512-NEXT:    ret
   %b = load <32 x i8>, <32 x i8>* %bp
@@ -445,11 +437,9 @@
 ; VBITS_GE_512-NEXT:    ptrue p0.h, vl16
 ; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x1]
 ; VBITS_GE_512-NEXT:    cmpeq p0.h, p0/z, z0.h, #0
-; VBITS_GE_512-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_512-NEXT:    ld1h { z0.s }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
-; VBITS_GE_512-NEXT:    sunpklo z0.s, z0.h
-; VBITS_GE_512-NEXT:    cmpne p1.s, p0/z, z0.s, #0
-; VBITS_GE_512-NEXT:    ld1h { z0.s }, p1/z, [x0]
 ; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x8]
 ; VBITS_GE_512-NEXT:    ret
   %b = load <16 x i16>, <16 x i16>* %bp
@@ -484,11 +474,9 @@
 ; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
 ; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x1]
 ; VBITS_GE_512-NEXT:    cmpeq p0.s, p0/z, z0.s, #0
-; VBITS_GE_512-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_GE_512-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_512-NEXT:    ld1w { z0.d }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
-; VBITS_GE_512-NEXT:    sunpklo z0.d, z0.s
-; VBITS_GE_512-NEXT:    cmpne p1.d, p0/z, z0.d, #0
-; VBITS_GE_512-NEXT:    ld1w { z0.d }, p1/z, [x0]
 ; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x8]
 ; VBITS_GE_512-NEXT:    ret
   %b = load <8 x i32>, <8 x i32>* %bp
@@ -690,6 +678,230 @@
   ret <8 x i64> %ext
 }
 
+define <128 x i16> @masked_load_sext_v128i8i16(<128 x i8>* %ap, <128 x i8>* %bp) #0 {
+; VBITS_GE_2048-LABEL: masked_load_sext_v128i8i16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.b, vl128
+; VBITS_GE_2048-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    ld1sb { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x8]
+; VBITS_GE_2048-NEXT:    ret
+  %b = load <128 x i8>, <128 x i8>* %bp
+  %mask = icmp eq <128 x i8> %b, zeroinitializer
+  %load = call <128 x i8> @llvm.masked.load.v128i8(<128 x i8>* %ap, i32 8, <128 x i1> %mask, <128 x i8> undef)
+  %ext = sext <128 x i8> %load to <128 x i16>
+  ret <128 x i16> %ext
+}
+
+define <64 x i32> @masked_load_sext_v64i8i32(<64 x i8>* %ap, <64 x i8>* %bp) #0 {
+; VBITS_GE_2048-LABEL: masked_load_sext_v64i8i32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.b, vl64
+; VBITS_GE_2048-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    ld1sb { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_2048-NEXT:    ret
+  %b = load <64 x i8>, <64 x i8>* %bp
+  %mask = icmp eq <64 x i8> %b, zeroinitializer
+  %load = call <64 x i8> @llvm.masked.load.v64i8(<64 x i8>* %ap, i32 8, <64 x i1> %mask, <64 x i8> undef)
+  %ext = sext <64 x i8> %load to <64 x i32>
+  ret <64 x i32> %ext
+}
+
+define <32 x i64> @masked_load_sext_v32i8i64(<32 x i8>* %ap, <32 x i8>* %bp) #0 {
+; VBITS_GE_2048-LABEL: masked_load_sext_v32i8i64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.b, vl32
+; VBITS_GE_2048-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    ld1sb { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_2048-NEXT:    ret
+  %b = load <32 x i8>, <32 x i8>* %bp
+  %mask = icmp eq <32 x i8> %b, zeroinitializer
+  %load = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
+  %ext = sext <32 x i8> %load to <32 x i64>
+  ret <32 x i64> %ext
+}
+
+define <64 x i32> @masked_load_sext_v64i16i32(<64 x i16>* %ap, <64 x i16>* %bp) #0 {
+; VBITS_GE_2048-LABEL: masked_load_sext_v64i16i32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.h, p0/z, z0.h, #0
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    ld1sh { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_2048-NEXT:    ret
+  %b = load <64 x i16>, <64 x i16>* %bp
+  %mask = icmp eq <64 x i16> %b, zeroinitializer
+  %load = call <64 x i16> @llvm.masked.load.v64i16(<64 x i16>* %ap, i32 8, <64 x i1> %mask, <64 x i16> undef)
+  %ext = sext <64 x i16> %load to <64 x i32>
+  ret <64 x i32> %ext
+}
+
+define <32 x i64> @masked_load_sext_v32i16i64(<32 x i16>* %ap, <32 x i16>* %bp) #0 {
+; VBITS_GE_2048-LABEL: masked_load_sext_v32i16i64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.h, p0/z, z0.h, #0
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    ld1sh { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_2048-NEXT:    ret
+  %b = load <32 x i16>, <32 x i16>* %bp
+  %mask = icmp eq <32 x i16> %b, zeroinitializer
+  %load = call <32 x i16> @llvm.masked.load.v32i16(<32 x i16>* %ap, i32 8, <32 x i1> %mask, <32 x i16> undef)
+  %ext = sext <32 x i16> %load to <32 x i64>
+  ret <32 x i64> %ext
+}
+
+define <32 x i64> @masked_load_sext_v32i32i64(<32 x i32>* %ap, <32 x i32>* %bp) #0 {
+; VBITS_GE_2048-LABEL: masked_load_sext_v32i32i64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.s, p0/z, z0.s, #0
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    ld1sw { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_2048-NEXT:    ret
+  %b = load <32 x i32>, <32 x i32>* %bp
+  %mask = icmp eq <32 x i32> %b, zeroinitializer
+  %load = call <32 x i32> @llvm.masked.load.v32i32(<32 x i32>* %ap, i32 8, <32 x i1> %mask, <32 x i32> undef)
+  %ext = sext <32 x i32> %load to <32 x i64>
+  ret <32 x i64> %ext
+}
+
+define <128 x i16> @masked_load_zext_v128i8i16(<128 x i8>* %ap, <128 x i8>* %bp) #0 {
+; VBITS_GE_2048-LABEL: masked_load_zext_v128i8i16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.b, vl128
+; VBITS_GE_2048-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    ld1b { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x8]
+; VBITS_GE_2048-NEXT:    ret
+  %b = load <128 x i8>, <128 x i8>* %bp
+  %mask = icmp eq <128 x i8> %b, zeroinitializer
+  %load = call <128 x i8> @llvm.masked.load.v128i8(<128 x i8>* %ap, i32 8, <128 x i1> %mask, <128 x i8> undef)
+  %ext = zext <128 x i8> %load to <128 x i16>
+  ret <128 x i16> %ext
+}
+
+define <64 x i32> @masked_load_zext_v64i8i32(<64 x i8>* %ap, <64 x i8>* %bp) #0 {
+; VBITS_GE_2048-LABEL: masked_load_zext_v64i8i32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.b, vl64
+; VBITS_GE_2048-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    ld1b { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_2048-NEXT:    ret
+  %b = load <64 x i8>, <64 x i8>* %bp
+  %mask = icmp eq <64 x i8> %b, zeroinitializer
+  %load = call <64 x i8> @llvm.masked.load.v64i8(<64 x i8>* %ap, i32 8, <64 x i1> %mask, <64 x i8> undef)
+  %ext = zext <64 x i8> %load to <64 x i32>
+  ret <64 x i32> %ext
+}
+
+define <32 x i64> @masked_load_zext_v32i8i64(<32 x i8>* %ap, <32 x i8>* %bp) #0 {
+; VBITS_GE_2048-LABEL: masked_load_zext_v32i8i64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.b, vl32
+; VBITS_GE_2048-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    ld1b { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_2048-NEXT:    ret
+  %b = load <32 x i8>, <32 x i8>* %bp
+  %mask = icmp eq <32 x i8> %b, zeroinitializer
+  %load = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
+  %ext = zext <32 x i8> %load to <32 x i64>
+  ret <32 x i64> %ext
+}
+
+define <64 x i32> @masked_load_zext_v64i16i32(<64 x i16>* %ap, <64 x i16>* %bp) #0 {
+; VBITS_GE_2048-LABEL: masked_load_zext_v64i16i32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.h, p0/z, z0.h, #0
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    ld1h { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x8]
+; VBITS_GE_2048-NEXT:    ret
+  %b = load <64 x i16>, <64 x i16>* %bp
+  %mask = icmp eq <64 x i16> %b, zeroinitializer
+  %load = call <64 x i16> @llvm.masked.load.v64i16(<64 x i16>* %ap, i32 8, <64 x i1> %mask, <64 x i16> undef)
+  %ext = zext <64 x i16> %load to <64 x i32>
+  ret <64 x i32> %ext
+}
+
+define <32 x i64> @masked_load_zext_v32i16i64(<32 x i16>* %ap, <32 x i16>* %bp) #0 {
+; VBITS_GE_2048-LABEL: masked_load_zext_v32i16i64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.h, p0/z, z0.h, #0
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    ld1h { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_2048-NEXT:    ret
+  %b = load <32 x i16>, <32 x i16>* %bp
+  %mask = icmp eq <32 x i16> %b, zeroinitializer
+  %load = call <32 x i16> @llvm.masked.load.v32i16(<32 x i16>* %ap, i32 8, <32 x i1> %mask, <32 x i16> undef)
+  %ext = zext <32 x i16> %load to <32 x i64>
+  ret <32 x i64> %ext
+}
+
+define <32 x i64> @masked_load_zext_v32i32i64(<32 x i32>* %ap, <32 x i32>* %bp) #0 {
+; VBITS_GE_2048-LABEL: masked_load_zext_v32i32i64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x1]
+; VBITS_GE_2048-NEXT:    cmpeq p0.s, p0/z, z0.s, #0
+; VBITS_GE_2048-NEXT:    punpklo p0.h, p0.b
+; VBITS_GE_2048-NEXT:    ld1w { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x8]
+; VBITS_GE_2048-NEXT:    ret
+  %b = load <32 x i32>, <32 x i32>* %bp
+  %mask = icmp eq <32 x i32> %b, zeroinitializer
+  %load = call <32 x i32> @llvm.masked.load.v32i32(<32 x i32>* %ap, i32 8, <32 x i1> %mask, <32 x i32> undef)
+  %ext = zext <32 x i32> %load to <32 x i64>
+  ret <32 x i64> %ext
+}
+
 declare <2 x half> @llvm.masked.load.v2f16(<2 x half>*, i32, <2 x i1>, <2 x half>)
 declare <2 x float> @llvm.masked.load.v2f32(<2 x float>*, i32, <2 x i1>, <2 x float>)
 declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
@@ -698,6 +910,7 @@
 declare <32 x float> @llvm.masked.load.v32f32(<32 x float>*, i32, <32 x i1>, <32 x float>)
 declare <64 x float> @llvm.masked.load.v64f32(<64 x float>*, i32, <64 x i1>, <64 x float>)
 
+declare <128 x i8> @llvm.masked.load.v128i8(<128 x i8>*, i32, <128 x i1>, <128 x i8>)
 declare <64 x i8> @llvm.masked.load.v64i8(<64 x i8>*, i32, <64 x i1>, <64 x i8>)
 declare <32 x i8> @llvm.masked.load.v32i8(<32 x i8>*, i32, <32 x i1>, <32 x i8>)
 declare <16 x i8> @llvm.masked.load.v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
@@ -705,7 +918,9 @@
 declare <8 x i8> @llvm.masked.load.v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>)
 declare <8 x i16> @llvm.masked.load.v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
 declare <8 x i32> @llvm.masked.load.v8i32(<8 x i32>*, i32, <8 x i1>, <8 x i32>)
+declare <32 x i32> @llvm.masked.load.v32i32(<32 x i32>*, i32, <32 x i1>, <32 x i32>)
 declare <32 x i16> @llvm.masked.load.v32i16(<32 x i16>*, i32, <32 x i1>, <32 x i16>)
+declare <64 x i16> @llvm.masked.load.v64i16(<64 x i16>*, i32, <64 x i1>, <64 x i16>)
 declare <16 x i32> @llvm.masked.load.v16i32(<16 x i32>*, i32, <16 x i1>, <16 x i32>)
 declare <8 x i64> @llvm.masked.load.v8i64(<8 x i64>*, i32, <8 x i1>, <8 x i64>)
 declare <8 x double> @llvm.masked.load.v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)
diff --git a/llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll b/llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll
@@ -0,0 +1,294 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s | FileCheck %s
+target triple = "aarch64-unknown-linux-gnu"
+
+define <vscale x 8 x i16> @masked_load_sext_i8i16(i8* %ap, i8* %bp) #0 {
+; CHECK-LABEL: masked_load_sext_i8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b, vl32
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    ld1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ret
+  %p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 10)
+  %b = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %p0, i8* %bp)
+  %cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
+  %extract = call <vscale x 8 x i1> @llvm.experimental.vector.extract.8x16(<vscale x 16 x i1> %cmp, i64 0)
+  %ext1 = sext <vscale x 8 x i1> %extract to <vscale x 8 x i16>
+  %p1 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 10)
+  %dup = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 0)
+  %cmp1 = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1> %p1, <vscale x 8 x i16> %ext1, <vscale x 8 x i16> zeroinitializer)
+  %a = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %cmp1, i8* %ap)
+  %ext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %ext
+}
+
+
+define <vscale x 8 x i16> @masked_load_sext_i8i16_ptrue_vl(i8* %ap, i8* %bp) #0 {
+; CHECK-LABEL: masked_load_sext_i8i16_ptrue_vl:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b, vl64
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    ptrue p0.h, vl32
+; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #0
+; CHECK-NEXT:    ld1sb { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ret
+  %p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 11)
+  %b = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %p0, i8* %bp)
+  %cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
+  %extract = call <vscale x 8 x i1> @llvm.experimental.vector.extract.8x16(<vscale x 16 x i1> %cmp, i64 0)
+  %ext1 = sext <vscale x 8 x i1> %extract to <vscale x 8 x i16>
+  %p1 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 10)
+  %dup = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 0)
+  %cmp1 = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1> %p1, <vscale x 8 x i16> %ext1, <vscale x 8 x i16> zeroinitializer)
+  %a = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %cmp1, i8* %ap)
+  %ext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %ext
+}
+
+define <vscale x 8 x i16> @masked_load_sext_i8i16_pfalse(i8* %ap, i8* %bp) #0 {
+; CHECK-LABEL: masked_load_sext_i8i16_pfalse:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT:    stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w20, -16
+; CHECK-NEXT:    .cfi_offset w30, -32
+; CHECK-NEXT:    mov x20, x0
+; CHECK-NEXT:    mov w0, #11
+; CHECK-NEXT:    mov x19, x1
+; CHECK-NEXT:    bl llvm.aarch64.sve.pfalse.nxv16i1
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x19]
+; CHECK-NEXT:    ptrue p1.h, vl32
+; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    cmpne p0.h, p1/z, z0.h, #0
+; CHECK-NEXT:    ld1sb { z0.h }, p0/z, [x20]
+; CHECK-NEXT:    ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.pfalse.nxv16i1(i32 11)
+  %b = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %p0, i8* %bp)
+  %cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
+  %extract = call <vscale x 8 x i1> @llvm.experimental.vector.extract.8x16(<vscale x 16 x i1> %cmp, i64 0)
+  %ext1 = sext <vscale x 8 x i1> %extract to <vscale x 8 x i16>
+  %p1 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 10)
+  %dup = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 0)
+  %cmp1 = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1> %p1, <vscale x 8 x i16> %ext1, <vscale x 8 x i16> zeroinitializer)
+  %a = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %cmp1, i8* %ap)
+  %ext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %ext
+}
+
+define <vscale x 4 x i32> @masked_load_sext_i8i32(i8* %ap, i8* %bp) #0 {
+; CHECK-LABEL: masked_load_sext_i8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b, vl32
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ret
+  %p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 10)
+  %b = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %p0, i8* %bp)
+  %cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
+  %extract = call <vscale x 4 x i1> @llvm.experimental.vector.extract.4x16(<vscale x 16 x i1> %cmp, i64 0)
+  %ext1 = sext <vscale x 4 x i1> %extract to <vscale x 4 x i32>
+  %p1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 10)
+  %dup = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 0)
+  %cmp1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1> %p1, <vscale x 4 x i32> %ext1, <vscale x 4 x i32> zeroinitializer)
+  %a = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %cmp1, i8* %ap)
+  %ext = sext <vscale x 4 x i8> %a to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %ext
+}
+
+
+define <vscale x 4 x i32> @masked_load_sext_i8i32_ptrue_vl(i8* %ap, i8* %bp) #0 {
+; CHECK-LABEL: masked_load_sext_i8i32_ptrue_vl:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b, vl64
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    ptrue p0.s, vl32
+; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, #0
+; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ret
+  %p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 11)
+  %b = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %p0, i8* %bp)
+  %cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
+  %extract = call <vscale x 4 x i1> @llvm.experimental.vector.extract.4x16(<vscale x 16 x i1> %cmp, i64 0)
+  %ext1 = sext <vscale x 4 x i1> %extract to <vscale x 4 x i32>
+  %p1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 10)
+  %dup = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 0)
+  %cmp1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1> %p1, <vscale x 4 x i32> %ext1, <vscale x 4 x i32> zeroinitializer)
+  %a = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %cmp1, i8* %ap)
+  %ext = sext <vscale x 4 x i8> %a to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %ext
+}
+
+define <vscale x 4 x i32> @masked_load_sext_i8i32_pfalse(i8* %ap, i8* %bp) #0 {
+; CHECK-LABEL: masked_load_sext_i8i32_pfalse:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT:    stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w20, -16
+; CHECK-NEXT:    .cfi_offset w30, -32
+; CHECK-NEXT:    mov x20, x0
+; CHECK-NEXT:    mov w0, #11
+; CHECK-NEXT:    mov x19, x1
+; CHECK-NEXT:    bl llvm.aarch64.sve.pfalse.nxv16i1
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x19]
+; CHECK-NEXT:    ptrue p1.s, vl32
+; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    cmpne p0.s, p1/z, z0.s, #0
+; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x20]
+; CHECK-NEXT:    ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.pfalse.nxv16i1(i32 11)
+  %b = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %p0, i8* %bp)
+  %cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
+  %extract = call <vscale x 4 x i1> @llvm.experimental.vector.extract.4x16(<vscale x 16 x i1> %cmp, i64 0)
+  %ext1 = sext <vscale x 4 x i1> %extract to <vscale x 4 x i32>
+  %p1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 10)
+  %dup = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 0)
+  %cmp1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1> %p1, <vscale x 4 x i32> %ext1, <vscale x 4 x i32> zeroinitializer)
+  %a = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %cmp1, i8* %ap)
+  %ext = sext <vscale x 4 x i8> %a to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %ext
+}
+
+define <vscale x 2 x i64> @masked_load_sext_i8i64(i8* %ap, i8* %bp) #0 {
+; CHECK-LABEL: masked_load_sext_i8i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b, vl32
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ret
+  %p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 10)
+  %b = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %p0, i8* %bp)
+  %cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
+  %extract = call <vscale x 2 x i1> @llvm.experimental.vector.extract.2x16(<vscale x 16 x i1> %cmp, i64 0)
+  %ext1 = sext <vscale x 2 x i1> %extract to <vscale x 2 x i64>
+  %p1 = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 10)
+  %dup = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
+  %cmp1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %p1, <vscale x 2 x i64> %ext1, <vscale x 2 x i64> zeroinitializer)
+  %a = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %cmp1, i8* %ap)
+  %ext = sext <vscale x 2 x i8> %a to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %ext
+}
+
+
+define <vscale x 2 x i64> @masked_load_sext_i8i64_ptrue_vl(i8* %ap, i8* %bp) #0 {
+; CHECK-LABEL: masked_load_sext_i8i64_ptrue_vl:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b, vl64
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x1]
+; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    mov z0.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    ptrue p0.d, vl32
+; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
+; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ret
+  %p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 11)
+  %b = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %p0, i8* %bp)
+  %cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
+  %extract = call <vscale x 2 x i1> @llvm.experimental.vector.extract.2x16(<vscale x 16 x i1> %cmp, i64 0)
+  %ext1 = sext <vscale x 2 x i1> %extract to <vscale x 2 x i64>
+  %p1 = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 10)
+  %dup = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
+  %cmp1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %p1, <vscale x 2 x i64> %ext1, <vscale x 2 x i64> zeroinitializer)
+  %a = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %cmp1, i8* %ap)
+  %ext = sext <vscale x 2 x i8> %a to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %ext
+}
+
+define <vscale x 2 x i64> @masked_load_sext_i8i64_pfalse(i8* %ap, i8* %bp) #0 {
+; CHECK-LABEL: masked_load_sext_i8i64_pfalse:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT:    stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w20, -16
+; CHECK-NEXT:    .cfi_offset w30, -32
+; CHECK-NEXT:    mov x20, x0
+; CHECK-NEXT:    mov w0, #11
+; CHECK-NEXT:    mov x19, x1
+; CHECK-NEXT:    bl llvm.aarch64.sve.pfalse.nxv16i1
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x19]
+; CHECK-NEXT:    ptrue p1.d, vl32
+; CHECK-NEXT:    cmpeq p0.b, p0/z, z0.b, #0
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    punpklo p0.h, p0.b
+; CHECK-NEXT:    mov z0.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    cmpne p0.d, p1/z, z0.d, #0
+; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x20]
+; CHECK-NEXT:    ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %p0 = call <vscale x 16 x i1> @llvm.aarch64.sve.pfalse.nxv16i1(i32 11)
+  %b = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %p0, i8* %bp)
+  %cmp = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1> %p0, <vscale x 16 x i8> %b, <vscale x 16 x i8> zeroinitializer)
+  %extract = call <vscale x 2 x i1> @llvm.experimental.vector.extract.2x16(<vscale x 16 x i1> %cmp, i64 0)
+  %ext1 = sext <vscale x 2 x i1> %extract to <vscale x 2 x i64>
+  %p1 = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 10)
+  %dup = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 0)
+  %cmp1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %p1, <vscale x 2 x i64> %ext1, <vscale x 2 x i64> zeroinitializer)
+  %a = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %cmp1, i8* %ap)
+  %ext = sext <vscale x 2 x i8> %a to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %ext
+}
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.pfalse.nxv16i1(i32)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32)
+
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1>, i16*)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, i8*)
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1>, i8*)
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1>, i8*)
+
+declare <vscale x 8 x i1> @llvm.experimental.vector.extract.8x16(<vscale x 16 x i1>, i64)
+declare <vscale x 4 x i1> @llvm.experimental.vector.extract.4x16(<vscale x 16 x i1>, i64)
+declare <vscale x 2 x i1> @llvm.experimental.vector.extract.2x16(<vscale x 16 x i1>, i64)
+
+declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+
+declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64)
+
+
+attributes #0 = { "target-features"="+sve" }