diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10877,7 +10877,7 @@
   if (!Ld || Ld->getExtensionType() != ISD::NON_EXTLOAD)
     return SDValue();
 
-  if (!TLI.isLoadExtLegal(ExtLoadType, VT, Ld->getValueType(0)))
+  if (!TLI.isLoadExtLegalOrCustom(ExtLoadType, VT, Ld->getValueType(0)))
     return SDValue();
 
   if (!TLI.isVectorLoadExtDesirable(SDValue(N, 0)))
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -18090,9 +18090,6 @@
     SDValue Op, SelectionDAG &DAG) const {
   auto Load = cast<MaskedLoadSDNode>(Op);
 
-  if (Load->getExtensionType() != ISD::LoadExtType::NON_EXTLOAD)
-    return SDValue();
-
   SDLoc DL(Op);
   EVT VT = Op.getValueType();
   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-loads.ll
@@ -253,10 +253,12 @@
 ; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].b }, p0/z, [x0]
 ; VBITS_GE_512-NEXT: ld1b { [[Z1:z[0-9]+]].b }, p0/z, [x1]
 ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, [[Z1]].b
-; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, [[PG1]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].h, vl32
+; VBITS_GE_512-NEXT: mov [[Z0]].b, [[PG0]]/z, #-1
 ; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b
-; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG1]], [x8]
+; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].h, vl32
+; VBITS_GE_512-NEXT: cmpne [[PG3:p[0-9]+]].h, [[PG2]]/z, [[Z0]].h, #0
+; VBITS_GE_512-NEXT: ld1sb { [[Z0]].h }, [[PG3]]/z, [x0]
+; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG2]], [x8]
 ; VBITS_GE_512-NEXT: ret
   %a = load <32 x i8>, <32 x i8>* %ap
   %b = load <32 x i8>, <32 x i8>* %bp
@@ -270,14 +272,13 @@
 ; CHECK-LABEL: masked_load_sext_v16i8i32:
 ; VBITS_GE_512: ldr q0, [x0]
 ; VBITS_GE_512-NEXT: ldr q1, [x1]
-; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].b, vl16
+; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].s, vl16
 ; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; VBITS_GE_512-NEXT: cmpne [[PG2:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, #0
-; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, [[PG2]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16
 ; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b
 ; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
-; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: cmpne [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, #0
+; VBITS_GE_512-NEXT: ld1sb { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
+; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
 ; VBITS_GE_512: ret
   %a = load <16 x i8>, <16 x i8>* %ap
   %b = load <16 x i8>, <16 x i8>* %bp
@@ -291,15 +292,14 @@
 ; CHECK-LABEL: masked_load_sext_v8i8i64:
 ; VBITS_GE_512: ldr d0, [x0]
 ; VBITS_GE_512-NEXT: ldr d1, [x1]
-; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].b, vl8
+; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].d, vl8
 ; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
-; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].b, p0/z, z[[V]].b, #0
-; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, p[[PG]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8
 ; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b
 ; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
 ; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].d, p0/z, z[[V]].d, #0
+; VBITS_GE_512-NEXT: ld1sb { [[Z0]].d }, p[[PG]]/z, [x{{[0-9]+}}]
+; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
 ; VBITS_GE_512-NEXT: ret
   %a = load <8 x i8>, <8 x i8>* %ap
   %b = load <8 x i8>, <8 x i8>* %bp
@@ -315,10 +315,12 @@
 ; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].h }, p0/z, [x0]
 ; VBITS_GE_512-NEXT: ld1h { [[Z1:z[0-9]+]].h }, p0/z, [x1]
 ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, [[Z1]].h
-; VBITS_GE_512-NEXT: ld1h { [[Z0]].h }, [[PG1]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16
+; VBITS_GE_512-NEXT: mov [[Z0]].h, [[PG1]]/z, #-1
 ; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
-; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG1]], [x8]
+; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16
+; VBITS_GE_512-NEXT: cmpne [[PG3:p[0-9]+]].s, [[PG2]]/z, [[Z0]].s, #0
+; VBITS_GE_512-NEXT: ld1sh { [[Z0]].s }, [[PG3]]/z, [x{{[0-9]+}}]
+; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG2]], [x8]
 ; VBITS_GE_512-NEXT: ret
   %a = load <16 x i16>, <16 x i16>* %ap
   %b = load <16 x i16>, <16 x i16>* %bp
@@ -332,14 +334,13 @@
 ; CHECK-LABEL: masked_load_sext_v8i16i64:
 ; VBITS_GE_512: ldr q0, [x0]
 ; VBITS_GE_512-NEXT: ldr q1, [x1]
-; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].h, vl8
+; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].d, vl8
 ; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
-; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].h, p0/z, z[[V]].h, #0
-; VBITS_GE_512-NEXT: ld1h { [[Z0]].h }, p[[PG]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8
 ; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
 ; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].d, p0/z, z[[V]].d, #0
+; VBITS_GE_512-NEXT: ld1sh { [[Z0]].d }, p[[PG]]/z, [x{{[0-9]+}}]
+; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
 ; VBITS_GE_512-NEXT: ret
   %a = load <8 x i16>, <8 x i16>* %ap
   %b = load <8 x i16>, <8 x i16>* %bp
@@ -355,10 +356,12 @@
 ; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
 ; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
 ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
-; VBITS_GE_512-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: mov [[Z0]].s, [[PG0]]/z, #-1
 ; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG1]], [x8]
+; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: cmpne [[PG3:p[0-9]+]].d, [[PG2]]/z, [[Z0]].d, #0
+; VBITS_GE_512-NEXT: ld1sw { [[Z0]].d }, [[PG3]]/z, [x{{[0-9]+}}]
+; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
 ; VBITS_GE_512-NEXT: ret
   %a = load <8 x i32>, <8 x i32>* %ap
   %b = load <8 x i32>, <8 x i32>* %bp
@@ -374,11 +377,14 @@
 ; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].b }, p0/z, [x0]
 ; VBITS_GE_512-NEXT: ld1b { [[Z1:z[0-9]+]].b }, p0/z, [x1]
 ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, [[Z1]].b
-; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, [[PG1]]/z, [x{{[0-9]+}}]
+; VBITS_GE_512-NEXT: mov [[Z0]].b, [[PG0]]/z, #-1
+; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b
 ; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].h, vl32
-; VBITS_GE_512-NEXT: uunpklo [[Z0]].h, [[Z0]].b
-; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG1]], [x8]
+; VBITS_GE_512-NEXT: cmpne [[PG3:p[0-9]+]].h, [[PG2]]/z, [[Z0]].h, #0
+; VBITS_GE_512-NEXT: ld1b { [[Z0]].h }, [[PG3]]/z, [x0]
+; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG2]], [x8]
 ; VBITS_GE_512-NEXT: ret
+
   %a = load <32 x i8>, <32 x i8>* %ap
   %b = load <32 x i8>, <32 x i8>* %bp
   %mask = icmp eq <32 x i8> %a, %b
@@ -391,14 +397,13 @@
 ; CHECK-LABEL: masked_load_zext_v16i8i32:
 ; VBITS_GE_512: ldr q0, [x0]
 ; VBITS_GE_512-NEXT: ldr q1, [x1]
-; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].b, vl16
+; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].s, vl16
 ; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b
-; VBITS_GE_512-NEXT: cmpne [[PG2:p[0-9]+]].b, [[PG0]]/z, [[Z0]].b, #0
-; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, [[PG2]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: uunpklo [[Z0]].h, [[Z0]].b
-; VBITS_GE_512-NEXT: uunpklo [[Z0]].s, [[Z0]].h
-; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b
+; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
+; VBITS_GE_512-NEXT: cmpne [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, #0
+; VBITS_GE_512-NEXT: ld1b { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
+; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG0]], [x8]
 ; VBITS_GE_512: ret
   %a = load <16 x i8>, <16 x i8>* %ap
   %b = load <16 x i8>, <16 x i8>* %bp
@@ -412,15 +417,14 @@
 ; CHECK-LABEL: masked_load_zext_v8i8i64:
 ; VBITS_GE_512: ldr d0, [x0]
 ; VBITS_GE_512-NEXT: ldr d1, [x1]
-; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].b, vl8
+; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].d, vl8
 ; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b
-; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].b, p0/z, z[[V]].b, #0
-; VBITS_GE_512-NEXT: ld1b { [[Z0]].b }, p[[PG]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: uunpklo [[Z0]].h, [[Z0]].b
-; VBITS_GE_512-NEXT: uunpklo [[Z0]].s, [[Z0]].h
-; VBITS_GE_512-NEXT: uunpklo [[Z0]].d, [[Z0]].s
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: sunpklo [[Z0]].h, [[Z0]].b
+; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
+; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s
+; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].d, p0/z, z[[V]].d, #0
+; VBITS_GE_512-NEXT: ld1b { [[Z0]].d }, p[[PG]]/z, [x{{[0-9]+}}]
+; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
 ; VBITS_GE_512-NEXT: ret
   %a = load <8 x i8>, <8 x i8>* %ap
   %b = load <8 x i8>, <8 x i8>* %bp
@@ -436,10 +440,12 @@
 ; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].h }, p0/z, [x0]
 ; VBITS_GE_512-NEXT: ld1h { [[Z1:z[0-9]+]].h }, p0/z, [x1]
 ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, [[Z1]].h
-; VBITS_GE_512-NEXT: ld1h { [[Z0]].h }, [[PG1]]/z, [x{{[0-9]+}}]
+; VBITS_GE_512-NEXT: mov [[Z0]].h, [[PG1]]/z, #-1
+; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
 ; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].s, vl16
-; VBITS_GE_512-NEXT: uunpklo [[Z0]].s, [[Z0]].h
-; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG1]], [x8]
+; VBITS_GE_512-NEXT: cmpne [[PG3:p[0-9]+]].s, [[PG2]]/z, [[Z0]].s, #0
+; VBITS_GE_512-NEXT: ld1h { [[Z0]].s }, [[PG3]]/z, [x{{[0-9]+}}]
+; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG2]], [x8]
 ; VBITS_GE_512-NEXT: ret
   %a = load <16 x i16>, <16 x i16>* %ap
   %b = load <16 x i16>, <16 x i16>* %bp
@@ -453,14 +459,13 @@
 ; CHECK-LABEL: masked_load_zext_v8i16i64:
 ; VBITS_GE_512: ldr q0, [x0]
 ; VBITS_GE_512-NEXT: ldr q1, [x1]
-; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].h, vl8
+; VBITS_GE_512-NEXT: ptrue [[PG0:p[0-9]+]].d, vl8
 ; VBITS_GE_512-NEXT: cmeq v[[V:[0-9]+]].8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h
-; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].h, p0/z, z[[V]].h, #0
-; VBITS_GE_512-NEXT: ld1h { [[Z0]].h }, p[[PG]]/z, [x{{[0-9]+}}]
-; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: uunpklo [[Z0]].s, [[Z0]].h
-; VBITS_GE_512-NEXT: uunpklo [[Z0]].d, [[Z0]].s
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: sunpklo [[Z0]].s, [[Z0]].h
+; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s
+; VBITS_GE_512-NEXT: cmpne p[[PG:[0-9]+]].d, p0/z, z[[V]].d, #0
+; VBITS_GE_512-NEXT: ld1h { [[Z0]].d }, p[[PG]]/z, [x{{[0-9]+}}]
+; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG0]], [x8]
 ; VBITS_GE_512-NEXT: ret
   %a = load <8 x i16>, <8 x i16>* %ap
   %b = load <8 x i16>, <8 x i16>* %bp
@@ -476,11 +481,12 @@
 ; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].s }, p0/z, [x0]
 ; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
 ; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
-; VBITS_GE_512-NEXT: ld1w { [[Z0]].s }, [[PG1]]/z, [x{{[0-9]+}}]
+; VBITS_GE_512-NEXT: mov [[Z0]].s, [[PG0]]/z, #-1
+; VBITS_GE_512-NEXT: sunpklo [[Z0]].d, [[Z0]].s
 ; VBITS_GE_512-NEXT: ptrue [[PG2:p[0-9]+]].d, vl8
-; VBITS_GE_512-NEXT: uunpklo [[Z0]].d, [[Z0]].s
-; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG1]], [x8]
-; VBITS_GE_512-NEXT: ret
+; VBITS_GE_512-NEXT: cmpne [[PG3:p[0-9]+]].d, [[PG2]]/z, [[Z0]].d, #0
+; VBITS_GE_512-NEXT: ld1w { [[Z0]].d }, [[PG3]]/z, [x{{[0-9]+}}]
+; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
   %a = load <8 x i32>, <8 x i32>* %ap
   %b = load <8 x i32>, <8 x i32>* %bp
   %mask = icmp eq <8 x i32> %a, %b
@@ -489,6 +495,222 @@
   ret <8 x i64> %ext
 }
 
+define <32 x i16> @masked_load_sext_v32i8i16_m16(<32 x i8>* %ap, <32 x i16>* %bp) #0 {
+; CHECK-LABEL: masked_load_sext_v32i8i16_m16:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].h, vl32
+; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].h }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1h { [[Z1:z[0-9]+]].h }, p0/z, [x1]
+; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, [[Z1]].h
+; VBITS_GE_512-NEXT: ld1sb { [[Z0]].h }, [[PG3]]/z, [x0]
+; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: ret
+  %a = load <32 x i8>, <32 x i8>* %ap
+  %b = load <32 x i16>, <32 x i16>* %bp
+  %ae = zext <32 x i8> %a to <32 x i16>
+  %mask = icmp eq <32 x i16> %ae, %b
+  %load = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
+  %ext = sext <32 x i8> %load to <32 x i16>
+  ret <32 x i16> %ext
+}
+
+define <16 x i32> @masked_load_sext_v16i8i32_m32(<16 x i8>* %ap, <16 x i32>* %bp) #0 {
+; CHECK-LABEL: masked_load_sext_v16i8i32_m32:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl16
+; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].s }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
+; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
+; VBITS_GE_512-NEXT: ld1sb { [[Z0]].s }, [[PG3]]/z, [x0]
+; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG2]], [x8]
+; VBITS_GE_512: ret
+  %a = load <16 x i8>, <16 x i8>* %ap
+  %b = load <16 x i32>, <16 x i32>* %bp
+  %ae = zext <16 x i8> %a to <16 x i32>
+  %mask = icmp eq <16 x i32> %ae, %b
+  %load = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %ap, i32 8, <16 x i1> %mask, <16 x i8> undef)
+  %ext = sext <16 x i8> %load to <16 x i32>
+  ret <16 x i32> %ext
+}
+
+define <8 x i64> @masked_load_sext_v8i8i64_m64(<8 x i8>* %ap, <8 x i64>* %bp) #0 {
+; CHECK-LABEL: masked_load_sext_v8i8i64_m64:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].d }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1]
+; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, [[Z1]].d
+; VBITS_GE_512-NEXT: ld1sb { [[Z0]].d }, [[PG3]]/z, [x0]
+; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: ret
+  %a = load <8 x i8>, <8 x i8>* %ap
+  %b = load <8 x i64>, <8 x i64>* %bp
+  %ae = zext <8 x i8> %a to <8 x i64>
+  %mask = icmp eq <8 x i64> %ae, %b
+  %load = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %ap, i32 8, <8 x i1> %mask, <8 x i8> undef)
+  %ext = sext <8 x i8> %load to <8 x i64>
+  ret <8 x i64> %ext
+}
+
+define <16 x i32> @masked_load_sext_v16i16i32_m32(<16 x i16>* %ap, <16 x i32>* %bp) #0 {
+; CHECK-LABEL: masked_load_sext_v16i16i32_m32:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl16
+; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].s }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
+; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
+; VBITS_GE_512-NEXT: ld1sh { [[Z0]].s }, [[PG3]]/z, [x0]
+; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: ret
+  %a = load <16 x i16>, <16 x i16>* %ap
+  %b = load <16 x i32>, <16 x i32>* %bp
+  %ae = zext <16 x i16> %a to <16 x i32>
+  %mask = icmp eq <16 x i32> %ae, %b
+  %load = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %ap, i32 8, <16 x i1> %mask, <16 x i16> undef)
+  %ext = sext <16 x i16> %load to <16 x i32>
+  ret <16 x i32> %ext
+}
+
+define <8 x i64> @masked_load_sext_v8i16i64_m64(<8 x i16>* %ap, <8 x i64>* %bp) #0 {
+; CHECK-LABEL: masked_load_sext_v8i16i64_m64:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].d }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1]
+; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, [[Z1]].d
+; VBITS_GE_512-NEXT: ld1sh { [[Z0]].d }, [[PG3]]/z, [x0]
+; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: ret
+  %a = load <8 x i16>, <8 x i16>* %ap
+  %b = load <8 x i64>, <8 x i64>* %bp
+  %ae = zext <8 x i16> %a to <8 x i64>
+  %mask = icmp eq <8 x i64> %ae, %b
+  %load = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %ap, i32 8, <8 x i1> %mask, <8 x i16> undef)
+  %ext = sext <8 x i16> %load to <8 x i64>
+  ret <8 x i64> %ext
+}
+
+define <8 x i64> @masked_load_sext_v8i32i64_m64(<8 x i32>* %ap, <8 x i64>* %bp) #0 {
+; CHECK-LABEL: masked_load_sext_v8i32i64_m64:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].d }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1]
+; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, [[Z1]].d
+; VBITS_GE_512-NEXT: ld1sw { [[Z0]].d }, [[PG3]]/z, [x0]
+; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: ret
+  %a = load <8 x i32>, <8 x i32>* %ap
+  %b = load <8 x i64>, <8 x i64>* %bp
+  %ae = zext <8 x i32> %a to <8 x i64>
+  %mask = icmp eq <8 x i64> %ae, %b
+  %load = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %ap, i32 8, <8 x i1> %mask, <8 x i32> undef)
+  %ext = sext <8 x i32> %load to <8 x i64>
+  ret <8 x i64> %ext
+}
+
+define <32 x i16> @masked_load_zext_v32i8i16_m16(<32 x i8>* %ap, <32 x i16>* %bp) #0 {
+; CHECK-LABEL: masked_load_zext_v32i8i16_m16:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].h, vl32
+; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].h }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1h { [[Z1:z[0-9]+]].h }, p0/z, [x1]
+; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].h, [[PG0]]/z, [[Z0]].h, [[Z1]].h
+; VBITS_GE_512-NEXT: ld1b { [[Z0]].h }, [[PG3]]/z, [x0]
+; VBITS_GE_512-NEXT: st1h { [[Z0]].h }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: ret
+  %a = load <32 x i8>, <32 x i8>* %ap
+  %b = load <32 x i16>, <32 x i16>* %bp
+  %ae = zext <32 x i8> %a to <32 x i16>
+  %mask = icmp eq <32 x i16> %ae, %b
+  %load = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %ap, i32 8, <32 x i1> %mask, <32 x i8> undef)
+  %ext = zext <32 x i8> %load to <32 x i16>
+  ret <32 x i16> %ext
+}
+
+define <16 x i32> @masked_load_zext_v16i8i32_m32(<16 x i8>* %ap, <16 x i32>* %bp) #0 {
+; CHECK-LABEL: masked_load_zext_v16i8i32_m32:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl16
+; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].s }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
+; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
+; VBITS_GE_512-NEXT: ld1b { [[Z0]].s }, [[PG3]]/z, [x0]
+; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: ret
+  %a = load <16 x i8>, <16 x i8>* %ap
+  %b = load <16 x i32>, <16 x i32>* %bp
+  %ae = zext <16 x i8> %a to <16 x i32>
+  %mask = icmp eq <16 x i32> %ae, %b
+  %load = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %ap, i32 8, <16 x i1> %mask, <16 x i8> undef)
+  %ext = zext <16 x i8> %load to <16 x i32>
+  ret <16 x i32> %ext
+}
+
+define <8 x i64> @masked_load_zext_v8i8i64_m64(<8 x i8>* %ap, <8 x i64>* %bp) #0 {
+; CHECK-LABEL: masked_load_zext_v8i8i64_m64:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1b { [[Z0:z[0-9]+]].d }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1]
+; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, [[Z1]].d
+; VBITS_GE_512-NEXT: ld1b { [[Z0]].d }, [[PG3]]/z, [x0]
+; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: ret
+  %a = load <8 x i8>, <8 x i8>* %ap
+  %b = load <8 x i64>, <8 x i64>* %bp
+  %ae = zext <8 x i8> %a to <8 x i64>
+  %mask = icmp eq <8 x i64> %ae, %b
+  %load = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %ap, i32 8, <8 x i1> %mask, <8 x i8> undef)
+  %ext = zext <8 x i8> %load to <8 x i64>
+  ret <8 x i64> %ext
+}
+
+define <16 x i32> @masked_load_zext_v16i16i32_m32(<16 x i16>* %ap, <16 x i32>* %bp) #0 {
+; CHECK-LABEL: masked_load_zext_v16i16i32_m32:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].s, vl16
+; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].s }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1w { [[Z1:z[0-9]+]].s }, p0/z, [x1]
+; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].s, [[PG0]]/z, [[Z0]].s, [[Z1]].s
+; VBITS_GE_512-NEXT: ld1h { [[Z0]].s }, [[PG3]]/z, [x0]
+; VBITS_GE_512-NEXT: st1w { [[Z0]].s }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: ret
+  %a = load <16 x i16>, <16 x i16>* %ap
+  %b = load <16 x i32>, <16 x i32>* %bp
+  %ae = zext <16 x i16> %a to <16 x i32>
+  %mask = icmp eq <16 x i32> %ae, %b
+  %load = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %ap, i32 8, <16 x i1> %mask, <16 x i16> undef)
+  %ext = zext <16 x i16> %load to <16 x i32>
+  ret <16 x i32> %ext
+}
+
+define <8 x i64> @masked_load_zext_v8i16i64_m64(<8 x i16>* %ap, <8 x i64>* %bp) #0 {
+; CHECK-LABEL: masked_load_zext_v8i16i64_m64:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1h { [[Z0:z[0-9]+]].d }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1]
+; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, [[Z1]].d
+; VBITS_GE_512-NEXT: ld1h { [[Z0]].d }, [[PG3]]/z, [x0]
+; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: ret
+  %a = load <8 x i16>, <8 x i16>* %ap
+  %b = load <8 x i64>, <8 x i64>* %bp
+  %ae = zext <8 x i16> %a to <8 x i64>
+  %mask = icmp eq <8 x i64> %ae, %b
+  %load = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %ap, i32 8, <8 x i1> %mask, <8 x i16> undef)
+  %ext = zext <8 x i16> %load to <8 x i64>
+  ret <8 x i64> %ext
+}
+
+define <8 x i64> @masked_load_zext_v8i32i64_m64(<8 x i32>* %ap, <8 x i64>* %bp) #0 {
+; CHECK-LABEL: masked_load_zext_v8i32i64_m64:
+; VBITS_GE_512: ptrue [[PG0:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1w { [[Z0:z[0-9]+]].d }, p0/z, [x0]
+; VBITS_GE_512-NEXT: ld1d { [[Z1:z[0-9]+]].d }, p0/z, [x1]
+; VBITS_GE_512-NEXT: cmpeq [[PG1:p[0-9]+]].d, [[PG0]]/z, [[Z0]].d, [[Z1]].d
+; VBITS_GE_512-NEXT: ld1w { [[Z0]].d }, [[PG3]]/z, [x0]
+; VBITS_GE_512-NEXT: st1d { [[Z0]].d }, [[PG2]], [x8]
+; VBITS_GE_512-NEXT: ret
+  %a = load <8 x i32>, <8 x i32>* %ap
+  %b = load <8 x i64>, <8 x i64>* %bp
+  %ae = zext <8 x i32> %a to <8 x i64>
+  %mask = icmp eq <8 x i64> %ae, %b
+  %load = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %ap, i32 8, <8 x i1> %mask, <8 x i32> undef)
+  %ext = zext <8 x i32> %load to <8 x i64>
+  ret <8 x i64> %ext
+}
+
 declare <2 x half> @llvm.masked.load.v2f16(<2 x half>*, i32, <2 x i1>, <2 x half>)
 declare <2 x float> @llvm.masked.load.v2f32(<2 x float>*, i32, <2 x i1>, <2 x float>)
 declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)