diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -371,8 +371,8 @@
                            unsigned Opc_rr, unsigned Opc_ri,
                            bool IsIntr = false);
   void SelectContiguousMultiVectorLoad(SDNode *N, unsigned NumVecs,
-                                       unsigned Scale, unsigned Opc_rr,
-                                       unsigned Opc_ri);
+                                       unsigned Scale, unsigned Opc_ri,
+                                       unsigned Opc_rr);
   void SelectDestructiveMultiIntrinsic(SDNode *N, unsigned NumVecs,
                                        bool IsZmMulti, unsigned Opcode,
                                        bool HasPred = false);
@@ -1792,10 +1792,12 @@
   EVT VT = N->getValueType(0);
   SDValue Chain = N->getOperand(0);
 
-  // Use simplest addressing mode for now - base + 0 offset
   SDValue PNg = N->getOperand(2);
   SDValue Base = N->getOperand(3);
   SDValue Offset = CurDAG->getTargetConstant(0, DL, MVT::i64);
+  unsigned Opc;
+  std::tie(Opc, Base, Offset) =
+      findAddrModeSVELoadStore(N, Opc_rr, Opc_ri, Base, Offset, Scale);
 
   SDValue Ops[] = {PNg,  // Predicate-as-counter
                    Base, // Memory operand
@@ -1803,7 +1805,7 @@
 
   const EVT ResTys[] = {MVT::Untyped, MVT::Other};
 
-  SDNode *Load = CurDAG->getMachineNode(Opc_ri, DL, ResTys, Ops);
+  SDNode *Load = CurDAG->getMachineNode(Opc, DL, ResTys, Ops);
   SDValue SuperReg = SDValue(Load, 0);
   for (unsigned i = 0; i < NumVecs; ++i)
     ReplaceUses(SDValue(N, i), CurDAG->getTargetExtractSubreg(
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-loads.ll
@@ -20,6 +20,23 @@
   ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ld1_x2_i8_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_i8_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1b { z0.b, z1.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i8, ptr %ptr, i64 %index
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
 define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld1_x2_i16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ld1_x2_i16:
 ; CHECK:       // %bb.0:
@@ -36,6 +53,23 @@
   ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
 
+define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ld1_x2_i16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_i16_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1h { z0.h, z1.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i16, ptr %ptr, i64 %index
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
 define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld1_x2_i32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ld1_x2_i32:
 ; CHECK:       // %bb.0:
@@ -52,6 +86,23 @@
   ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
 
+define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ld1_x2_i32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_i32_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1w { z0.s, z1.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i32, ptr %ptr, i64 %index
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x2.nxv4i32(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
 define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld1_x2_i64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ld1_x2_i64:
 ; CHECK:       // %bb.0:
@@ -68,6 +119,23 @@
   ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
 
+define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ld1_x2_i64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_i64_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1d { z0.d, z1.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i64, ptr %ptr, i64 %index
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld1.pn.x2.nxv2i64(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
 define { <vscale x 8 x half>, <vscale x 8 x half> } @ld1_x2_f16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ld1_x2_f16:
 ; CHECK:       // %bb.0:
@@ -84,6 +152,23 @@
   ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
 
+define { <vscale x 8 x half>, <vscale x 8 x half> } @ld1_x2_f16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_f16_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1h { z0.h, z1.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr half, ptr %ptr, i64 %index
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x2.nxv8f16(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
 define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld1_x2_bf16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ld1_x2_bf16:
 ; CHECK:       // %bb.0:
@@ -100,6 +185,23 @@
   ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
 }
 
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld1_x2_bf16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_bf16_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1h { z0.h, z1.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr bfloat, ptr %ptr, i64 %index
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld1.pn.x2.nxv8bf16(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
 define { <vscale x 4 x float>, <vscale x 4 x float> } @ld1_x2_f32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ld1_x2_f32:
 ; CHECK:       // %bb.0:
@@ -116,6 +218,23 @@
   ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
 
+define { <vscale x 4 x float>, <vscale x 4 x float> } @ld1_x2_f32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_f32_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1w { z0.s, z1.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr float, ptr %ptr, i64 %index
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x2.nxv4f32(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
 define { <vscale x 2 x double>, <vscale x 2 x double> } @ld1_x2_f64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ld1_x2_f64:
 ; CHECK:       // %bb.0:
@@ -132,6 +251,23 @@
   ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
 
+define { <vscale x 2 x double>, <vscale x 2 x double> } @ld1_x2_f64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x2_f64_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1d { z0.d, z1.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr double, ptr %ptr, i64 %index
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld1.pn.x2.nxv2f64(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
 ; Test to ensure we load into the correct registers for the instruction
 define <vscale x 16 x i8> @ld1_x2_i8_z0_taken(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 16 x i8> %val) {
 ; CHECK-LABEL: ld1_x2_i8_z0_taken:
@@ -154,6 +290,29 @@
   ret <vscale x 16 x i8> %res
 }
 
+; Test to ensure we load into the correct registers for the instruction
+define <vscale x 16 x i8> @ld1_x2_i8_z0_taken_scalar(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 16 x i8> %val, i64 %index) {
+; CHECK-LABEL: ld1_x2_i8_z0_taken_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1b { z2.b, z3.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    add z0.b, z0.b, z2.b
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i8, ptr %ptr, i64 %index
+  %ld1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %pn, ptr %base);
+  %ld1_0 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %ld1, 0
+  %res = add <vscale x 16 x i8> %val, %ld1_0
+  ret <vscale x 16 x i8> %res
+}
+
 define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld1_x4_i8(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ld1_x4_i8:
 ; CHECK:       // %bb.0:
@@ -170,6 +329,23 @@
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ld1_x4_i8_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_i8_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1b { z0.b - z3.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i8, ptr %ptr, i64 %index
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x4.nxv16i8(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
 define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld1_x4_i16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ld1_x4_i16:
 ; CHECK:       // %bb.0:
@@ -186,6 +362,23 @@
   ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
 
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ld1_x4_i16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_i16_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1h { z0.h - z3.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i16, ptr %ptr, i64 %index
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
 define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld1_x4_i32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ld1_x4_i32:
 ; CHECK:       // %bb.0:
@@ -202,6 +395,23 @@
   ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
 
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ld1_x4_i32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_i32_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1w { z0.s - z3.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i32, ptr %ptr, i64 %index
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x4.nxv4i32(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
 define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld1_x4_i64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ld1_x4_i64:
 ; CHECK:       // %bb.0:
@@ -218,6 +428,23 @@
   ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
 
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ld1_x4_i64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_i64_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1d { z0.d - z3.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i64, ptr %ptr, i64 %index
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld1.pn.x4.nxv2i64(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
 define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld1_x4_f16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ld1_x4_f16:
 ; CHECK:       // %bb.0:
@@ -234,6 +461,23 @@
   ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
 
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ld1_x4_f16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_f16_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1h { z0.h - z3.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr half, ptr %ptr, i64 %index
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
 define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld1_x4_bf16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ld1_x4_bf16:
 ; CHECK:       // %bb.0:
@@ -250,6 +494,23 @@
   ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
 }
 
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ld1_x4_bf16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_bf16_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1h { z0.h - z3.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr bfloat, ptr %ptr, i64 %index
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ld1.pn.x4.nxv8bf16(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
 define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld1_x4_f32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ld1_x4_f32:
 ; CHECK:       // %bb.0:
@@ -266,6 +527,23 @@
   ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
 
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ld1_x4_f32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_f32_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1w { z0.s - z3.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr float, ptr %ptr, i64 %index
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
 define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld1_x4_f64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ld1_x4_f64:
 ; CHECK:       // %bb.0:
@@ -282,6 +560,23 @@
   ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
 
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ld1_x4_f64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ld1_x4_f64_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1d { z0.d - z3.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr double, ptr %ptr, i64 %index
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld1.pn.x4.nxv2f64(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
 ; Test to ensure we load into the correct registers for the instruction
 define <vscale x 8 x i16> @ld1_x4_i16_z0_taken(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 8 x i16> %val) {
 ; CHECK-LABEL: ld1_x4_i16_z0_taken:
@@ -304,6 +599,28 @@
   ret <vscale x 8 x i16> %res
 }
 
+; Test to ensure we load into the correct registers for the instruction
+define <vscale x 8 x i16> @ld1_x4_i16_z0_taken_scalar(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 8 x i16> %val, i64 %index) {
+; CHECK-LABEL: ld1_x4_i16_z0_taken_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ld1h { z4.h - z7.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    add z0.h, z0.h, z4.h
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i16, ptr %ptr, i64 %index
+  %ld1 = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x4.nxv8i16(target("aarch64.svcount") %pn, ptr %base);
+  %ld1_0 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %ld1, 0
+  %res = add <vscale x 8 x i16> %val, %ld1_0
+  ret <vscale x 8 x i16> %res
+}
 
 ; == Non-temporal Multi-Vector Consecutive Loads ==
 
@@ -323,6 +640,23 @@
   ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @ldnt1_x2_i8_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_i8_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1b { z0.b, z1.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i8, ptr %ptr, i64 %index
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv16i8(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
 define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ldnt1_x2_i16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ldnt1_x2_i16:
 ; CHECK:       // %bb.0:
@@ -339,6 +673,23 @@
   ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
 
+define { <vscale x 8 x i16>, <vscale x 8 x i16> } @ldnt1_x2_i16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_i16_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1h { z0.h, z1.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i16, ptr %ptr, i64 %index
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv8i16(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
 define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ldnt1_x2_i32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ldnt1_x2_i32:
 ; CHECK:       // %bb.0:
@@ -355,6 +706,23 @@
   ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
 
+define { <vscale x 4 x i32>, <vscale x 4 x i32> } @ldnt1_x2_i32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_i32_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1w { z0.s, z1.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i32, ptr %ptr, i64 %index
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv4i32(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
 define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ldnt1_x2_i64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ldnt1_x2_i64:
 ; CHECK:       // %bb.0:
@@ -371,6 +739,23 @@
   ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
 
+define { <vscale x 2 x i64>, <vscale x 2 x i64> } @ldnt1_x2_i64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_i64_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1d { z0.d, z1.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i64, ptr %ptr, i64 %index
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv2i64(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
 define { <vscale x 8 x half>, <vscale x 8 x half> } @ldnt1_x2_f16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ldnt1_x2_f16:
 ; CHECK:       // %bb.0:
@@ -387,6 +772,23 @@
   ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
 
+define { <vscale x 8 x half>, <vscale x 8 x half> } @ldnt1_x2_f16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_f16_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1h { z0.h, z1.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i16, ptr %ptr, i64 %index
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv8f16(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
 define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ldnt1_x2_bf16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ldnt1_x2_bf16:
 ; CHECK:       // %bb.0:
@@ -403,6 +805,23 @@
   ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
 }
 
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ldnt1_x2_bf16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_bf16_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1h { z0.h, z1.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr bfloat, ptr %ptr, i64 %index
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv8bf16(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
 define { <vscale x 4 x float>, <vscale x 4 x float> } @ldnt1_x2_f32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ldnt1_x2_f32:
 ; CHECK:       // %bb.0:
@@ -419,6 +838,23 @@
   ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
 
+define { <vscale x 4 x float>, <vscale x 4 x float> } @ldnt1_x2_f32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_f32_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1w { z0.s, z1.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr float, ptr %ptr, i64 %index
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv4f32(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
 define { <vscale x 2 x double>, <vscale x 2 x double> } @ldnt1_x2_f64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ldnt1_x2_f64:
 ; CHECK:       // %bb.0:
@@ -435,6 +871,23 @@
   ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
 
+define { <vscale x 2 x double>, <vscale x 2 x double> } @ldnt1_x2_f64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x2_f64_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1d { z0.d, z1.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr double, ptr %ptr, i64 %index
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv2f64(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
 ; Test to ensure we load into the correct registers for the instruction
 define <vscale x 4 x i32> @ldnt1_x2_i32_z0_taken(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 4 x i32> %val) {
 ; CHECK-LABEL: ldnt1_x2_i32_z0_taken:
@@ -457,6 +910,29 @@
   ret <vscale x 4 x i32> %res
 }
 
+; Test to ensure we load into the correct registers for the instruction
+define <vscale x 4 x i32> @ldnt1_x2_i32_z0_taken_scalar(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 4 x i32> %val, i64 %index) {
+; CHECK-LABEL: ldnt1_x2_i32_z0_taken_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1w { z2.s, z3.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    add z0.s, z0.s, z2.s
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i32, ptr %ptr, i64 %index
+  %ld1 = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ldnt1.pn.x2.nxv4i32(target("aarch64.svcount") %pn, ptr %base);
+  %ld1_0 = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } %ld1, 0
+  %res = add <vscale x 4 x i32> %val, %ld1_0
+  ret <vscale x 4 x i32> %res
+}
+
 define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ldnt1_x4_i8(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ldnt1_x4_i8:
 ; CHECK:       // %bb.0:
@@ -473,6 +949,23 @@
   ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
 }
 
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @ldnt1_x4_i8_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_i8_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1b { z0.b - z3.b }, pn8/z, [x0, x1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i8, ptr %ptr, i64 %index
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv16i8(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
 define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ldnt1_x4_i16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ldnt1_x4_i16:
 ; CHECK:       // %bb.0:
@@ -489,6 +982,23 @@
   ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
 }
 
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @ldnt1_x4_i16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_i16_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1h { z0.h - z3.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i16, ptr %ptr, i64 %index
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv8i16(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
 define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ldnt1_x4_i32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ldnt1_x4_i32:
 ; CHECK:       // %bb.0:
@@ -505,6 +1015,23 @@
   ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
 }
 
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @ldnt1_x4_i32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_i32_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1w { z0.s - z3.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i32, ptr %ptr, i64 %index
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv4i32(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
 define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ldnt1_x4_i64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ldnt1_x4_i64:
 ; CHECK:       // %bb.0:
@@ -521,6 +1048,23 @@
   ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
 }
 
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @ldnt1_x4_i64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_i64_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1d { z0.d - z3.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i64, ptr %ptr, i64 %index
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv2i64(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
 define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ldnt1_x4_f16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ldnt1_x4_f16:
 ; CHECK:       // %bb.0:
@@ -537,6 +1081,23 @@
   ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
 }
 
+define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @ldnt1_x4_f16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_f16_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1h { z0.h - z3.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr half, ptr %ptr, i64 %index
+  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv8f16(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
+}
+
 define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ldnt1_x4_bf16(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ldnt1_x4_bf16:
 ; CHECK:       // %bb.0:
@@ -553,6 +1114,23 @@
   ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
 }
 
+define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @ldnt1_x4_bf16_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_bf16_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1h { z0.h - z3.h }, pn8/z, [x0, x1, lsl #1]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr bfloat, ptr %ptr, i64 %index
+  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv8bf16(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
+}
+
 define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ldnt1_x4_f32(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ldnt1_x4_f32:
 ; CHECK:       // %bb.0:
@@ -569,6 +1147,23 @@
   ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
 }
 
+define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @ldnt1_x4_f32_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_f32_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1w { z0.s - z3.s }, pn8/z, [x0, x1, lsl #2]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr float, ptr %ptr, i64 %index
+  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv4f32(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
+}
+
 define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ldnt1_x4_f64(target("aarch64.svcount") %pn, ptr %ptr) nounwind {
 ; CHECK-LABEL: ldnt1_x4_f64:
 ; CHECK:       // %bb.0:
@@ -585,6 +1180,23 @@
   ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
 }
 
+define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @ldnt1_x4_f64_scalar(target("aarch64.svcount") %pn, ptr %ptr, i64 %index) nounwind {
+; CHECK-LABEL: ldnt1_x4_f64_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1d { z0.d - z3.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr double, ptr %ptr, i64 %index
+  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv2f64(target("aarch64.svcount") %pn, ptr %base);
+  ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
+}
+
 ; Test to ensure we load into the correct registers for the instruction
 define <vscale x 2 x i64> @ldnt1_x4_i64_z0_taken(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 2 x i64> %val) {
 ; CHECK-LABEL: ldnt1_x4_i64_z0_taken:
@@ -607,6 +1219,29 @@
   ret <vscale x 2 x i64> %res
 }
 
+; Test to ensure we load into the correct registers for the instruction
+define <vscale x 2 x i64> @ldnt1_x4_i64_z0_taken_scalar(target("aarch64.svcount") %pn, ptr %ptr, <vscale x 2 x i64> %val, i64 %index) {
+; CHECK-LABEL: ldnt1_x4_i64_z0_taken_scalar:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-1
+; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov p8.b, p0.b
+; CHECK-NEXT:    ldnt1d { z4.d - z7.d }, pn8/z, [x0, x1, lsl #3]
+; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT:    add z0.d, z0.d, z4.d
+; CHECK-NEXT:    addvl sp, sp, #1
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %base = getelementptr i64, ptr %ptr, i64 %index
+  %ld1 = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ldnt1.pn.x4.nxv2i64(target("aarch64.svcount") %pn, ptr %base);
+  %ld1_0 = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %ld1, 0
+  %res = add <vscale x 2 x i64> %val, %ld1_0
+  ret <vscale x 2 x i64> %res
+}
+
 declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.ld1.pn.x2.nxv2i64(target("aarch64.svcount"), ptr)
 declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.ld1.pn.x2.nxv4i32(target("aarch64.svcount"), ptr)
 declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.ld1.pn.x2.nxv8i16(target("aarch64.svcount"), ptr)
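---
Note (not part of the patch): the scalar+scalar fold these tests pin down is the addressing pattern a front end produces for ACLE code like the minimal sketch below. It assumes the SVE2.1/SME2 ACLE names svcount_t and the overloaded svld1_x2 from <arm_sve.h>, built for a target such as -march=armv9-a+sve2p1; treat it as an illustration only.

    #include <arm_sve.h>

    // Multi-vector load whose base is "ptr + runtime index". The GEP this
    // lowers to is what SelectContiguousMultiVectorLoad can now fold into
    //   ld1h { z0.h, z1.h }, pn8/z, [x0, x1, lsl #1]
    // rather than materialising the address with a separate add.
    svint16x2_t load_pair(svcount_t pn, const int16_t *ptr, int64_t index) {
      return svld1_x2(pn, ptr + index);
    }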