diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1282,6 +1282,8 @@
 // Loads
 //
 
+def int_aarch64_sve_ld1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
+
 def int_aarch64_sve_ldnt1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
@@ -1290,6 +1292,8 @@
 // Stores
 //
 
+def int_aarch64_sve_st1 : AdvSIMD_1Vec_PredStore_Intrinsic;
+
 def int_aarch64_sve_stnt1 : AdvSIMD_1Vec_PredStore_Intrinsic;
 
 //
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -8978,6 +8978,7 @@
     Info.align = Align(16);
     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
     return true;
+  case Intrinsic::aarch64_sve_ld1:
   case Intrinsic::aarch64_sve_ldnt1: {
     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
     Info.opc = ISD::INTRINSIC_W_CHAIN;
@@ -8985,9 +8986,12 @@
     Info.ptrVal = I.getArgOperand(1);
     Info.offset = 0;
     Info.align = MaybeAlign(DL.getABITypeAlignment(PtrTy->getElementType()));
-    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MONonTemporal;
+    Info.flags = MachineMemOperand::MOLoad;
+    if (Intrinsic == Intrinsic::aarch64_sve_ldnt1)
+      Info.flags |= MachineMemOperand::MONonTemporal;
     return true;
   }
+  case Intrinsic::aarch64_sve_st1:
   case Intrinsic::aarch64_sve_stnt1: {
     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(2)->getType());
     Info.opc = ISD::INTRINSIC_W_CHAIN;
@@ -8995,7 +8999,9 @@
     Info.ptrVal = I.getArgOperand(2);
     Info.offset = 0;
     Info.align = MaybeAlign(DL.getABITypeAlignment(PtrTy->getElementType()));
-    Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MONonTemporal;
+    Info.flags = MachineMemOperand::MOStore;
+    if (Intrinsic == Intrinsic::aarch64_sve_stnt1)
+      Info.flags |= MachineMemOperand::MONonTemporal;
     return true;
   }
   default:
@@ -11514,7 +11520,7 @@
   }
 }
 
-static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) {
+static SDValue performLD1Combine(SDNode *N, SelectionDAG &DAG) {
   SDLoc DL(N);
   EVT VT = N->getValueType(0);
   EVT PtrTy = N->getOperand(3).getValueType();
@@ -11539,7 +11545,7 @@
   return L;
 }
 
-static SDValue performSTNT1Combine(SDNode *N, SelectionDAG &DAG) {
+static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG) {
   SDLoc DL(N);
 
   SDValue Data = N->getOperand(2);
@@ -13130,8 +13136,9 @@
   case Intrinsic::aarch64_neon_st3lane:
   case Intrinsic::aarch64_neon_st4lane:
     return performNEONPostLDSTCombine(N, DCI, DAG);
+  case Intrinsic::aarch64_sve_ld1:
   case Intrinsic::aarch64_sve_ldnt1:
-    return performLDNT1Combine(N, DAG);
+    return performLD1Combine(N, DAG);
   case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset:
     return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1);
   case Intrinsic::aarch64_sve_ldnt1_gather:
@@ -13144,8 +13151,9 @@
     return performLDNF1Combine(N, DAG, AArch64ISD::LDNF1);
   case Intrinsic::aarch64_sve_ldff1:
     return performLDNF1Combine(N, DAG, AArch64ISD::LDFF1);
+  case Intrinsic::aarch64_sve_st1:
   case Intrinsic::aarch64_sve_stnt1:
-    return performSTNT1Combine(N, DAG);
+    return performST1Combine(N, DAG);
   case Intrinsic::aarch64_sve_stnt1_scatter_scalar_offset:
     return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1);
   case Intrinsic::aarch64_sve_stnt1_scatter_uxtw:
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst1.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst1.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-ldst1.ll
@@ -0,0 +1,182 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; LD1B
+;
+
+define <vscale x 16 x i8> @ld1b_i8(<vscale x 16 x i1> %pred, i8* %addr) {
+; CHECK-LABEL: ld1b_i8:
+; CHECK: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred,
+                                                               i8* %addr)
+  ret <vscale x 16 x i8> %res
+}
+
+;
+; LD1H
+;
+
+define <vscale x 8 x i16> @ld1h_i16(<vscale x 8 x i1> %pred, i16* %addr) {
+; CHECK-LABEL: ld1h_i16:
+; CHECK: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %pred,
+                                                               i16* %addr)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x half> @ld1h_f16(<vscale x 8 x i1> %pred, half* %addr) {
+; CHECK-LABEL: ld1h_f16:
+; CHECK: ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %pred,
+                                                                half* %addr)
+  ret <vscale x 8 x half> %res
+}
+
+;
+; LD1W
+;
+
+define <vscale x 4 x i32> @ld1w_i32(<vscale x 4 x i1> %pred, i32* %addr) {
+; CHECK-LABEL: ld1w_i32:
+; CHECK: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pred,
+                                                               i32* %addr)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x float> @ld1w_f32(<vscale x 4 x i1> %pred, float* %addr) {
+; CHECK-LABEL: ld1w_f32:
+; CHECK: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %pred,
+                                                                 float* %addr)
+  ret <vscale x 4 x float> %res
+}
+
+;
+; LD1D
+;
+
+define <vscale x 2 x i64> @ld1d_i64(<vscale x 2 x i1> %pred, i64* %addr) {
+; CHECK-LABEL: ld1d_i64:
+; CHECK: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %pred,
+                                                               i64* %addr)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x double> @ld1d_f64(<vscale x 2 x i1> %pred, double* %addr) {
+; CHECK-LABEL: ld1d_f64:
+; CHECK: ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %pred,
+                                                                  double* %addr)
+  ret <vscale x 2 x double> %res
+}
+
+;
+; ST1B
+;
+
+define void @st1b_i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pred, i8* %addr) {
+; CHECK-LABEL: st1b_i8:
+; CHECK: st1b { z0.b }, p0, [x0]
+; CHECK-NEXT: ret
+  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data,
+                                          <vscale x 16 x i1> %pred,
+                                          i8* %addr)
+  ret void
+}
+
+;
+; ST1H
+;
+
+define void @st1h_i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, i16* %addr) {
+; CHECK-LABEL: st1h_i16:
+; CHECK: st1h { z0.h }, p0, [x0]
+; CHECK-NEXT: ret
+  call void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16> %data,
+                                          <vscale x 8 x i1> %pred,
+                                          i16* %addr)
+  ret void
+}
+
+define void @st1h_f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pred, half* %addr) {
+; CHECK-LABEL: st1h_f16:
+; CHECK: st1h { z0.h }, p0, [x0]
+; CHECK-NEXT: ret
+  call void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half> %data,
+                                          <vscale x 8 x i1> %pred,
+                                          half* %addr)
+  ret void
+}
+
+;
+; ST1W
+;
+
+define void @st1w_i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i32* %addr) {
+; CHECK-LABEL: st1w_i32:
+; CHECK: st1w { z0.s }, p0, [x0]
+; CHECK-NEXT: ret
+  call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %data,
+                                          <vscale x 4 x i1> %pred,
+                                          i32* %addr)
+  ret void
+}
+
+define void @st1w_f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pred, float* %addr) {
+; CHECK-LABEL: st1w_f32:
+; CHECK: st1w { z0.s }, p0, [x0]
+; CHECK-NEXT: ret
+  call void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float> %data,
+                                          <vscale x 4 x i1> %pred,
+                                          float* %addr)
+  ret void
+}
+
+;
+; ST1D
+;
+
+define void @st1d_i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i64* %addr) {
+; CHECK-LABEL: st1d_i64:
+; CHECK: st1d { z0.d }, p0, [x0]
+; CHECK-NEXT: ret
+  call void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64> %data,
+                                          <vscale x 2 x i1> %pred,
+                                          i64* %addr)
+  ret void
+}
+
+define void @st1d_f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pred, double* %addr) {
+; CHECK-LABEL: st1d_f64:
+; CHECK: st1d { z0.d }, p0, [x0]
+; CHECK-NEXT: ret
+  call void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double> %data,
+                                          <vscale x 2 x i1> %pred,
+                                          double* %addr)
+  ret void
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1>, i16*)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, i32*)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1>, i64*)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1>, half*)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1>, float*)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, double*)
+
+declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
+declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
+declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
+declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
+declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
+declare void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)
+declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)
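
As an illustrative aside (not part of the patch itself), the two new intrinsics compose into a single predicated vector copy; the function name @copy_nxv4i32 and its arguments are hypothetical, and the declarations are repeated so the sketch stands alone:

define void @copy_nxv4i32(<vscale x 4 x i1> %pg, i32* %src, i32* %dst) {
  ; Load one predicated vector of i32 elements through the new ld1 intrinsic.
  %v = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pg, i32* %src)
  ; Store it back out under the same predicate through the new st1 intrinsic.
  call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %v, <vscale x 4 x i1> %pg, i32* %dst)
  ret void
}

declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, i32*)
declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)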