diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -1913,6 +1913,14 @@
   let LangOpts = [COnly];
 }
 
+def BPFVirtualOffsets : InheritableAttr,
+                        TargetSpecificAttr<TargetBPF> {
+  let Spellings = [Clang<"bpf_virtual_offsets">];
+  let Subjects = SubjectList<[Record], ErrorDiag>;
+  let Documentation = [BPFVirtualOffsetsDocs];
+  let LangOpts = [COnly];
+}
+
 def WebAssemblyExportName : InheritableAttr,
                             TargetSpecificAttr<TargetWebAssembly> {
   let Spellings = [Clang<"export_name">];
diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td
--- a/clang/include/clang/Basic/AttrDocs.td
+++ b/clang/include/clang/Basic/AttrDocs.td
@@ -2181,6 +2181,17 @@
   }];
 }
 
+def BPFVirtualOffsetsDocs : Documentation {
+  let Category = DocCatFunction;
+  let Content = [{
+Clang supports the ``__attribute__((bpf_virtual_offsets))`` attribute for
+the BPF target. This attribute may be attached to a struct or union
+declaration; it tells the compiler that the runtime environment might
+patch accesses to the fields of this type by updating the field
+offsets.
+  }];
+}
+
 def MipsInterruptDocs : Documentation {
   let Category = DocCatFunction;
   let Heading = "interrupt (MIPS)";
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -4371,6 +4371,15 @@
   return false;
 }
 
+static Address wrapWithBPFVirtualOffsetMarker(CodeGenFunction &CGF,
+                                              Address &Addr) {
+  llvm::Function *Fn =
+      CGF.CGM.getIntrinsic(llvm::Intrinsic::bpf_virtual_offset_marker,
+                           {Addr.getType(), Addr.getType()});
+  llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.getPointer()});
+  return Address(Call, Addr.getElementType(), Addr.getAlignment());
+}
+
 LValue CodeGenFunction::EmitLValueForField(LValue base,
                                            const FieldDecl *field) {
   LValueBaseInfo BaseInfo = base.getBaseInfo();
@@ -4388,6 +4397,8 @@
     Address Addr = base.getAddress(*this);
     unsigned Idx = RL.getLLVMFieldNo(field);
     const RecordDecl *rec = field->getParent();
+    if (rec->hasAttr<BPFVirtualOffsetsAttr>())
+      Addr = wrapWithBPFVirtualOffsetMarker(*this, Addr);
     if (!UseVolatile) {
       if (!IsInPreservedAIRegion &&
           (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
@@ -4461,6 +4472,8 @@
   }
 
   Address addr = base.getAddress(*this);
+  if (rec->hasAttr<BPFVirtualOffsetsAttr>())
+    addr = wrapWithBPFVirtualOffsetMarker(*this, addr);
   if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
     if (CGM.getCodeGenOpts().StrictVTablePointers &&
         ClassDef->isDynamicClass()) {
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -7296,6 +7296,12 @@
   return ::new (Context) BTFDeclTagAttr(Context, AL, AL.getBTFDeclTag());
 }
 
+static void handleBPFVirtualOffsetsAttr(Sema &S, Decl *D,
+                                        const ParsedAttr &AL) {
+  auto *Rec = cast<RecordDecl>(D);
+  Rec->addAttr(::new (S.Context) BPFVirtualOffsetsAttr(S.Context, AL));
+}
+
 static void handleWebAssemblyExportNameAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
   if (!isFunctionOrMethod(D)) {
     S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
@@ -8486,6 +8492,9 @@
   case ParsedAttr::AT_BTFDeclTag:
     handleBTFDeclTagAttr(S, D, AL);
     break;
+  case ParsedAttr::AT_BPFVirtualOffsets:
+    handleBPFVirtualOffsetsAttr(S, D, AL);
+    break;
   case ParsedAttr::AT_WebAssemblyExportName:
     handleWebAssemblyExportNameAttr(S, D, AL);
     break;
diff --git a/clang/test/CodeGen/bpf-virtual-offsets-1.c b/clang/test/CodeGen/bpf-virtual-offsets-1.c
new file mode 100644
--- /dev/null
+++ 
b/clang/test/CodeGen/bpf-virtual-offsets-1.c @@ -0,0 +1,1345 @@ +// This test covers various usages of __attribute__((bpf_virtual_offsets)). +// This attribute is handled in three stages: +// - virtual.offset.marker intrinsic call is added while IR is generated from AST +// - virtual.offset.marker call is replaced with calls to either: +// llvm.bpf.getelementptr.and.load intrinsic, or +// llvm.bpf.getelementptr.and.store intrinsic +// at the early IR processing stage +// - get and load/store intrinsics are lowered at the late IR processing stage +// +// The test verifies all three stages using three separate RUN commands. +// +// First, check that virtual.offset.marker are inserted where expected: +// +// RUN: %clang -target bpf -Xclang -disable-llvm-passes -S -emit-llvm %s -o - | \ +// RUN: FileCheck -check-prefix=FRONT %s +// +// Next, check that gep_and_load/store calls were generated: +// +// RUN: %clang -target bpf -O2 -S -emit-llvm %s -o - | \ +// RUN: FileCheck -check-prefix=MID %s +// +// Finally, verify that gep_and_load/store calls were rewritten: +// +// RUN: %clang -mllvm --print-after=bpf-check-and-opt-ir \ +// RUN: -mllvm --stop-after=bpf-check-and-opt-ir \ +// RUN: -target bpf -S -O2 -o /dev/null 2>&1 %s | \ +// RUN: FileCheck -check-prefix=BACK %s + +// TODO: requires simplification!!! + +#define __virtual_offsets__ __attribute__((bpf_virtual_offsets)) +#define __preserve_access_index__ __attribute__((preserve_access_index)) + +struct inner_no_vof { + int a; + int b; +}; + +struct inner_vof { + int a; + int b; +} __virtual_offsets__; + +struct context1 { + int first; + struct inner_no_vof inner_inline; + struct inner_no_vof *inner_ptr; + int butlast; + int last; +} __virtual_offsets__; + +struct context2 { + int first; + struct inner_vof inner_inline; + struct inner_vof *inner_ptr; + int butlast; + int last; +} __virtual_offsets__; + +extern void consume_int(int); +extern void consume_float(float); +extern void consume_ptr(void *); + +// FRONT: define dso_local void @fields_access_1 +// MID: define dso_local void @fields_access_1 +// BACK: define dso_local void @fields_access_1 +void fields_access_1(struct context1 *ctx) { +// FRONT: [[ctx_addr:%[a-zA-Z0-9._]+]] = alloca ptr +// MID: entry: +// BACK: entry: + + consume_int(ctx->first); +// FRONT: [[r0:%[a-zA-Z0-9._]+]] = load ptr, ptr [[ctx_addr]] +// FRONT-NEXT: [[r1:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r0]]) +// FRONT-NEXT: [[first:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr [[r1]], i32 0, i32 0 +// FRONT-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load i32, ptr [[first]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r2]]) + +// MID-NEXT: [[first14:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) 
+// MID-SAME: @llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.context1) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 0) +// MID-NEXT: tail call void @consume_int(i32 noundef [[first14]]) + +// BACK-NEXT: [[first1429:%[a-zA-Z0-9._]+]] = getelementptr %struct.context1, ptr %ctx, i32 0, i32 0 +// BACK-NEXT: [[r0:%[a-zA-Z0-9._]+]] = load i32, ptr [[first1429]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r0]]) + + consume_int(ctx->inner_inline.a); +// FRONT: [[r3:%[a-zA-Z0-9._]+]] = load ptr, ptr [[ctx_addr]] +// FRONT-NEXT: [[r4:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r3]]) +// FRONT-NEXT: [[inner_inline:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr [[r4]], i32 0, i32 1 +// FRONT-NEXT: [[a:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[inner_inline]], i32 0, i32 0 +// FRONT-NEXT: [[r5:%[a-zA-Z0-9._]+]] = load i32, ptr [[a]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r5]]) + +// TODO: BUG! +// MID-NEXT: [[inner_inline:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr %ctx, i64 0, i32 1 +// MID-NEXT: [[r0:%[a-zA-Z0-9._]+]] = load i32, ptr [[inner_inline]] +// MID-NEXT: tail call void @consume_int(i32 noundef [[r0]]) + +// BACK-NEXT: [[inner_inline:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr %ctx, i64 0, i32 1 +// BACK-NEXT: [[r1:%[a-zA-Z0-9._]+]] = load i32, ptr [[inner_inline]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r1]]) + + consume_int(ctx->inner_inline.b); +// FRONT: [[r6:%[a-zA-Z0-9._]+]] = load ptr, ptr [[ctx_addr]] +// FRONT-NEXT: [[r7:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r6]]) +// FRONT-NEXT: [[inner_inline1:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr [[r7]], i32 0, i32 1 +// FRONT-NEXT: [[b:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[inner_inline1]], i32 0, i32 1 +// FRONT-NEXT: [[r8:%[a-zA-Z0-9._]+]] = load i32, ptr [[b]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r8]]) + +// TODO: BUG! +// MID-NEXT: [[b:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr %ctx, i64 0, i32 1, i32 1 +// MID-NEXT: [[r1:%[a-zA-Z0-9._]+]] = load i32, ptr [[b]] +// MID-NEXT: tail call void @consume_int(i32 noundef [[r1]]) + +// BACK-NEXT: [[b:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr %ctx, i64 0, i32 1, i32 1 +// BACK-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load i32, ptr [[b]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r2]]) + + consume_int(ctx->inner_ptr->a); +// FRONT: [[r9:%[a-zA-Z0-9._]+]] = load ptr, ptr [[ctx_addr]] +// FRONT-NEXT: [[r10:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r9]]) +// FRONT-NEXT: [[inner_ptr:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr [[r10]], i32 0, i32 2 +// FRONT-NEXT: [[r11:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr]] +// FRONT-NEXT: [[a2:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r11]], i32 0, i32 0 +// FRONT-NEXT: [[r12:%[a-zA-Z0-9._]+]] = load i32, ptr [[a2]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r12]]) + +// MID-NEXT: [[inner_ptr15:%[a-zA-Z0-9._]+]] = tail call ptr (ptr, i1, i8, i8, i1, ...) 
+// MID-SAME: @llvm.bpf.getelementptr.and.load.p0(ptr elementtype(%struct.context1) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 2) +// MID-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load i32, ptr [[inner_ptr15]] +// MID-NEXT: tail call void @consume_int(i32 noundef [[r2]]) + +// BACK-NEXT: [[inner_ptr1530:%[a-zA-Z0-9._]+]] = getelementptr %struct.context1, ptr %ctx, i32 0, i32 2 +// BACK-NEXT: [[r3:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr1530]] +// BACK-NEXT: [[r4:%[a-zA-Z0-9._]+]] = load i32, ptr [[r3]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r4]]) + + consume_int(ctx->inner_ptr->b); +// FRONT: [[r13:%[a-zA-Z0-9._]+]] = load ptr, ptr [[ctx_addr]] +// FRONT-NEXT: [[r14:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r13]]) +// FRONT-NEXT: [[inner_ptr3:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr [[r14]], i32 0, i32 2 +// FRONT-NEXT: [[r15:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr3]] +// FRONT-NEXT: [[b4:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r15]], i32 0, i32 1 +// FRONT-NEXT: [[r16:%[a-zA-Z0-9._]+]] = load i32, ptr [[b4]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r16]]) + +// MID-NEXT: [[inner_ptr316:%[a-zA-Z0-9._]+]] = tail call ptr (ptr, i1, i8, i8, i1, ...) +// MID-SAME: @llvm.bpf.getelementptr.and.load.p0(ptr elementtype(%struct.context1) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 2) +// MID-NEXT: [[b4:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[inner_ptr316]], i64 0, i32 1 +// MID-NEXT: [[r3:%[a-zA-Z0-9._]+]] = load i32, ptr [[b4]] +// MID-NEXT: tail call void @consume_int(i32 noundef [[r3]]) + +// BACK-NEXT: [[inner_ptr31631:%[a-zA-Z0-9._]+]] = getelementptr %struct.context1, ptr %ctx, i32 0, i32 2 +// BACK-NEXT: [[r5:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr31631]] +// BACK-NEXT: [[b4:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r5]], i64 0, i32 1 +// BACK-NEXT: [[r6:%[a-zA-Z0-9._]+]] = load i32, ptr [[b4]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r6]]) + + ctx->first = 1; +// FRONT: [[r17:%[a-zA-Z0-9._]+]] = load ptr, ptr [[ctx_addr]] +// FRONT-NEXT: [[r18:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r17]]) +// FRONT-NEXT: [[first5:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr [[r18]], i32 0, i32 0 +// FRONT-NEXT: store i32 1, ptr [[first5]] + +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) +// MID-SAME: @llvm.bpf.getelementptr.and.store.i32(i32 1, ptr elementtype(%struct.context1) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 0) + +// BACK-NEXT: [[r7:%[a-zA-Z0-9._]+]] = getelementptr %struct.context1, ptr %ctx, i32 0, i32 0 +// BACK-NEXT: store i32 1, ptr [[r7]] + + ctx->inner_inline.a = 2; +// FRONT: [[r19:%[a-zA-Z0-9._]+]] = load ptr, ptr [[ctx_addr]] +// FRONT-NEXT: [[r20:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r19]]) +// FRONT-NEXT: [[inner_inline6:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr [[r20]], i32 0, i32 1 +// FRONT-NEXT: [[a7:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[inner_inline6]], i32 0, i32 0 +// FRONT-NEXT: store i32 2, ptr [[a7]] +// TODO: BUG! 
+// MID-NEXT: store i32 2, ptr [[inner_inline]] +// BACK-NEXT: store i32 2, ptr [[inner_inline]] + + ctx->inner_inline.b = 3; +// FRONT: [[r21:%[a-zA-Z0-9._]+]] = load ptr, ptr [[ctx_addr]] +// FRONT-NEXT: [[r22:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r21]]) +// FRONT-NEXT: [[inner_inline8:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr [[r22]], i32 0, i32 1 +// FRONT-NEXT: [[b9:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[inner_inline8]], i32 0, i32 1 +// FRONT-NEXT: store i32 3, ptr [[b9]] + +// TODO: BUG! +// MID-NEXT: store i32 3, ptr [[b]] +// BACK-NEXT: store i32 3, ptr [[b]] + + ctx->inner_ptr->a = 4; +// FRONT: [[r23:%[a-zA-Z0-9._]+]] = load ptr, ptr [[ctx_addr]] +// FRONT-NEXT: [[r24:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r23]]) +// FRONT-NEXT: [[inner_ptr10:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr [[r24]], i32 0, i32 2 +// FRONT-NEXT: [[r25:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr10]] +// FRONT-NEXT: [[a11:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r25]], i32 0, i32 0 +// FRONT-NEXT: store i32 4, ptr [[a11]] + +// MID-NEXT: [[inner_ptr1017:%[a-zA-Z0-9._]+]] = tail call ptr (ptr, i1, i8, i8, i1, ...) +// MID-SAME: @llvm.bpf.getelementptr.and.load.p0(ptr elementtype(%struct.context1) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 2) +// MID-NEXT: store i32 4, ptr [[inner_ptr1017]] + +// BACK-NEXT: [[inner_ptr101732:%[a-zA-Z0-9._]+]] = getelementptr %struct.context1, ptr %ctx, i32 0, i32 2 +// BACK-NEXT: [[r8:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr101732]] +// BACK-NEXT: store i32 4, ptr [[r8]] + + ctx->inner_ptr->b = 5; +// FRONT: [[r26:%[a-zA-Z0-9._]+]] = load ptr, ptr [[ctx_addr]] +// FRONT-NEXT: [[r27:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r26]]) +// FRONT-NEXT: [[inner_ptr12:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr [[r27]], i32 0, i32 2 +// FRONT-NEXT: [[r28:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr12]] +// FRONT-NEXT: [[b13:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r28]], i32 0, i32 1 + +// MID-NEXT: [[inner_ptr1218:%[a-zA-Z0-9._]+]] = tail call ptr (ptr, i1, i8, i8, i1, ...) 
+// MID-SAME: @llvm.bpf.getelementptr.and.load.p0(ptr elementtype(%struct.context1) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 2) +// MID-NEXT: [[b13:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[inner_ptr1218]], i64 0, i32 1 +// MID-NEXT: store i32 5, ptr [[b13]] + +// BACK-NEXT: [[inner_ptr121833:%[a-zA-Z0-9._]+]] = getelementptr %struct.context1, ptr %ctx, i32 0, i32 2 +// BACK-NEXT: [[r9:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr121833]] +// BACK-NEXT: [[b13:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r9]], i64 0, i32 1 +// BACK-NEXT: store i32 5, ptr [[b13]] + + consume_ptr(ctx); +// FRONT: ret void +// MID: ret void +// BACK: ret void +} + +// FRONT: define dso_local void @fields_access_2 +// MID: define dso_local void @fields_access_2 +// BACK: define dso_local void @fields_access_2 +void fields_access_2(struct context2 *ctx) { + consume_int(ctx->inner_inline.a); +// FRONT: [[r0:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r1:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r0]]) +// FRONT-NEXT: [[inner_inline:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr [[r1]], i32 0, i32 1 +// FRONT-NEXT: [[r2:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[inner_inline]]) +// FRONT-NEXT: [[a:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r2]], i32 0, i32 0 +// FRONT-NEXT: [[r3:%[a-zA-Z0-9._]+]] = load i32, ptr [[a]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r3]]) + +// TODO: BUG combine getelementptr +// MID: [[inner_inline:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr %ctx, i64 0, i32 1 +// MID-NEXT: [[a13:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.i32(ptr nonnull elementtype(%struct.inner_vof) [[inner_inline]], i1 false, i8 0, i8 4, i1 true, i32 0, i32 0) +// MID-NEXT: tail call void @consume_int(i32 noundef [[a13]]) + +// BACK: [[inner_inline:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr %ctx, i64 0, i32 1 +// BACK-NEXT: [[a1329:%[a-zA-Z0-9._]+]] = getelementptr %struct.inner_vof, ptr [[inner_inline]], i32 0, i32 0 +// BACK-NEXT: [[r0:%[a-zA-Z0-9._]+]] = load i32, ptr [[a1329]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r0]]) + + consume_int(ctx->inner_inline.b); +// FRONT: [[r4:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r5:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r4]]) +// FRONT-NEXT: [[inner_inline1:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr [[r5]], i32 0, i32 1 +// FRONT-NEXT: [[r6:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[inner_inline1]]) +// FRONT-NEXT: [[b:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r6]], i32 0, i32 1 +// FRONT-NEXT: [[r7:%[a-zA-Z0-9._]+]] = load i32, ptr [[b]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r7]]) + +// MID-NEXT: [[b14:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.load.i32(ptr nonnull elementtype(%struct.inner_vof) [[inner_inline]], i1 false, i8 0, i8 4, i1 true, i32 0, i32 1) +// MID-NEXT: tail call void @consume_int(i32 noundef [[b14]]) + +// BACK-NEXT: [[b1430:%[a-zA-Z0-9._]+]] = getelementptr %struct.inner_vof, ptr [[inner_inline]], i32 0, i32 1 +// BACK-NEXT: [[r1:%[a-zA-Z0-9._]+]] = load i32, ptr [[b1430]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r1]]) + + consume_int(ctx->inner_ptr->a); +// FRONT-NEXT: [[r8:%[a-zA-Z0-9._]+]] = load ptr, ptr [[ctx_addr]] +// FRONT-NEXT: [[r9:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r8]]) +// FRONT-NEXT: [[inner_ptr:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr [[r9]], i32 0, i32 2 +// FRONT-NEXT: [[r10:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr]] +// FRONT-NEXT: [[r11:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r10]]) +// FRONT-NEXT: [[a2:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r11]], i32 0, i32 0 +// FRONT-NEXT: [[r12:%[a-zA-Z0-9._]+]] = load i32, ptr [[a2]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r12]]) + +// MID-NEXT: [[inner_ptr15:%[a-zA-Z0-9._]+]] = tail call ptr (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.p0(ptr elementtype(%struct.context2) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 2) +// MID-NEXT: [[a216:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.inner_vof) [[inner_ptr15]], i1 false, i8 0, i8 4, i1 true, i32 0, i32 0) +// MID-NEXT: tail call void @consume_int(i32 noundef [[a216]]) + +// BACK-NEXT: [[inner_ptr1531:%[a-zA-Z0-9._]+]] = getelementptr %struct.context2, ptr %ctx, i32 0, i32 2 +// BACK-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr1531]] +// BACK-NEXT: [[a21632:%[a-zA-Z0-9._]+]] = getelementptr %struct.inner_vof, ptr [[r2]], i32 0, i32 0 +// BACK-NEXT: [[r3:%[a-zA-Z0-9._]+]] = load i32, ptr [[a21632]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r3]]) + + consume_int(ctx->inner_ptr->b); +// FRONT: [[r13:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r14:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r13]]) +// FRONT-NEXT: [[inner_ptr3:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr [[r14]], i32 0, i32 2 +// FRONT-NEXT: [[r15:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr3]] +// FRONT-NEXT: [[r16:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r15]]) +// FRONT-NEXT: [[b4:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r16]], i32 0, i32 1 +// FRONT-NEXT: [[r17:%[a-zA-Z0-9._]+]] = load i32, ptr [[b4]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r17]]) + +// MID-NEXT: [[inner_ptr317:%[a-zA-Z0-9._]+]] = tail call ptr (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.p0(ptr elementtype(%struct.context2) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 2) +// MID-NEXT: [[b418:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.inner_vof) [[inner_ptr317]], i1 false, i8 0, i8 4, i1 true, i32 0, i32 1) +// MID-NEXT: tail call void @consume_int(i32 noundef [[b418]]) + +// BACK-NEXT: [[inner_ptr31733:%[a-zA-Z0-9._]+]] = getelementptr %struct.context2, ptr %ctx, i32 0, i32 2 +// BACK-NEXT: [[r4:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr31733]] +// BACK-NEXT: [[b41834:%[a-zA-Z0-9._]+]] = getelementptr %struct.inner_vof, ptr [[r4]], i32 0, i32 1 +// BACK-NEXT: [[r5:%[a-zA-Z0-9._]+]] = load i32, ptr [[b41834]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r5]]) + + ctx->inner_inline.a = 2; +// FRONT: [[r18:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r19:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r18]]) +// FRONT-NEXT: [[inner_inline5:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr [[r19]], i32 0, i32 1 +// FRONT-NEXT: [[r20:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[inner_inline5]]) +// FRONT-NEXT: [[a6:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r20]], i32 0, i32 0 +// FRONT-NEXT: store i32 2, ptr [[a6]] + +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.store.i32(i32 2, ptr nonnull elementtype(%struct.inner_vof) [[inner_inline]], i1 false, i8 0, i8 4, i1 true, i32 0, i32 0) + +// BACK-NEXT: [[r6:%[a-zA-Z0-9._]+]] = getelementptr %struct.inner_vof, ptr [[inner_inline]], i32 0, i32 0 +// BACK-NEXT: store i32 2, ptr [[r6]] + + ctx->inner_inline.b = 3; +// FRONT: [[r21:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r22:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r21]]) +// FRONT-NEXT: [[inner_inline7:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr [[r22]], i32 0, i32 1 +// FRONT-NEXT: [[r23:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[inner_inline7]]) +// FRONT-NEXT: [[b8:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r23]], i32 0, i32 1 +// FRONT-NEXT: store i32 3, ptr [[b8]] + +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.store.i32(i32 3, ptr nonnull elementtype(%struct.inner_vof) [[inner_inline]], i1 false, i8 0, i8 4, i1 true, i32 0, i32 1) + +// BACK-NEXT: [[r7:%[a-zA-Z0-9._]+]] = getelementptr %struct.inner_vof, ptr [[inner_inline]], i32 0, i32 1 +// BACK-NEXT: store i32 3, ptr [[r7]] + + ctx->inner_ptr->a = 4; +// FRONT: [[r24:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r25:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r24]]) +// FRONT-NEXT: [[inner_ptr9:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr [[r25]], i32 0, i32 2 +// FRONT-NEXT: [[r26:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr9]] +// FRONT-NEXT: [[r27:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r26]]) +// FRONT-NEXT: [[a10:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r27]], i32 0, i32 0 +// FRONT-NEXT: store i32 4, ptr [[a10]] + +// MID-NEXT: [[inner_ptr919:%[a-zA-Z0-9._]+]] = tail call ptr (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.p0(ptr elementtype(%struct.context2) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 2) +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.store.i32(i32 4, ptr elementtype(%struct.inner_vof) [[inner_ptr919]], i1 false, i8 0, i8 4, i1 true, i32 0, i32 0) + +// BACK-NEXT: [[inner_ptr91935:%[a-zA-Z0-9._]+]] = getelementptr %struct.context2, ptr %ctx, i32 0, i32 2 +// BACK-NEXT: [[r8:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr91935]] +// BACK-NEXT: [[r9:%[a-zA-Z0-9._]+]] = getelementptr %struct.inner_vof, ptr [[r8]], i32 0, i32 0 +// BACK-NEXT: store i32 4, ptr [[r9]] + + ctx->inner_ptr->b = 5; +// FRONT: [[r28:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r29:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r28]]) +// FRONT-NEXT: [[inner_ptr11:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr [[r29]], i32 0, i32 2 +// FRONT-NEXT: [[r30:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr11]] +// FRONT-NEXT: [[r31:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r30]]) +// FRONT-NEXT: [[b12:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r31]], i32 0, i32 1 +// FRONT-NEXT: store i32 5, ptr [[b12]] + +// MID-NEXT: [[inner_ptr1120:%[a-zA-Z0-9._]+]] = tail call ptr (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.p0(ptr elementtype(%struct.context2) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 2) +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.store.i32(i32 5, ptr elementtype(%struct.inner_vof) [[inner_ptr1120]], i1 false, i8 0, i8 4, i1 true, i32 0, i32 1) + +// BACK-NEXT: [[inner_ptr112036:%[a-zA-Z0-9._]+]] = getelementptr %struct.context2, ptr %ctx, i32 0, i32 2 +// BACK-NEXT: [[r10:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr112036]] +// BACK-NEXT: [[r11:%[a-zA-Z0-9._]+]] = getelementptr %struct.inner_vof, ptr [[r10]], i32 0, i32 1 +// BACK-NEXT: store i32 5, ptr [[r11]] + + consume_ptr(ctx); +// FRONT: ret void +// MID: ret void +// BACK: ret void +} + +// FRONT: define dso_local void @pointer_access_not_affected_1 +// MID: define dso_local void @pointer_access_not_affected_1 +// BACK: define dso_local void @pointer_access_not_affected_1 +void pointer_access_not_affected_1(struct context1 *ctx) { + consume_ptr(&ctx->inner_inline); +// FRONT: [[r0:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r1:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r0]]) +// FRONT-NEXT: [[inner_inline:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr [[r1]], i32 0, i32 1 +// FRONT-NEXT: call void @consume_ptr(ptr noundef [[inner_inline]]) + +// MID: [[inner_inline:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr %ctx, i64 0, i32 1 +// MID-NEXT: tail call void @consume_ptr(ptr noundef nonnull [[inner_inline]]) + +// BACK: [[inner_inline:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr %ctx, i64 0, i32 1 +// BACK-NEXT: tail call void @consume_ptr(ptr noundef nonnull [[inner_inline]]) + + consume_ptr(&ctx->inner_inline.b); +// FRONT-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r3:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r2]]) +// FRONT-NEXT: [[inner_inline1:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr [[r3]], i32 0, i32 1 +// FRONT-NEXT: [[b:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[inner_inline1]], i32 0, i32 1 + +// MID-NEXT: [[b:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr %ctx, i64 0, i32 1, i32 1 +// MID-NEXT: tail call void @consume_ptr(ptr noundef nonnull %b) 
+ +// BACK-NEXT: [[b:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr %ctx, i64 0, i32 1, i32 1 +// BACK-NEXT: tail call void @consume_ptr(ptr noundef nonnull %b) + + consume_ptr(&ctx->inner_ptr); +// FRONT-NEXT: call void @consume_ptr(ptr noundef [[b]]) +// FRONT-NEXT: [[r4:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r5:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r4]]) +// FRONT-NEXT: [[inner_ptr:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr [[r5]], i32 0, i32 2 + +// MID-NEXT: [[inner_ptr:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr %ctx, i64 0, i32 2 +// MID-NEXT: tail call void @consume_ptr(ptr noundef nonnull [[inner_ptr]]) + +// BACK-NEXT: [[inner_ptr:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr %ctx, i64 0, i32 2 +// BACK-NEXT: tail call void @consume_ptr(ptr noundef nonnull [[inner_ptr]]) + + consume_ptr(&ctx->inner_ptr->b); +// FRONT-NEXT: call void @consume_ptr(ptr noundef [[inner_ptr]]) +// FRONT-NEXT: [[r6:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r7:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r6]]) +// FRONT-NEXT: [[inner_ptr2:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr [[r7]], i32 0, i32 2 +// FRONT-NEXT: [[r8:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr2]] +// FRONT-NEXT: [[b3:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r8]], i32 0, i32 1 + +// MID-NEXT: [[inner_ptr24:%[a-zA-Z0-9._]+]] = tail call ptr (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.p0(ptr elementtype(%struct.context1) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 2) +// MID-NEXT: [[b3:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[inner_ptr24]], i64 0, i32 1 +// MID-NEXT: tail call void @consume_ptr(ptr noundef nonnull [[b3]]) + +// BACK-NEXT: [[inner_ptr249:%[a-zA-Z0-9._]+]] = getelementptr %struct.context1, ptr %ctx, i32 0, i32 2 +// BACK-NEXT: [[r0:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr249]] +// BACK-NEXT: [[b3:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r0]], i64 0, i32 1 +// BACK-NEXT: tail call void @consume_ptr(ptr noundef nonnull [[b3]]) + + consume_ptr(&ctx->last); +// FRONT-NEXT: call void @consume_ptr(ptr noundef [[b3]]) +// FRONT-NEXT: [[r9:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r10:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r9]]) +// FRONT-NEXT: [[last:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr [[r10]], i32 0, i32 4 + +// MID-NEXT: [[last:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr %ctx, i64 0, i32 4 +// MID-NEXT: tail call void @consume_ptr(ptr noundef nonnull [[last]]) + +// BACK-NEXT: [[last:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context1, ptr %ctx, i64 0, i32 4 +// BACK-NEXT: tail call void @consume_ptr(ptr noundef nonnull [[last]]) + +// FRONT: ret void +// MID: ret void +// BACK: ret void +} + +// FRONT: define dso_local void @pointer_access_not_affected_2 +// MID: define dso_local void @pointer_access_not_affected_2 +// BACK: define dso_local void @pointer_access_not_affected_2 +void pointer_access_not_affected_2(struct context2 *ctx) { + consume_ptr(&ctx->inner_inline); +// FRONT: [[r0:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r1:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r0]]) +// FRONT-NEXT: [[inner_inline:%[a-zA-Z0-9._]+]] = getelementptr 
inbounds %struct.context2, ptr [[r1]], i32 0, i32 1 +// FRONT-NEXT: call void @consume_ptr(ptr noundef [[inner_inline]]) + +// MID: [[inner_inline:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr %ctx, i64 0, i32 1 +// MID-NEXT: tail call void @consume_ptr(ptr noundef nonnull [[inner_inline]]) + +// BACK: [[inner_inline:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr %ctx, i64 0, i32 1 +// BACK-NEXT: tail call void @consume_ptr(ptr noundef nonnull [[inner_inline]]) + + consume_ptr(&ctx->inner_inline.b); +// FRONT-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r3:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r2]]) +// FRONT-NEXT: [[inner_inline1:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr [[r3]], i32 0, i32 1 +// FRONT-NEXT: [[r4:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[inner_inline1]]) +// FRONT-NEXT: [[b:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r4]], i32 0, i32 1 + +// MID-NEXT: [[b:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr %ctx, i64 0, i32 1, i32 1 +// MID-NEXT: tail call void @consume_ptr(ptr noundef nonnull %b) + +// BACK-NEXT: [[b:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr %ctx, i64 0, i32 1, i32 1 +// BACK-NEXT: tail call void @consume_ptr(ptr noundef nonnull %b) + + consume_ptr(&ctx->inner_ptr); +// FRONT-NEXT: call void @consume_ptr(ptr noundef [[b]]) +// FRONT-NEXT: [[r5:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r6:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r5]]) +// FRONT-NEXT: [[inner_ptr:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr [[r6]], i32 0, i32 2 +// FRONT-NEXT: call void @consume_ptr(ptr noundef [[inner_ptr]]) + +// MID-NEXT: [[inner_ptr:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr %ctx, i64 0, i32 2 +// MID-NEXT: tail call void @consume_ptr(ptr noundef nonnull [[inner_ptr]]) + +// BACK-NEXT: [[inner_ptr:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr %ctx, i64 0, i32 2 +// BACK-NEXT: tail call void @consume_ptr(ptr noundef nonnull [[inner_ptr]]) + + consume_ptr(&ctx->inner_ptr->b); +// FRONT-NEXT: [[r7:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r8:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r7]]) +// FRONT-NEXT: [[inner_ptr2:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context2, ptr [[r8]], i32 0, i32 2 +// FRONT-NEXT: [[r9:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr2]] +// FRONT-NEXT: [[r10:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r9]]) +// FRONT-NEXT: [[b3:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r10]], i32 0, i32 1 + +// MID-NEXT: [[inner_ptr24:%[a-zA-Z0-9._]+]] = tail call ptr (ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.load.p0(ptr elementtype(%struct.context2) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 2) +// MID-NEXT: [[b3:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[inner_ptr24]], i64 0, i32 1 +// MID-NEXT: tail call void @consume_ptr(ptr noundef nonnull [[b3]]) + +// BACK-NEXT: [[inner_ptr248:%[a-zA-Z0-9._]+]] = getelementptr %struct.context2, ptr %ctx, i32 0, i32 2 +// BACK-NEXT: [[r0:%[a-zA-Z0-9._]+]] = load ptr, ptr [[inner_ptr248]] +// BACK-NEXT: [[b3:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r0]], i64 0, i32 1 +// BACK-NEXT: tail call void @consume_ptr(ptr noundef nonnull [[b3]]) + +// FRONT: ret void +// MID: ret void +// BACK: ret void +} + +struct context3 { + int first; + struct inner_no_vof *no_vof_ptr; + struct inner_no_vof no_vof_arr[5]; + struct inner_no_vof *no_vof_arr_ptr[7]; + struct inner_vof *vof_ptr; + struct inner_vof vof_arr[5]; + struct inner_vof *vof_arr_ptr[7]; + int arr[4]; +} __virtual_offsets__; + +// FRONT: define dso_local void @array_access +// MID: define dso_local void @array_access +// BACK: define dso_local void @array_access +void array_access(struct context3 *ctx) { + consume_int(ctx->no_vof_ptr[1].b); +// FRONT: [[r0:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r1:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r0]]) +// FRONT-NEXT: [[no_vof_ptr:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr [[r1]], i32 0, i32 1 +// FRONT-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load ptr, ptr [[no_vof_ptr]] +// FRONT-NEXT: [[arrayidx:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r2]], i64 1 +// FRONT-NEXT: [[b:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[arrayidx]], i32 0, i32 1 +// FRONT-NEXT: [[r3:%[a-zA-Z0-9._]+]] = load i32, ptr [[b]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r3]]) + +// MID: [[no_vof_ptr29:%[a-zA-Z0-9._]+]] = tail call ptr (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.p0(ptr elementtype(%struct.context3) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 1) +// MID-NEXT: [[b:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[no_vof_ptr29]], i64 1, i32 1 +// MID-NEXT: [[r0:%[a-zA-Z0-9._]+]] = load i32, ptr %b +// MID-NEXT: tail call void @consume_int(i32 noundef [[r0]]) + +// BACK: [[no_vof_ptr2947:%[a-zA-Z0-9._]+]] = getelementptr %struct.context3, ptr %ctx, i32 0, i32 1 +// BACK-NEXT: [[r0:%[a-zA-Z0-9._]+]] = load ptr, ptr [[no_vof_ptr2947]] +// BACK-NEXT: [[b:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r0]], i64 1, i32 1 +// BACK-NEXT: [[r1:%[a-zA-Z0-9._]+]] = load i32, ptr %b +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r1]]) + + consume_int(ctx->no_vof_arr[2].b); +// FRONT-NEXT: [[r4:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r5:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r4]]) +// FRONT-NEXT: [[no_vof_arr:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr [[r5]], i32 0, i32 2 +// FRONT-NEXT: [[arrayidx1:%[a-zA-Z0-9._]+]] = getelementptr inbounds [5 x %struct.inner_no_vof], ptr [[no_vof_arr]], i64 0, i64 2 +// FRONT-NEXT: [[b2:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[arrayidx1]], i32 0, i32 1 +// FRONT-NEXT: [[r6:%[a-zA-Z0-9._]+]] = load i32, ptr [[b2]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r6]]) + +//TODO: BUG! 
+// MID-NEXT: [[b2:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr %ctx, i64 0, i32 2, i64 2, i32 1 +// MID-NEXT: [[r1:%[a-zA-Z0-9._]+]] = load i32, ptr [[b2]] +// MID-NEXT: tail call void @consume_int(i32 noundef [[r1]]) + +// BACK-NEXT: [[b2:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr %ctx, i64 0, i32 2, i64 2, i32 1 +// BACK-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load i32, ptr [[b2]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r2]]) + + consume_int(ctx->no_vof_arr_ptr[3]->a); +// FRONT-NEXT: [[r7:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r8:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r7]]) +// FRONT-NEXT: [[no_vof_arr_ptr:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr [[r8]], i32 0, i32 3 +// FRONT-NEXT: [[arrayidx3:%[a-zA-Z0-9._]+]] = getelementptr inbounds [7 x ptr], ptr [[no_vof_arr_ptr]], i64 0, i64 3 +// FRONT-NEXT: [[r9:%[a-zA-Z0-9._]+]] = load ptr, ptr [[arrayidx3]] +// FRONT-NEXT: [[a:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r9]], i32 0, i32 0 +// FRONT-NEXT: [[r10:%[a-zA-Z0-9._]+]] = load i32, ptr [[a]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r10]]) + +// MID-NEXT: [[arrayidx3:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr %ctx, i64 0, i32 3, i64 3 +// MID-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load ptr, ptr [[arrayidx3]] +// MID-NEXT: [[r3:%[a-zA-Z0-9._]+]] = load i32, ptr [[r2]] +// MID-NEXT: tail call void @consume_int(i32 noundef [[r3]]) + +// BACK-NEXT: [[arrayidx3:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr %ctx, i64 0, i32 3, i64 3 +// BACK-NEXT: [[r3:%[a-zA-Z0-9._]+]] = load ptr, ptr [[arrayidx3]] +// BACK-NEXT: [[r4:%[a-zA-Z0-9._]+]] = load i32, ptr [[r3]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r4]]) + + consume_int(ctx->vof_ptr[1].b); +// FRONT-NEXT: [[r11:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r12:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r11]]) +// FRONT-NEXT: [[vof_ptr:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr [[r12]], i32 0, i32 4 +// FRONT-NEXT: [[r13:%[a-zA-Z0-9._]+]] = load ptr, ptr [[vof_ptr]] +// FRONT-NEXT: [[arrayidx4:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r13]], i64 1 +// FRONT-NEXT: [[r14:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[arrayidx4]]) +// FRONT-NEXT: [[b5:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r14]], i32 0, i32 1 +// FRONT-NEXT: [[r15:%[a-zA-Z0-9._]+]] = load i32, ptr [[b5]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r15]]) + +// MID-NEXT: [[vof_ptr30:%[a-zA-Z0-9._]+]] = tail call ptr (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.p0(ptr elementtype(%struct.context3) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 4) +// MID-NEXT: [[arrayidx4:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[vof_ptr30]], i64 1 +// MID-NEXT: [[b531:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.load.i32(ptr nonnull elementtype(%struct.inner_vof) [[arrayidx4]], i1 false, i8 0, i8 4, i1 true, i32 0, i32 1) +// MID-NEXT: tail call void @consume_int(i32 noundef [[b531]]) + +// BACK-NEXT: [[vof_ptr3048:%[a-zA-Z0-9._]+]] = getelementptr %struct.context3, ptr %ctx, i32 0, i32 4 +// BACK-NEXT: [[r5:%[a-zA-Z0-9._]+]] = load ptr, ptr [[vof_ptr3048]] +// BACK-NEXT: [[arrayidx4:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r5]], i64 1 +// BACK-NEXT: [[b53149:%[a-zA-Z0-9._]+]] = getelementptr %struct.inner_vof, ptr [[arrayidx4]], i32 0, i32 1 +// BACK-NEXT: [[r6:%[a-zA-Z0-9._]+]] = load i32, ptr [[b53149]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r6]]) + + consume_int(ctx->vof_arr[2].b); +// FRONT-NEXT: [[r16:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r17:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r16]]) +// FRONT-NEXT: [[vof_arr:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr [[r17]], i32 0, i32 5 +// FRONT-NEXT: [[arrayidx6:%[a-zA-Z0-9._]+]] = getelementptr inbounds [5 x %struct.inner_vof], ptr [[vof_arr]], i64 0, i64 2 +// FRONT-NEXT: [[r18:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[arrayidx6]]) +// FRONT-NEXT: [[b7:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r18]], i32 0, i32 1 +// FRONT-NEXT: [[r19:%[a-zA-Z0-9._]+]] = load i32, ptr [[b7]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r19]]) + +// MID-NEXT: [[arrayidx6:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr %ctx, i64 0, i32 5, i64 2 +// MID-NEXT: [[b732:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.i32(ptr nonnull elementtype(%struct.inner_vof) [[arrayidx6]], i1 false, i8 0, i8 4, i1 true, i32 0, i32 1) +// MID-NEXT: tail call void @consume_int(i32 noundef [[b732]]) + +// BACK-NEXT: [[arrayidx6:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr %ctx, i64 0, i32 5, i64 2 +// BACK-NEXT: [[b73250:%[a-zA-Z0-9._]+]] = getelementptr %struct.inner_vof, ptr [[arrayidx6]], i32 0, i32 1 +// BACK-NEXT: [[r7:%[a-zA-Z0-9._]+]] = load i32, ptr [[b73250]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r7]]) + + consume_int(ctx->vof_arr_ptr[3]->b); +// FRONT-NEXT: [[r20:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r21:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r20]]) +// FRONT-NEXT: [[vof_arr_ptr:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr [[r21]], i32 0, i32 6 +// FRONT-NEXT: [[arrayidx8:%[a-zA-Z0-9._]+]] = getelementptr inbounds [7 x ptr], ptr [[vof_arr_ptr]], i64 0, i64 3 +// FRONT-NEXT: [[r22:%[a-zA-Z0-9._]+]] = load ptr, ptr [[arrayidx8]] +// FRONT-NEXT: [[r23:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r22]]) +// FRONT-NEXT: [[b9:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r23]], i32 0, i32 1 +// FRONT-NEXT: [[r24:%[a-zA-Z0-9._]+]] = load i32, ptr [[b9]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r24]]) + +// MID-NEXT: [[arrayidx8:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr %ctx, i64 0, i32 6, i64 3 +// MID-NEXT: [[r4:%[a-zA-Z0-9._]+]] = load ptr, ptr [[arrayidx8]] +// MID-NEXT: [[b933:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.inner_vof) [[r4]], i1 false, i8 0, i8 4, i1 true, i32 0, i32 1) +// MID-NEXT: tail call void @consume_int(i32 noundef [[b933]]) + +// BACK-NEXT: [[arrayidx8:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr %ctx, i64 0, i32 6, i64 3 +// BACK-NEXT: [[r8:%[a-zA-Z0-9._]+]] = load ptr, ptr [[arrayidx8]] +// BACK-NEXT: [[b93351:%[a-zA-Z0-9._]+]] = getelementptr %struct.inner_vof, ptr [[r8]], i32 0, i32 1 +// BACK-NEXT: [[r9:%[a-zA-Z0-9._]+]] = load i32, ptr [[b93351]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r9]]) + + ctx->no_vof_ptr[1].b = 2; +// FRONT-NEXT: [[r25:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r26:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r25]]) +// FRONT-NEXT: [[no_vof_ptr10:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr [[r26]], i32 0, i32 1 +// FRONT-NEXT: [[r27:%[a-zA-Z0-9._]+]] = load ptr, ptr [[no_vof_ptr10]] +// FRONT-NEXT: [[arrayidx11:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r27]], i64 1 +// FRONT-NEXT: [[b12:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[arrayidx11]], i32 0, i32 1 +// FRONT-NEXT: store i32 2, ptr [[b12]] + +// MID-NEXT: [[no_vof_ptr1034:%[a-zA-Z0-9._]+]] = tail call ptr (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.p0(ptr elementtype(%struct.context3) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 1) +// MID-NEXT: [[b12:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[no_vof_ptr1034]], i64 1, i32 1 +// MID-NEXT: store i32 2, ptr [[b12]] + +// BACK-NEXT: [[no_vof_ptr103452:%[a-zA-Z0-9._]+]] = getelementptr %struct.context3, ptr %ctx, i32 0, i32 1 +// BACK-NEXT: [[r10:%[a-zA-Z0-9._]+]] = load ptr, ptr [[no_vof_ptr103452]] +// BACK-NEXT: [[b12:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r10]], i64 1, i32 1 +// BACK-NEXT: store i32 2, ptr [[b12]] + + ctx->no_vof_arr[2].b = 4; +// FRONT-NEXT: [[r28:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r29:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r28]]) +// FRONT-NEXT: [[no_vof_arr13:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr [[r29]], i32 0, i32 2 +// FRONT-NEXT: [[arrayidx14:%[a-zA-Z0-9._]+]] = getelementptr inbounds [5 x %struct.inner_no_vof], ptr [[no_vof_arr13]], i64 0, i64 2 +// FRONT-NEXT: [[b15:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[arrayidx14]], i32 0, i32 1 +// FRONT-NEXT: store i32 4, ptr [[b15]] + +//TODO: BUG +// MID-NEXT: store i32 4, ptr [[b2]] +// BACK-NEXT: store i32 4, ptr [[b2]] + + ctx->no_vof_arr_ptr[3]->b = 6; +// FRONT-NEXT: [[r30:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r31:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r30]]) +// FRONT-NEXT: [[no_vof_arr_ptr16:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr [[r31]], i32 0, i32 3 +// FRONT-NEXT: [[arrayidx17:%[a-zA-Z0-9._]+]] = getelementptr inbounds [7 x ptr], ptr [[no_vof_arr_ptr16]], i64 0, i64 3 +// FRONT-NEXT: [[r32:%[a-zA-Z0-9._]+]] = load ptr, ptr [[arrayidx17]] +// FRONT-NEXT: [[b18:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r32]], i32 0, i32 1 +// FRONT-NEXT: store i32 6, ptr [[b18]] + +// MID-NEXT: [[r5:%[a-zA-Z0-9._]+]] = load ptr, ptr [[arrayidx3]] +// MID-NEXT: [[b18:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r5]], i64 0, i32 1 +// MID-NEXT: 
store i32 6, ptr [[b18]] + +// BACK-NEXT: [[r11:%[a-zA-Z0-9._]+]] = load ptr, ptr [[arrayidx3]] +// BACK-NEXT: [[b18:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_no_vof, ptr [[r11]], i64 0, i32 1 +// BACK-NEXT: store i32 6, ptr [[b18]] + + ctx->vof_ptr[1].b = 2; +// FRONT-NEXT: [[r33:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r34:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r33]]) +// FRONT-NEXT: [[vof_ptr19:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr [[r34]], i32 0, i32 4 +// FRONT-NEXT: [[r35:%[a-zA-Z0-9._]+]] = load ptr, ptr [[vof_ptr19]] +// FRONT-NEXT: [[arrayidx20:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r35]], i64 1 +// FRONT-NEXT: [[r36:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[arrayidx20]]) +// FRONT-NEXT: [[b21:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r36]], i32 0, i32 1 +// FRONT-NEXT: store i32 2, ptr [[b21]] + +// MID-NEXT: [[vof_ptr1928:%[a-zA-Z0-9._]+]] = tail call ptr (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.p0(ptr elementtype(%struct.context3) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 4) +// MID-NEXT: [[arrayidx20:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[vof_ptr1928]], i64 1 +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.store.i32(i32 2, ptr nonnull elementtype(%struct.inner_vof) [[arrayidx20]], i1 false, i8 0, i8 4, i1 true, i32 0, i32 1) + +// BACK-NEXT: [[vof_ptr192853:%[a-zA-Z0-9._]+]] = getelementptr %struct.context3, ptr %ctx, i32 0, i32 4 +// BACK-NEXT: [[r12:%[a-zA-Z0-9._]+]] = load ptr, ptr [[vof_ptr192853]] +// BACK-NEXT: [[arrayidx20:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r12]], i64 1 +// BACK-NEXT: [[r13:%[a-zA-Z0-9._]+]] = getelementptr %struct.inner_vof, ptr [[arrayidx20]], i32 0, i32 1 +// BACK-NEXT: store i32 2, ptr [[r13]] + + ctx->vof_arr[2].b = 4; +// FRONT-NEXT: [[r37:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r38:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r37]]) +// FRONT-NEXT: [[vof_arr22:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr [[r38]], i32 0, i32 5 +// FRONT-NEXT: [[arrayidx23:%[a-zA-Z0-9._]+]] = getelementptr inbounds [5 x %struct.inner_vof], ptr [[vof_arr22]], i64 0, i64 2 +// FRONT-NEXT: [[r39:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[arrayidx23]]) +// FRONT-NEXT: [[b24:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r39]], i32 0, i32 1 +// FRONT-NEXT: store i32 4, ptr [[b24]] + +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.store.i32(i32 4, ptr nonnull elementtype(%struct.inner_vof) [[arrayidx6]], i1 false, i8 0, i8 4, i1 true, i32 0, i32 1) + +// BACK-NEXT: [[r14:%[a-zA-Z0-9._]+]] = getelementptr %struct.inner_vof, ptr [[arrayidx6]], i32 0, i32 1 +// BACK-NEXT: store i32 4, ptr [[r14]] + + ctx->vof_arr_ptr[3]->b = 6; +// FRONT-NEXT: [[r40:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r41:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r40]]) +// FRONT-NEXT: [[vof_arr_ptr25:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context3, ptr [[r41]], i32 0, i32 6 +// FRONT-NEXT: [[arrayidx26:%[a-zA-Z0-9._]+]] = getelementptr inbounds [7 x ptr], ptr [[vof_arr_ptr25]], i64 0, i64 3 +// FRONT-NEXT: [[r42:%[a-zA-Z0-9._]+]] = load ptr, ptr [[arrayidx26]] +// FRONT-NEXT: [[r43:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r42]]) +// FRONT-NEXT: [[b27:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.inner_vof, ptr [[r43]], i32 0, i32 1 +// FRONT-NEXT: store i32 6, ptr [[b27]] + +// MID-NEXT: [[r6:%[a-zA-Z0-9._]+]] = load ptr, ptr [[arrayidx8]] +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.store.i32(i32 6, ptr elementtype(%struct.inner_vof) [[r6]], i1 false, i8 0, i8 4, i1 true, i32 0, i32 1) + +// BACK-NEXT: [[r15:%[a-zA-Z0-9._]+]] = load ptr, ptr [[arrayidx8]] +// BACK-NEXT: [[r16:%[a-zA-Z0-9._]+]] = getelementptr %struct.inner_vof, ptr [[r15]], i32 0, i32 1 +// BACK-NEXT: store i32 6, ptr [[r16]] + consume_ptr(ctx); + +// FRONT: ret void +// MID: ret void +// BACK: ret void +} + +typedef int aligned_int __attribute__((aligned(128))); + +struct context4 { + volatile int a; + aligned_int b; + volatile aligned_int c; +} __virtual_offsets__; + +// FRONT: define dso_local void @load_and_store_attrs +// MID: define dso_local void @load_and_store_attrs +// BACK: define dso_local void @load_and_store_attrs +void load_and_store_attrs(struct context4 *ctx) { + int r; + consume_int(ctx->a); +// FRONT: [[r0:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r1:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r0]]) +// FRONT-NEXT: [[a:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context4, ptr [[r1]], i32 0, i32 0 +// FRONT-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load volatile i32, ptr [[a]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r2]]) + +// MID: [[a6:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.context4) %ctx, i1 true, i8 0, i8 -128, i1 true, i32 0, i32 0) +// MID-NEXT: tail call void @consume_int(i32 noundef [[a6]]) + +// BACK: [[a619:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context4, ptr %ctx, i32 0, i32 0 +// BACK-NEXT: [[r0:%[a-zA-Z0-9._]+]] = load volatile i32, ptr [[a619]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r0]]) + + consume_int(ctx->b); +// FRONT-NEXT: [[r3:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r4:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r3]]) +// FRONT-NEXT: [[b:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context4, ptr [[r4]], i32 0, i32 2 +// FRONT-NEXT: [[r5:%[a-zA-Z0-9._]+]] = load i32, ptr [[b]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r5]]) + +// MID-NEXT: [[b7:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.context4) %ctx, i1 false, i8 0, i8 -128, i1 true, i32 0, i32 2) +// MID-NEXT: tail call void @consume_int(i32 noundef [[b7]]) + +// BACK-NEXT: [[b720:%[a-zA-Z0-9._]+]] = getelementptr %struct.context4, ptr %ctx, i32 0, i32 2 +// BACK-NEXT: [[r1:%[a-zA-Z0-9._]+]] = load i32, ptr [[b720]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r1]]) + + consume_int(ctx->c); +// FRONT-NEXT: [[r6:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r7:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r6]]) +// FRONT-NEXT: [[c:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context4, ptr [[r7]], i32 0, i32 4 +// FRONT-NEXT: [[r8:%[a-zA-Z0-9._]+]] = load volatile i32, ptr [[c]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r8]]) + +// MID-NEXT: [[c8:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.context4) %ctx, i1 true, i8 0, i8 -128, i1 true, i32 0, i32 4) +// MID-NEXT: tail call void @consume_int(i32 noundef [[c8]]) + +// BACK-NEXT: [[c821:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context4, ptr %ctx, i32 0, i32 4 +// BACK-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load volatile i32, ptr [[c821]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r2]]) + + __atomic_load(&ctx->a, &r, 2); + consume_int(r); +// FRONT-NEXT: [[r9:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r10:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r9]]) +// FRONT-NEXT: [[a1:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context4, ptr [[r10]], i32 0, i32 0 +// FRONT-NEXT: [[r11:%[a-zA-Z0-9._]+]] = load atomic volatile i32, ptr [[a1]] acquire +// FRONT: call void @consume_int + +// MID-NEXT: [[a19:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.context4) %ctx, i1 true, i8 4, i8 -128, i1 true, i32 0, i32 0) +// MID-NEXT: tail call void @consume_int(i32 noundef [[a19]]) + +// BACK-NEXT: [[a1922:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context4, ptr %ctx, i32 0, i32 0 +// BACK-NEXT: [[r3:%[a-zA-Z0-9._]+]] = load atomic volatile i32, ptr [[a1922]] acquire +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r3]]) + + ctx->a = 1; +// FRONT-NEXT: [[r13:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r14:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r13]]) +// FRONT-NEXT: [[a2:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context4, ptr [[r14]], i32 0, i32 0 +// FRONT-NEXT: store volatile i32 1, ptr [[a2]] + +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.store.i32(i32 1, ptr elementtype(%struct.context4) %ctx, i1 true, i8 0, i8 -128, i1 true, i32 0, i32 0) + +// BACK-NEXT: [[r4:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context4, ptr %ctx, i32 0, i32 0 +// BACK-NEXT: store volatile i32 1, ptr [[r4]] + + ctx->b = 2; +// FRONT-NEXT: [[r15:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r16:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r15]]) +// FRONT-NEXT: [[b3:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context4, ptr [[r16]], i32 0, i32 2 +// FRONT-NEXT: store i32 2, ptr [[b3]] + +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.store.i32(i32 2, ptr elementtype(%struct.context4) %ctx, i1 false, i8 0, i8 -128, i1 true, i32 0, i32 2) + +// BACK-NEXT: [[r5:%[a-zA-Z0-9._]+]] = getelementptr %struct.context4, ptr %ctx, i32 0, i32 2 +// BACK-NEXT: store i32 2, ptr [[r5]] + + ctx->c = 3; +// FRONT-NEXT: [[r17:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r18:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r17]]) +// FRONT-NEXT: [[c4:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context4, ptr [[r18]], i32 0, i32 4 +// FRONT-NEXT: store volatile i32 3, ptr [[c4]] + +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.store.i32(i32 3, ptr elementtype(%struct.context4) %ctx, i1 true, i8 0, i8 -128, i1 true, i32 0, i32 4) + +// BACK-NEXT: [[r6:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context4, ptr %ctx, i32 0, i32 4 +// BACK-NEXT: store volatile i32 3, ptr [[r6]] + + r = 7; + __atomic_store(&ctx->a, &r, 3); +// FRONT-NEXT: store i32 7, ptr %r +// FRONT-NEXT: [[r19:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r20:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r19]]) +// FRONT-NEXT: [[a5:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context4, ptr [[r20]], i32 0, i32 0 +// FRONT-NEXT: [[r21:%[a-zA-Z0-9._]+]] = load i32, ptr %r +// FRONT-NEXT: store atomic volatile i32 [[r21]], ptr [[a5]] release + +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.store.i32(i32 7, ptr elementtype(%struct.context4) %ctx, i1 true, i8 5, i8 -128, i1 true, i32 0, i32 0) + +// BACK-NEXT: [[r7:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context4, ptr %ctx, i32 0, i32 0 +// BACK-NEXT: store atomic volatile i32 7, ptr [[r7]] release +// BACK-NEXT: tail call void @consume_ptr(ptr noundef %ctx) + + consume_ptr(ctx); +// FRONT: ret void +// MID: ret void +// BACK: ret void +} + +struct context5 { + union { + int a; + float b; + } u; +} __virtual_offsets__; + +// FRONT: define dso_local void @union_access +// MID: define dso_local void @union_access +// BACK: define dso_local void @union_access +void union_access(struct context5 *ctx1, struct context5 *ctx2) { + consume_int(ctx1->u.a); +// FRONT: [[r0:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx1.addr +// FRONT-NEXT: [[r1:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r0]]) +// FRONT-NEXT: [[u:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context5, ptr [[r1]], i32 0, i32 0 +// FRONT-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load i32, ptr [[u]] +// FRONT-NEXT: call void @consume_int(i32 noundef [[r2]]) + +// MID: [[u4:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.context5) %ctx1, i1 false, i8 0, i8 4, i1 true, i32 0, i32 0) +// MID-NEXT: tail call void @consume_int(i32 noundef [[u4]]) + +// BACK: [[u410:%[a-zA-Z0-9._]+]] = getelementptr %struct.context5, ptr %ctx1, i32 0, i32 0 +// BACK-NEXT: [[r0:%[a-zA-Z0-9._]+]] = load i32, ptr [[u410]] +// BACK-NEXT: tail call void @consume_int(i32 noundef [[r0]]) + + consume_float(ctx2->u.b); +// FRONT-NEXT: [[r3:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx2.addr +// FRONT-NEXT: [[r4:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r3]]) +// FRONT-NEXT: [[u1:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context5, ptr [[r4]], i32 0, i32 0 +// FRONT-NEXT: [[r5:%[a-zA-Z0-9._]+]] = load float, ptr [[u1]] +// FRONT-NEXT: call void @consume_float(float noundef [[r5]]) + +// MID-NEXT: [[u15:%[a-zA-Z0-9._]+]] = tail call float (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.f32(ptr elementtype(%struct.context5) %ctx2, i1 false, i8 0, i8 4, i1 true, i32 0, i32 0) +// MID-NEXT: tail call void @consume_float(float noundef [[u15]]) + +// BACK-NEXT: [[u1511:%[a-zA-Z0-9._]+]] = getelementptr %struct.context5, ptr %ctx2, i32 0, i32 0 +// BACK-NEXT: [[r1:%[a-zA-Z0-9._]+]] = load float, ptr [[u1511]] +// BACK-NEXT: tail call void @consume_float(float noundef [[r1]]) + + ctx1->u.a = 2; +// FRONT-NEXT: [[r6:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx1.addr +// FRONT-NEXT: [[r7:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r6]]) +// FRONT-NEXT: [[u2:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context5, ptr [[r7]], i32 0, i32 0 +// FRONT-NEXT: store i32 2, ptr [[u2]] + +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.store.i32(i32 2, ptr elementtype(%struct.context5) %ctx1, i1 false, i8 0, i8 4, i1 true, i32 0, i32 0) + +// BACK-NEXT: [[r2:%[a-zA-Z0-9._]+]] = getelementptr %struct.context5, ptr %ctx1, i32 0, i32 0 +// BACK-NEXT: store i32 2, ptr [[r2]] + + ctx2->u.b = 3.4f; +// FRONT-NEXT: [[r8:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx2.addr +// FRONT-NEXT: [[r9:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r8]]) +// FRONT-NEXT: [[u3:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.context5, ptr [[r9]], i32 0, i32 0 +// FRONT-NEXT: store float {{.*}}, ptr [[u3]] + +// MID-NEXT: tail call void (float, ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.store.f32(float 0x400B333340000000, ptr elementtype(%struct.context5) %ctx2, i1 false, i8 0, i8 4, i1 true, i32 0, i32 0) + +// BACK-NEXT: [[r3:%[a-zA-Z0-9._]+]] = getelementptr %struct.context5, ptr %ctx2, i32 0, i32 0 +// BACK-NEXT: store float {{.*}}, ptr [[r3]] + + consume_ptr(ctx1); + consume_ptr(ctx2); +// FRONT: ret void +// MID: ret void +// BACK: ret void +} + +// TODO: add an offset at the start! 
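One way the TODO above could be covered is sketched here purely as an illustration (it is not part of the patch; the names context7 and bitfield_access_offset are invented): a variant of the bit-field test in which the bit-fields are preceded by an ordinary member, so the bit-field storage unit no longer sits at offset 0 of the marked struct. With a non-zero offset the storage-unit access goes through a getelementptr, so such a case would exercise whether the marker rewrite also fires for bit-field loads and stores, unlike the offset-0 case below where plain i8 loads/stores on %ctx remain.

struct context7 {
  int pad;        /* pushes the bit-field storage unit to a non-zero offset */
  unsigned a:1;
  unsigned b:2;
} __virtual_offsets__;

void bitfield_access_offset(struct context7 *ctx) {
  /* Reuses the consume_* externs declared at the top of this test. */
  consume_int(ctx->b);
  ctx->a = 1;
  consume_ptr(ctx);
}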
+struct context6 { + unsigned a:1; + unsigned b:2; +} __virtual_offsets__; + +// FRONT: define dso_local void @bitfield_access +// MID: define dso_local void @bitfield_access +// BACK: define dso_local void @bitfield_access +void bitfield_access(struct context6 *ctx) { + consume_int(ctx->a); +// FRONT: [[r0:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r1:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r0]]) +// FRONT-NEXT: [[bf_load:%[a-zA-Z0-9._]+]] = load i8, ptr [[r1]] +// FRONT-NEXT: [[bf_clear:%[a-zA-Z0-9._]+]] = and i8 [[bf_load]], 1 +// FRONT-NEXT: [[bf_cast:%[a-zA-Z0-9._]+]] = zext i8 [[bf_clear]] to i32 +// FRONT-NEXT: call void @consume_int(i32 noundef [[bf_cast]]) + +// MID: [[bf_load:%[a-zA-Z0-9._]+]] = load i8, ptr %ctx +// MID-NEXT: [[bf_clear:%[a-zA-Z0-9._]+]] = and i8 [[bf_load]], 1 +// MID-NEXT: [[bf_cast:%[a-zA-Z0-9._]+]] = zext i8 [[bf_clear]] to i32 +// MID-NEXT: tail call void @consume_int(i32 noundef [[bf_cast]]) + + consume_int(ctx->b); +// FRONT-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r3:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r2]]) +// FRONT-NEXT: [[bf_load1:%[a-zA-Z0-9._]+]] = load i8, ptr [[r3]] +// FRONT-NEXT: [[bf_lshr:%[a-zA-Z0-9._]+]] = lshr i8 [[bf_load1]], 1 +// FRONT-NEXT: [[bf_clear2:%[a-zA-Z0-9._]+]] = and i8 [[bf_lshr]], 3 +// FRONT-NEXT: [[bf_cast3:%[a-zA-Z0-9._]+]] = zext i8 [[bf_clear2]] to i32 +// FRONT-NEXT: call void @consume_int(i32 noundef [[bf_cast3]]) + +// MID-NEXT: [[bf_load1:%[a-zA-Z0-9._]+]] = load i8, ptr %ctx +// MID-NEXT: [[bf_lshr:%[a-zA-Z0-9._]+]] = lshr i8 [[bf_load1]], 1 +// MID-NEXT: [[bf_clear2:%[a-zA-Z0-9._]+]] = and i8 [[bf_lshr]], 3 +// MID-NEXT: [[bf_cast3:%[a-zA-Z0-9._]+]] = zext i8 [[bf_clear2]] to i32 +// MID-NEXT: tail call void @consume_int(i32 noundef [[bf_cast3]]) + + ctx->a = 1; +// FRONT-NEXT: [[r4:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r5:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r4]]) +// FRONT-NEXT: [[bf_load4:%[a-zA-Z0-9._]+]] = load i8, ptr [[r5]] +// FRONT-NEXT: [[bf_clear5:%[a-zA-Z0-9._]+]] = and i8 [[bf_load4]], -2 +// FRONT-NEXT: [[bf_set:%[a-zA-Z0-9._]+]] = or i8 [[bf_clear5]], 1 +// FRONT-NEXT: store i8 [[bf_set]], ptr [[r5]] + + ctx->b = 2; +// FRONT-NEXT: [[r6:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r7:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r6]]) +// FRONT-NEXT: [[bf_load6:%[a-zA-Z0-9._]+]] = load i8, ptr [[r7]] +// FRONT-NEXT: [[bf_clear7:%[a-zA-Z0-9._]+]] = and i8 [[bf_load6]], -7 +// FRONT-NEXT: [[bf_set8:%[a-zA-Z0-9._]+]] = or i8 [[bf_clear7]], 4 +// FRONT-NEXT: store i8 [[bf_set8]], ptr [[r7]] + +// MID-NEXT: [[bf_load4:%[a-zA-Z0-9._]+]] = load i8, ptr %ctx +// MID-NEXT: [[bf_set:%[a-zA-Z0-9._]+]] = and i8 [[bf_load4]], -8 +// MID-NEXT: [[bf_set8:%[a-zA-Z0-9._]+]] = or i8 [[bf_set]], 5 +// MID-NEXT: store i8 [[bf_set8]], ptr %ctx + + consume_ptr(ctx); +// FRONT: ret void +// MID: ret void +} + +struct pattern_bug1 { + int src_port; + int dst_port; +} __virtual_offsets__; + +// FRONT: define dso_local i32 @pattern_bug1 +// MID: define dso_local i32 @pattern_bug1 +// BACK: define dso_local i32 @pattern_bug1 +int pattern_bug1(struct pattern_bug1 *sk) { +// FRONT: [[sk_addr:%[a-zA-Z0-9._]+]] = alloca ptr +// FRONT: [[port:%[a-zA-Z0-9._]+]] = alloca ptr + int *port = &sk->dst_port; +// FRONT: [[r0:%[a-zA-Z0-9._]+]] = load ptr, ptr [[sk_addr]] +// FRONT-NEXT: [[r1:%[a-zA-Z0-9._]+]] = call ptr 
@llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r0]]) +// FRONT-NEXT: [[dst_port:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.pattern_bug1, ptr [[r1]], i32 0, i32 1 +// FRONT-NEXT: store ptr [[dst_port]], ptr [[port]] + return port[0] == 0x777; +// FRONT-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load ptr, ptr [[port]] +// FRONT-NEXT: [[arrayidx:%[a-zA-Z0-9._]+]] = getelementptr inbounds i32, ptr [[r2]], i64 0 +// FRONT-NEXT: [[r3:%[a-zA-Z0-9._]+]] = load i32, ptr [[arrayidx]] +// FRONT-NEXT: [[cmp:%[a-zA-Z0-9._]+]] = icmp eq i32 [[r3]], 1911 +// FRONT-NEXT: [[conv:%[a-zA-Z0-9._]+]] = zext i1 [[cmp]] to i32 +// FRONT: ret i32 [[conv]] + +// MID: [[dst_port:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.pattern_bug1, ptr %sk, i64 0, i32 1 +// MID-NEXT: [[r0:%[a-zA-Z0-9._]+]] = load i32, ptr [[dst_port]] +// MID-NEXT: [[cmp:%[a-zA-Z0-9._]+]] = icmp eq i32 [[r0]], 1911 +// MID-NEXT: [[conv:%[a-zA-Z0-9._]+]] = zext i1 [[cmp]] to i32 +// MID-NEXT: ret i32 [[conv]] + +// No need to check BACK +} + +struct bpf_sock { + int bound_dev_if; + int family; +} __virtual_offsets__; + +struct bpf_sockopt { + struct bpf_sock *sk; + int level; + int optlen; +} __virtual_offsets__; + +extern int magic(void); +extern int magic2(int); + +// The next test case is related to the issue described in the following thread: +// https://lore.kernel.org/bpf/CAA-VZPmxh8o8EBcJ=m-DH4ytcxDFmo0JKsm1p1gf40kS0CE3NQ@mail.gmail.com/T/#m4b9ce2ce73b34f34172328f975235fc6f19841b6 + +// FRONT: define dso_local i32 @known_load_sink_example_1 +// MID: define dso_local i32 @known_load_sink_example_1 +// BACK: define dso_local i32 @known_load_sink_example_1 +int known_load_sink_example_1(struct bpf_sockopt *ctx) +{ + unsigned g = 0; +// FRONT: [[r0:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r1:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r0]]) +// FRONT-NEXT: [[level:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.bpf_sockopt, ptr [[r1]], i32 0, i32 1 +// FRONT-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load i32, ptr [[level]] +// FRONT-NEXT: switch i32 [[r2]], label [[sw_epilog:%[a-zA-Z0-9._]+]] [ +// FRONT-NEXT: i32 10, label [[sw_bb:%[a-zA-Z0-9._]+]] +// FRONT-NEXT: i32 20, label [[sw_bb1:%[a-zA-Z0-9._]+]] +// FRONT-NEXT: ] + +// MID: [[level3:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.bpf_sockopt) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 1) +// MID-NEXT: switch i32 [[level3]], label [[sw_epilog:%[a-zA-Z0-9._]+]] [ +// MID-NEXT: i32 10, label [[sw_bb:%[a-zA-Z0-9._]+]] +// MID-NEXT: i32 20, label [[sw_bb1:%[a-zA-Z0-9._]+]] +// MID-NEXT: ] + +// BACK: [[level39:%[a-zA-Z0-9._]+]] = getelementptr %struct.bpf_sockopt, ptr %ctx, i32 0, i32 1 +// BACK-NEXT: [[r0:%[a-zA-Z0-9._]+]] = load i32, ptr [[level39]] +// BACK-NEXT: switch i32 [[r0]], label [[sw_epilog:%[a-zA-Z0-9._]+]] [ +// BACK-NEXT: i32 10, label [[sw_bb:%[a-zA-Z0-9._]+]] +// BACK-NEXT: i32 20, label [[sw_bb1:%[a-zA-Z0-9._]+]] +// BACK-NEXT: ] + switch (ctx->level) { + case 10: + g = magic2(ctx->sk->family); + break; +// FRONT: [[r3:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r4:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r3]]) +// FRONT-NEXT: [[sk:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.bpf_sockopt, ptr [[r4]], i32 0, i32 0 +// FRONT-NEXT: [[r5:%[a-zA-Z0-9._]+]] = load ptr, ptr [[sk]] +// FRONT-NEXT: [[r6:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r5]]) +// FRONT-NEXT: [[family:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.bpf_sock, ptr [[r6]], i32 0, i32 1 +// FRONT-NEXT: [[r7:%[a-zA-Z0-9._]+]] = load i32, ptr [[family]] +// FRONT-NEXT: [[call:%[a-zA-Z0-9._]+]] = call i32 @magic2(i32 noundef [[r7]]) +// FRONT-NEXT: store i32 [[call]], ptr %g +// FRONT-NEXT: br label %sw.epilog + +// MID: sw.bb: +// MID-NEXT: [[sk4:%[a-zA-Z0-9._]+]] = tail call ptr (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.p0(ptr elementtype(%struct.bpf_sockopt) %ctx, i1 false, i8 0, i8 8, i1 true, i32 0, i32 0) +// MID-NEXT: [[family5:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.bpf_sock) [[sk4]], i1 false, i8 0, i8 4, i1 true, i32 0, i32 1) +// MID-NEXT: br label [[sw_epilog_sink_split:%[a-zA-Z0-9._]+]] + +// BACK: sw.{{.*}}: +// BACK-NEXT: [[sk410:%[a-zA-Z0-9._]+]] = getelementptr %struct.bpf_sockopt, ptr %ctx, i32 0, i32 0 +// BACK-NEXT: [[r1:%[a-zA-Z0-9._]+]] = load ptr, ptr [[sk410]] +// BACK-NEXT: [[family511:%[a-zA-Z0-9._]+]] = getelementptr %struct.bpf_sock, ptr [[r1]], i32 0, i32 1 +// BACK-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load i32, ptr [[family511]] +// BACK-NEXT: br label %sw.epilog.sink.split + + case 20: + g = magic2(ctx->optlen); + break; +// FRONT: [[r8:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r9:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r8]]) +// FRONT-NEXT: [[optlen:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.bpf_sockopt, ptr [[r9]], i32 0, i32 2 +// FRONT-NEXT: [[r10:%[a-zA-Z0-9._]+]] = load i32, ptr [[optlen]] +// FRONT-NEXT: [[call2:%[a-zA-Z0-9._]+]] = call i32 @magic2(i32 noundef [[r10]]) +// FRONT-NEXT: store i32 [[call2]], ptr %g +// FRONT-NEXT: br label %sw.epilog + +// MID: sw.{{.*}}: +// MID-NEXT: [[optlen6:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.bpf_sockopt) %ctx, i1 false, i8 0, i8 4, i1 true, i32 0, i32 2) +// MID-NEXT: br label %sw.epilog.sink.split + +// BACK: sw.{{.*}}: +// BACK-NEXT: [[optlen612:%[a-zA-Z0-9._]+]] = getelementptr %struct.bpf_sockopt, ptr %ctx, i32 0, i32 2 +// BACK-NEXT: [[r3:%[a-zA-Z0-9._]+]] = load i32, ptr [[optlen612]] +// BACK-NEXT: br label %sw.epilog.sink.split + } +// FRONT: sw.epilog: +// FRONT-NEXT: [[r11:%[a-zA-Z0-9._]+]] = load i32, ptr %g +// FRONT-NEXT: [[rem:%[a-zA-Z0-9._]+]] = urem i32 [[r11]], 2 +// FRONT: ret i32 [[rem]] + +// MID: sw.epilog.sink.split: +// MID-NEXT: [[optlen6_sink:%[a-zA-Z0-9._]+]] = phi i32 [ [[optlen6]], [[sw_bb1]] ], [ [[family5]], [[sw_bb]] ] +// MID-NEXT: [[call2:%[a-zA-Z0-9._]+]] = tail call i32 @magic2(i32 noundef [[optlen6_sink]]) +// MID-NEXT: [[phi_bo:%[a-zA-Z0-9._]+]] = and i32 [[call2]], 1 +// MID-NEXT: br label %sw.epilog + +// MID: sw.epilog: +// MID-NEXT: [[g_0:%[a-zA-Z0-9._]+]] = phi i32 [ 0, %entry ], [ %phi.bo, %sw.epilog.sink.split ] +// MID-NEXT: ret i32 [[g_0]] + +// BACK: sw.epilog.sink.split: +// BACK-NEXT: [[optlen6_sink:%[a-zA-Z0-9._]+]] = phi i32 [ [[r3]], [[sw_bb1]] ], [ [[r2]], [[sw_bb]] ] +// BACK-NEXT: [[call2:%[a-zA-Z0-9._]+]] = tail call i32 @magic2(i32 noundef [[optlen6_sink]]) +// BACK-NEXT: [[phi_bo:%[a-zA-Z0-9._]+]] = and i32 [[call2]], 1 +// BACK-NEXT: br label %sw.epilog + +// BACK: sw.epilog: +// BACK-NEXT: [[g_0:%[a-zA-Z0-9._]+]] = phi +// BACK-NEXT: ret i32 [[g_0]] + + return g % 2; +} + +struct __sk_buff { + int priority; + int mark; + int tc_index; +} __virtual_offsets__; + +// FRONT: define dso_local i32 @known_store_sink_example_1 +// MID: define dso_local i32 @known_store_sink_example_1 +// BACK: define dso_local i32 @known_store_sink_example_1 +int known_store_sink_example_1(struct __sk_buff *ctx) { + switch (ctx->priority) { +// FRONT: [[r0:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r1:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r0]]) +// FRONT-NEXT: [[priority:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.__sk_buff, ptr [[r1]], i32 0, i32 0 +// FRONT-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load i32, ptr [[priority]] +// FRONT-NEXT: switch i32 [[r2]], label [[sw_epilog:%[a-zA-Z0-9._]+]] [ +// FRONT-NEXT: i32 10, label [[sw_bb:%[a-zA-Z0-9._]+]] +// FRONT-NEXT: i32 20, label [[sw_bb1:%[a-zA-Z0-9._]+]] +// FRONT-NEXT: ] + +// MID: [[priority3:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.__sk_buff) %ctx, i1 false, i8 0, i8 4, i1 true, i32 0, i32 0) +// MID-NEXT: switch i32 [[priority3]], label [[sw_epilog:%[a-zA-Z0-9._]+]] [ +// MID-NEXT: i32 10, label [[sw_bb:%[a-zA-Z0-9._]+]] +// MID-NEXT: i32 20, label [[sw_bb1:%[a-zA-Z0-9._]+]] +// MID-NEXT: ] + +// BACK: [[priority36:%[a-zA-Z0-9._]+]] = getelementptr %struct.__sk_buff, ptr %ctx, i32 0, i32 0 +// BACK-NEXT: [[r0:%[a-zA-Z0-9._]+]] = load i32, ptr [[priority36]] +// BACK-NEXT: switch i32 [[r0]], label [[sw_epilog:%[a-zA-Z0-9._]+]] [ +// BACK-NEXT: i32 10, label [[sw_bb:%[a-zA-Z0-9._]+]] +// BACK-NEXT: i32 20, label [[sw_bb1:%[a-zA-Z0-9._]+]] +// BACK-NEXT: ] + case 10: + ctx->mark = 3; + break; +// FRONT: sw.bb: +// FRONT-NEXT: [[r3:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r4:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r3]]) +// FRONT-NEXT: [[mark:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.__sk_buff, ptr [[r4]], i32 0, i32 1 +// FRONT-NEXT: store i32 3, ptr [[mark]] +// FRONT-NEXT: br label %sw.epilog + +// MID: sw.bb: +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.store.i32(i32 3, ptr elementtype(%struct.__sk_buff) %ctx, i1 false, i8 0, i8 4, i1 true, i32 0, i32 1) +// MID-NEXT: br label %sw.epilog + +// BACK: sw.{{.*}}: +// BACK-NEXT: [[r1:%[a-zA-Z0-9._]+]] = getelementptr %struct.__sk_buff, ptr %ctx, i32 0, i32 1 +// BACK-NEXT: store i32 3, ptr [[r1]] +// BACK-NEXT: br label %sw.epilog + + case 20: + ctx->priority = 4; + break; +// FRONT: sw.bb1: +// FRONT-NEXT: [[r5:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r6:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r5]]) +// FRONT-NEXT: [[priority2:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.__sk_buff, ptr [[r6]], i32 0, i32 0 +// FRONT-NEXT: store i32 4, ptr [[priority2]] +// FRONT-NEXT: br label %sw.epilog + +// MID: sw.bb1: +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.store.i32(i32 4, ptr elementtype(%struct.__sk_buff) %ctx, i1 false, i8 0, i8 4, i1 true, i32 0, i32 0) +// MID-NEXT: br label %sw.epilog + +// BACK: sw.{{.*}}: +// BACK-NEXT: [[r2:%[a-zA-Z0-9._]+]] = getelementptr %struct.__sk_buff, ptr %ctx, i32 0, i32 0 +// BACK-NEXT: store i32 4, ptr [[r2]] +// BACK-NEXT: br label %sw.epilog + } +// FRONT: sw.epilog: +// FRONT-NEXT: ret i32 0 +// MID: sw.epilog: +// MID-NEXT: ret i32 0 +// BACK: sw.epilog: +// BACK-NEXT: ret i32 0 + return 0; +} + +// FRONT: define dso_local i32 @known_store_sink_example_2 +// MID: define dso_local i32 @known_store_sink_example_2 +// BACK: define dso_local i32 @known_store_sink_example_2 +int known_store_sink_example_2(struct __sk_buff *ctx) { + switch (ctx->tc_index) { +// FRONT: [[r0:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r1:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r0]]) +// FRONT-NEXT: [[tc_index:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.__sk_buff, ptr [[r1]], i32 0, i32 2 +// FRONT-NEXT: [[r2:%[a-zA-Z0-9._]+]] = load i32, ptr [[tc_index]] +// FRONT-NEXT: switch i32 [[r2]], label [[sw_epilog:%[a-zA-Z0-9._]+]] [ +// FRONT-NEXT: i32 10, label [[sw_bb:%[a-zA-Z0-9._]+]] +// FRONT-NEXT: i32 20, label [[sw_bb3:%[a-zA-Z0-9._]+]] +// FRONT-NEXT: ] + +// MID: [[tc_index8:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.__sk_buff) %ctx, i1 false, i8 0, i8 4, i1 true, i32 0, i32 2) +// MID-NEXT: switch i32 [[tc_index8]], label [[sw_epilog:%[a-zA-Z0-9._]+]] [ +// MID-NEXT: i32 10, label [[sw_bb:%[a-zA-Z0-9._]+]] +// MID-NEXT: i32 20, label [[sw_bb3:%[a-zA-Z0-9._]+]] +// MID-NEXT: ] + +// BACK: [[tc_index814:%[a-zA-Z0-9._]+]] = getelementptr %struct.__sk_buff, ptr %ctx, i32 0, i32 2 +// BACK-NEXT: [[r0:%[a-zA-Z0-9._]+]] = load i32, ptr [[tc_index814]] +// BACK-NEXT: switch i32 [[r0]], label [[sw_epilog:%[a-zA-Z0-9._]+]] [ +// BACK-NEXT: i32 10, label [[sw_bb:%[a-zA-Z0-9._]+]] +// BACK-NEXT: i32 20, label [[sw_bb3:%[a-zA-Z0-9._]+]] +// BACK-NEXT: ] + + case 10: + magic2(ctx->mark); + ctx->mark = magic(); + break; +// FRONT: sw.bb: +// FRONT-NEXT: [[r3:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r4:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r3]]) +// FRONT-NEXT: [[mark:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.__sk_buff, ptr [[r4]], i32 0, i32 1 +// FRONT-NEXT: [[r5:%[a-zA-Z0-9._]+]] = load i32, ptr [[mark]] +// FRONT-NEXT: [[call:%[a-zA-Z0-9._]+]] = call i32 @magic2(i32 noundef [[r5]]) +// FRONT-NEXT: [[call1:%[a-zA-Z0-9._]+]] = call i32 @magic() +// FRONT-NEXT: [[r6:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r7:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r6]]) +// FRONT-NEXT: [[mark2:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.__sk_buff, ptr [[r7]], i32 0, i32 1 +// FRONT-NEXT: store i32 [[call1]], ptr [[mark2]] +// FRONT-NEXT: br label %sw.epilog + +// MID: sw.bb: +// MID-NEXT: [[mark7:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.__sk_buff) %ctx, i1 false, i8 0, i8 4, i1 true, i32 0, i32 1) +// MID-NEXT: [[call:%[a-zA-Z0-9._]+]] = tail call i32 @magic2(i32 noundef [[mark7]]) +// MID-NEXT: [[call1:%[a-zA-Z0-9._]+]] = tail call i32 @magic() +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.store.i32(i32 [[call1]], ptr elementtype(%struct.__sk_buff) %ctx, i1 false, i8 0, i8 4, i1 true, i32 0, i32 1) +// MID-NEXT: br label %sw.epilog + +// BACK: sw.{{.*}}: +// BACK-NEXT: [[mark715:%[a-zA-Z0-9._]+]] = getelementptr %struct.__sk_buff, ptr %ctx, i32 0, i32 1 +// BACK-NEXT: [[r1:%[a-zA-Z0-9._]+]] = load i32, ptr [[mark715]] +// BACK-NEXT: [[call:%[a-zA-Z0-9._]+]] = tail call i32 @magic2(i32 noundef [[r1]]) +// BACK-NEXT: [[call1:%[a-zA-Z0-9._]+]] = tail call i32 @magic() +// BACK-NEXT: [[r2:%[a-zA-Z0-9._]+]] = getelementptr %struct.__sk_buff, ptr %ctx, i32 0, i32 1 +// BACK-NEXT: store i32 [[call1]], ptr [[r2]] +// BACK-NEXT: br label %sw.epilog + + case 20: + magic2(ctx->priority); + ctx->priority = magic(); + break; +// FRONT: sw.{{.*}}: +// FRONT-NEXT: [[r8:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r9:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r8]]) +// FRONT-NEXT: [[priority:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.__sk_buff, ptr [[r9]], i32 0, i32 0 +// FRONT-NEXT: [[r10:%[a-zA-Z0-9._]+]] = load i32, ptr [[priority]] +// FRONT-NEXT: [[call4:%[a-zA-Z0-9._]+]] = call i32 @magic2(i32 noundef [[r10]]) +// FRONT-NEXT: [[call5:%[a-zA-Z0-9._]+]] = call i32 @magic() +// FRONT-NEXT: [[r11:%[a-zA-Z0-9._]+]] = load ptr, ptr %ctx.addr +// FRONT-NEXT: [[r12:%[a-zA-Z0-9._]+]] = call ptr @llvm.bpf.virtual.offset.marker.p0.p0(ptr [[r11]]) +// FRONT-NEXT: [[priority6:%[a-zA-Z0-9._]+]] = getelementptr inbounds %struct.__sk_buff, ptr [[r12]], i32 0, i32 0 +// FRONT-NEXT: store i32 [[call5]], ptr [[priority6]] +// FRONT-NEXT: br label %sw.epilog + +// MID: sw.{{.*}}: +// MID-NEXT: [[priority9:%[a-zA-Z0-9._]+]] = tail call i32 (ptr, i1, i8, i8, i1, ...) @llvm.bpf.getelementptr.and.load.i32(ptr elementtype(%struct.__sk_buff) %ctx, i1 false, i8 0, i8 4, i1 true, i32 0, i32 0) +// MID-NEXT: [[call4:%[a-zA-Z0-9._]+]] = tail call i32 @magic2(i32 noundef [[priority9]]) +// MID-NEXT: [[call5:%[a-zA-Z0-9._]+]] = tail call i32 @magic() +// MID-NEXT: tail call void (i32, ptr, i1, i8, i8, i1, ...) 
@llvm.bpf.getelementptr.and.store.i32(i32 [[call5]], ptr elementtype(%struct.__sk_buff) %ctx, i1 false, i8 0, i8 4, i1 true, i32 0, i32 0) +// MID-NEXT: br label %sw.epilog + +// BACK: sw.{{.*}}: +// BACK-NEXT: [[priority916:%[a-zA-Z0-9._]+]] = getelementptr %struct.__sk_buff, ptr %ctx, i32 0, i32 0 +// BACK-NEXT: [[r3:%[a-zA-Z0-9._]+]] = load i32, ptr [[priority916]] +// BACK-NEXT: [[call4:%[a-zA-Z0-9._]+]] = tail call i32 @magic2(i32 noundef [[r3]]) +// BACK-NEXT: [[call5:%[a-zA-Z0-9._]+]] = tail call i32 @magic() +// BACK-NEXT: [[r4:%[a-zA-Z0-9._]+]] = getelementptr %struct.__sk_buff, ptr %ctx, i32 0, i32 0 +// BACK-NEXT: store i32 [[call5]], ptr [[r4]] +// BACK-NEXT: br label %sw.epilog + } + return 0; +// FRONT: sw.epilog: +// FRONT-NEXT: ret i32 0 +// MID: sw.epilog: +// MID-NEXT: ret i32 0 +// BACK: sw.epilog: +// BACK-NEXT: ret i32 0 +} diff --git a/clang/test/Misc/pragma-attribute-supported-attributes-list.test b/clang/test/Misc/pragma-attribute-supported-attributes-list.test --- a/clang/test/Misc/pragma-attribute-supported-attributes-list.test +++ b/clang/test/Misc/pragma-attribute-supported-attributes-list.test @@ -22,6 +22,7 @@ // CHECK-NEXT: Assumption (SubjectMatchRule_function, SubjectMatchRule_objc_method) // CHECK-NEXT: Availability ((SubjectMatchRule_record, SubjectMatchRule_enum, SubjectMatchRule_enum_constant, SubjectMatchRule_field, SubjectMatchRule_function, SubjectMatchRule_namespace, SubjectMatchRule_objc_category, SubjectMatchRule_objc_implementation, SubjectMatchRule_objc_interface, SubjectMatchRule_objc_method, SubjectMatchRule_objc_property, SubjectMatchRule_objc_protocol, SubjectMatchRule_record, SubjectMatchRule_type_alias, SubjectMatchRule_variable)) // CHECK-NEXT: BPFPreserveAccessIndex (SubjectMatchRule_record) +// CHECK-NEXT: BPFVirtualOffsets (SubjectMatchRule_record) // CHECK-NEXT: BTFDeclTag (SubjectMatchRule_variable, SubjectMatchRule_function, SubjectMatchRule_record, SubjectMatchRule_field, SubjectMatchRule_type_alias) // CHECK-NEXT: BuiltinAlias (SubjectMatchRule_function) // CHECK-NEXT: CFAuditedTransfer (SubjectMatchRule_function) diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td --- a/llvm/include/llvm/IR/Intrinsics.td +++ b/llvm/include/llvm/IR/Intrinsics.td @@ -1963,6 +1963,9 @@ [IntrNoMem, ImmArg>, ImmArg>]>; +def int_bpf_virtual_offset_marker : DefaultAttrsIntrinsic<[llvm_any_ty], + [llvm_any_ty], + [IntrNoMem]>; //===------------ Intrinsics to perform common vector shuffles ------------===// diff --git a/llvm/include/llvm/IR/IntrinsicsBPF.td b/llvm/include/llvm/IR/IntrinsicsBPF.td --- a/llvm/include/llvm/IR/IntrinsicsBPF.td +++ b/llvm/include/llvm/IR/IntrinsicsBPF.td @@ -37,4 +37,39 @@ def int_bpf_compare : ClangBuiltin<"__builtin_bpf_compare">, Intrinsic<[llvm_i1_ty], [llvm_i32_ty, llvm_anyint_ty, llvm_anyint_ty], [IntrNoMem]>; + def int_bpf_getelementptr_and_load : ClangBuiltin<"__builtin_bpf_getelementptr_and_load">, + Intrinsic<[llvm_any_ty], + [llvm_ptr_ty, // base ptr for getelementptr + llvm_i1_ty, // volatile + llvm_i8_ty, // atomic order + llvm_i8_ty, // alignment + llvm_i1_ty, // inbounds + llvm_vararg_ty], // indices for getelementptr insn + [IntrReadMem, + IntrArgMemOnly, + NoCapture >, + ReadOnly >, + ImmArg >, // volatile + ImmArg >, // atomic order + ImmArg >, // alignment + ImmArg >, // inbounds + ]>; + def int_bpf_getelementptr_and_store : ClangBuiltin<"__builtin_bpf_getelementptr_and_store">, + Intrinsic<[], + [llvm_any_ty, // value to store + llvm_ptr_ty, // base ptr for getelementptr + 
llvm_i1_ty, // volatile + llvm_i8_ty, // atomic order + llvm_i8_ty, // alignment + llvm_i1_ty, // inbounds + llvm_vararg_ty], // indexes for getelementptr insn + [IntrWriteMem, + IntrArgMemOnly, + NoCapture >, + WriteOnly >, + ImmArg >, // volatile + ImmArg >, // atomic order + ImmArg >, // alignment + ImmArg >, // inbounds + ]>; } diff --git a/llvm/lib/Target/BPF/BPF.h b/llvm/lib/Target/BPF/BPF.h --- a/llvm/lib/Target/BPF/BPF.h +++ b/llvm/lib/Target/BPF/BPF.h @@ -30,6 +30,7 @@ FunctionPass *createBPFMIPeepholeTruncElimPass(); FunctionPass *createBPFMIPreEmitPeepholePass(); FunctionPass *createBPFMIPreEmitCheckingPass(); +FunctionPass *createBPFVirtualOffsetsPass(); void initializeBPFAdjustOptPass(PassRegistry&); void initializeBPFCheckAndAdjustIRPass(PassRegistry&); @@ -42,6 +43,7 @@ void initializeBPFMIPeepholeTruncElimPass(PassRegistry&); void initializeBPFMIPreEmitPeepholePass(PassRegistry&); void initializeBPFMIPreEmitCheckingPass(PassRegistry&); +void initializeBPFVirtualOffsetsLegacyPassPass(PassRegistry &); class BPFAbstractMemberAccessPass : public PassInfoMixin { @@ -72,6 +74,12 @@ public: PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM); }; + +class BPFVirtualOffsetsPass : public PassInfoMixin { +public: + PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); +}; + } // namespace llvm #endif diff --git a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp --- a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp +++ b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp @@ -100,16 +100,29 @@ Instruction *BPFCoreSharedInfo::insertPassThrough(Module *M, BasicBlock *BB, Instruction *Input, - Instruction *Before) { + BasicBlock::InstListType::iterator Where, + uint32_t SeqNum) { Function *Fn = Intrinsic::getDeclaration( M, Intrinsic::bpf_passthrough, {Input->getType(), Input->getType()}); Constant *SeqNumVal = ConstantInt::get(Type::getInt32Ty(BB->getContext()), - BPFCoreSharedInfo::SeqNum++); + SeqNum); auto *NewInst = CallInst::Create(Fn, {SeqNumVal, Input}); - BB->getInstList().insert(Before->getIterator(), NewInst); + BB->getInstList().insert(Where, NewInst); return NewInst; } + +Instruction *BPFCoreSharedInfo::insertPassThrough(Module *M, BasicBlock *BB, + Instruction *Input, + BasicBlock::InstListType::iterator Where) { + return insertPassThrough(M, BB, Input, Where, BPFCoreSharedInfo::SeqNum++); +} + +Instruction *BPFCoreSharedInfo::insertPassThrough(Module *M, BasicBlock *BB, + Instruction *Input, + Instruction *Before) { + return insertPassThrough(M, BB, Input, Before->getIterator()); +} } // namespace llvm using namespace llvm; diff --git a/llvm/lib/Target/BPF/BPFCORE.h b/llvm/lib/Target/BPF/BPFCORE.h --- a/llvm/lib/Target/BPF/BPFCORE.h +++ b/llvm/lib/Target/BPF/BPFCORE.h @@ -10,6 +10,7 @@ #define LLVM_LIB_TARGET_BPF_BPFCORE_H #include "llvm/ADT/StringRef.h" +#include "llvm/IR/BasicBlock.h" namespace llvm { @@ -71,6 +72,14 @@ static Instruction *insertPassThrough(Module *M, BasicBlock *BB, Instruction *Input, Instruction *Before); + + static Instruction * + insertPassThrough(Module *M, BasicBlock *BB, Instruction *Input, + BasicBlock::InstListType::iterator Where); + + static Instruction * + insertPassThrough(Module *M, BasicBlock *BB, Instruction *Input, + BasicBlock::InstListType::iterator Where, uint32_t SeqNum); }; } // namespace llvm diff --git a/llvm/lib/Target/BPF/BPFCheckAndAdjustIR.cpp b/llvm/lib/Target/BPF/BPFCheckAndAdjustIR.cpp --- a/llvm/lib/Target/BPF/BPFCheckAndAdjustIR.cpp +++ 
b/llvm/lib/Target/BPF/BPFCheckAndAdjustIR.cpp @@ -18,15 +18,22 @@ #include "BPF.h" #include "BPFCORE.h" #include "BPFTargetMachine.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/IR/Attributes.h" #include "llvm/IR/DebugInfoMetadata.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicsBPF.h" #include "llvm/IR/Module.h" #include "llvm/IR/Type.h" #include "llvm/IR/User.h" #include "llvm/IR/Value.h" #include "llvm/Pass.h" +#include "llvm/Support/AtomicOrdering.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #define DEBUG_TYPE "bpf-check-and-opt-ir" @@ -47,6 +54,7 @@ bool adjustIR(Module &M); bool removePassThroughBuiltin(Module &M); bool removeCompareBuiltin(Module &M); + bool removeGEPBuiltins(Module &M); }; } // End anonymous namespace @@ -161,9 +169,90 @@ return Changed; } +static unsigned getOperandAsUnsigned(CallInst *Call, unsigned ArgNo) { + if (auto *Int = dyn_cast(Call->getOperand(ArgNo))) + return Int->getValue().getZExtValue(); + std::string Report; + raw_string_ostream ReportS(Report); + ReportS << "Expecting ConstantInt as argument #" << ArgNo << " of " << *Call + << "\n"; + report_fatal_error(StringRef(Report)); +} + +static GetElementPtrInst *reconstructGEP(CallInst *Call, int Delta) { + SmallVector Indices; + Indices.append(Call->data_operands_begin() + 5 + Delta, + Call->data_operands_end()); + auto *GEPPointeeType = Call->getParamElementType(Delta); + auto *GEP = GetElementPtrInst::Create(GEPPointeeType, Call->getOperand(Delta), + ArrayRef(Indices), + Call->getName(), Call); + GEP->setIsInBounds(getOperandAsUnsigned(Call, 1 + Delta)); + return GEP; +} + +// TODO: iterator instead of delta +template > +static void unrollCommon(CallInst *Call, GetElementPtrInst *GEP, T *Insn, + int Delta) { + Insn->setVolatile(getOperandAsUnsigned(Call, 1 + Delta)); + Insn->setOrdering((AtomicOrdering)getOperandAsUnsigned(Call, 2 + Delta)); + Insn->setAlignment(Align(getOperandAsUnsigned(Call, 3 + Delta))); + GEP->setDebugLoc(Call->getDebugLoc()); + Insn->setDebugLoc(Call->getDebugLoc()); + Call->replaceAllUsesWith(Insn); + Call->eraseFromParent(); +} + +static void unrollGEPLoad(CallInst *Call) { + auto *GEP = reconstructGEP(Call, 0); + auto *ReturnType = Call->getFunctionType()->getReturnType(); + auto *Load = new LoadInst(ReturnType, GEP, "", Call); + unrollCommon(Call, GEP, Load, 0); +} + +static void unrollGEPStore(CallInst *Call) { + auto *GEP = reconstructGEP(Call, 1); + auto *Store = new StoreInst(Call->getOperand(0), GEP, Call); + unrollCommon(Call, GEP, Store, 1); +} + +static bool removeGEPBuiltinsInFunc(Function &F) { + SmallVector GEPLoads; + SmallVector GEPStores; + for (auto &BB : F) + for (auto &Insn : BB) + if (auto *Call = dyn_cast(&Insn)) + if (auto *Called = Call->getCalledFunction()) + switch (Called->getIntrinsicID()) { + case Intrinsic::bpf_getelementptr_and_load: + GEPLoads.push_back(Call); + break; + case Intrinsic::bpf_getelementptr_and_store: + GEPStores.push_back(Call); + break; + } + + if (GEPLoads.empty() && GEPStores.empty()) + return false; + + for_each(GEPLoads, unrollGEPLoad); + for_each(GEPStores, unrollGEPStore); + + return true; +} + +bool BPFCheckAndAdjustIR::removeGEPBuiltins(Module &M) { + bool Changed = false; + for (auto &F : M) + Changed = removeGEPBuiltinsInFunc(F) || Changed; + return Changed; +} + bool BPFCheckAndAdjustIR::adjustIR(Module &M) { bool 
Changed = removePassThroughBuiltin(M); Changed = removeCompareBuiltin(M) || Changed; + Changed = removeGEPBuiltins(M) || Changed; return Changed; } diff --git a/llvm/lib/Target/BPF/BPFTargetMachine.cpp b/llvm/lib/Target/BPF/BPFTargetMachine.cpp --- a/llvm/lib/Target/BPF/BPFTargetMachine.cpp +++ b/llvm/lib/Target/BPF/BPFTargetMachine.cpp @@ -48,6 +48,7 @@ initializeBPFCheckAndAdjustIRPass(PR); initializeBPFMIPeepholePass(PR); initializeBPFMIPeepholeTruncElimPass(PR); + initializeBPFVirtualOffsetsLegacyPassPass(PR); } // DataLayout: little or big endian @@ -108,6 +109,7 @@ [&](const PassManagerBuilder &, legacy::PassManagerBase &PM) { PM.add(createBPFAbstractMemberAccess(this)); PM.add(createBPFPreserveDIType()); + PM.add(createBPFVirtualOffsetsPass()); PM.add(createBPFIRPeephole()); }); @@ -130,6 +132,7 @@ FunctionPassManager FPM; FPM.addPass(BPFAbstractMemberAccessPass(this)); FPM.addPass(BPFPreserveDITypePass()); + FPM.addPass(BPFVirtualOffsetsPass()); FPM.addPass(BPFIRPeepholePass()); MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); }); diff --git a/llvm/lib/Target/BPF/BPFVirtualOffsets.cpp b/llvm/lib/Target/BPF/BPFVirtualOffsets.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/BPF/BPFVirtualOffsets.cpp @@ -0,0 +1,399 @@ +//===------ BPFVirtualOffsetsPass.cpp --------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// BPF verifier limits access patterns allowed for BPF program +// parameter passed in a context register (r1). +// Only BASE + static-offset memory accesses are allowed. +// +// The goal of the BPFVirtualOffsetsPass is to ensure that +// SimplifyCFGPass optimization pass will not generate the code that +// uses unsupported access patterns for context parameter. +// +// The following code is used as a running example: +// +// #define __ctx__ __attribute__((btf_decl_tag("ctx"))) +// +// struct bpf_sock { +// int bound_dev_if; +// int family; +// }; +// +// struct bpf_sockopt { +// struct bpf_sock *sk; +// int level; +// int optlen; +// }; +// +// __attribute__((noinline)) +// static int f(int x) { ... } +// +// __attribute__((section("cgroup/getsockopt"))) +// int _getsockopt(struct bpf_sockopt *ctx __ctx__) +// { +// unsigned g = 0; +// switch (ctx->level) { +// case 10: +// g = f(ctx->sk->family); +// break; +// case 20: +// g = f(ctx->optlen); +// break; +// } +// return g % 2; +// } +// +// Here the attribute btf_decl_tag("ctx") marks a context parameter. +// The initial (simplified) IR for function _getsockopt looks as follows: +// +// define dso_local i32 @_getsockopt(ptr noundef %ctx) +// ... +// sw.bb: +// %1 = load ptr, ptr %ctx ;; +// %family = getelementptr inbounds %struct.bpf_sock, ptr %1 ;; access to ctx->sk->family +// %2 = load i32, ptr %family ;; (a) +// %call = call i32 @f(i32 noundef %2) +// br label %sw.epilog +// +// sw.bb1: +// %optlen = getelementptr inbounds %struct.bpf_sockopt, ptr %ctx ;; access to ctx->optlen +// %3 = load i32, ptr %optlen ;; (b) +// %call2 = call i32 @f(i32 noundef %3) +// br label %sw.epilog +// +// sw.epilog: +// ... +// +// W/o additional code motion machine code for field accesses would +// looks as follows: +// +// ... +// $r1 = LDW $r1, 4 ;; for ctx->sk->family +// ... 
+// $r1 = LDW $r1, 12 ;; for ctx->optlen +// +// Which matches the pattern allowed by BPF verifier. +// +// However, SimplifyCFGPass may rewrite the above IR separating +// getelementptr and load instructions as shown below: +// +// ... +// sw.bb: +// %1 = load ptr, ptr %ctx +// %family = getelementptr inbounds %struct.bpf_sock, ptr %1 +// br label %sw.epilog.sink.split +// +// sw.bb1: +// %optlen = getelementptr inbounds %struct.bpf_sockopt, ptr %ctx +// br label %sw.epilog.sink.split +// +// sw.epilog.sink.split: +// %optlen.sink = phi ptr [ %optlen, %sw.bb1 ], [ %family, %sw.bb ] +// %2 = load i32, ptr %optlen.sink ;; (c) +// %call2 = call fastcc i32 @f(i32 noundef %2) +// br label %sw.epilog +// +// sw.epilog: +// ... +// +// Note that load instructions (a) and (b) are replaced by a single +// load instruction (c) that gets it's value from a PHI node. The two +// calls to @f are also replaced by a single call that uses result of +// (c). This is done by a code sinking part of the +// SimplifyCFGPass. This leads to the following machine code: +// +// bb.2.sw.bb: +// $r1 = LDD $r1, 0 +// $r1 = ADD_ri $r1, 4 +// JMP %bb.4 +// +// bb.3.sw.bb1: +// $r1 = ADD_ri $r1, 12 +// +// bb.4.sw.epilog.sink.split: +// $r1 = LDW $r1, 0 +// JAL @f +// +// Here the offset is dynamically added to r1 (context register), this +// access pattern is not allowed by BPF verifier. +// +// TODO: describe the transformation + +#include "BPF.h" +#include "BPFCORE.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/None.h" +#include "llvm/ADT/Optional.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Analysis/AliasAnalysis.h" +#include "llvm/IR/Argument.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/InstIterator.h" +#include "llvm/IR/Instruction.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/IntrinsicsBPF.h" +#include "llvm/IR/Metadata.h" +#include "llvm/IR/PassManager.h" +#include "llvm/IR/Type.h" +#include "llvm/InitializePasses.h" +#include "llvm/Pass.h" +#include "llvm/Support/Casting.h" +#include +#include +#include + +#define DEBUG_TYPE "bpf-context-access-marker" + +using namespace llvm; + +static bool isNamedFuncCall(Value *I, const char *Name) { + auto *Call = dyn_cast(I); + if (!Call) + return false; + auto *Func = Call->getCalledFunction(); + if (!Func) + return false; + return Func->getName().startswith(Name); +} + +static bool isPassThroughCall(Value *I) { // TODO: use ID instead + return isNamedFuncCall(I, "llvm.bpf.passthrough"); +} + +static bool isVirtualOffsetMarkerCall(Value *I) { + return isNamedFuncCall(I, "llvm.bpf.virtual.offset.marker"); +} + +static std::set collectVirtualOffsetMarkers(Function &F) { + std::set Markers; + + for (auto &Insn : instructions(F)) + if (isVirtualOffsetMarkerCall(&Insn)) + Markers.insert(&Insn); + + return Markers; +} + +static Instruction *getUniqueInstructionUser(Value *Val) { + auto *Insn = dyn_cast_or_null(Val->getUniqueUndroppableUser()); + if (!Insn) + return nullptr; + if (isa(Insn) && cast(Insn)->getPointerOperand() != Val) + return nullptr; + if (isa(Insn) && cast(Insn)->getPointerOperand() != Val) + return nullptr; + return Insn; +} + +static Instruction *skipBitcast(Instruction *Val, Instruction *&Bitcast) { + if (isa_and_nonnull(Val)) { + Bitcast = cast(Val); + return getUniqueInstructionUser(Val); + } + + return Val; +} + +static Instruction *skipPassThrough(Instruction *Val, Instruction *&Call) { + if (Val && 
isPassThroughCall(Val)) { + Call = cast(Val); + return getUniqueInstructionUser(Val); + } + + return Val; +} + +static CallInst *makeIntrinsicCall(Module *M, + Intrinsic::BPFIntrinsics Intrinsic, + ArrayRef Types, + SmallVector &Args) { + + Function *Fn = + Intrinsic::getDeclaration(M, Intrinsic, ArrayRef(Types)); + return CallInst::Create(Fn, ArrayRef(Args)); +} + +static void setParamElementType(CallInst *Call, unsigned ArgNo, Type *Type) { + auto &C = Call->getContext(); + Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ElementType, Type)); +} + +static void setParamReadNone(CallInst *Call, unsigned ArgNo) { + auto &C = Call->getContext(); + Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ReadNone)); +} + +template > +static void fillCommonArgs(SmallVector &Args, + GetElementPtrInst *GEP, T *Insn) { + auto &C = GEP->getContext(); + auto *Int8Ty = Type::getInt8Ty(C); + auto *Int1Ty = Type::getInt1Ty(C); + Args.push_back(GEP->getPointerOperand()); + Args.push_back(ConstantInt::get(Int1Ty, Insn->isVolatile())); + Args.push_back(ConstantInt::get(Int8Ty, (unsigned) Insn->getOrdering())); + Args.push_back(ConstantInt::get(Int8Ty, Insn->getAlign().value())); + Args.push_back(ConstantInt::get(Int1Ty, GEP->isInBounds())); + Args.append(GEP->indices().begin(), GEP->indices().end()); +} + +static Instruction *makeGEPAndLoad(GetElementPtrInst *GEP, LoadInst *Load) { + auto *M = GEP->getModule(); + SmallVector Args; + fillCommonArgs(Args, GEP, Load); + auto *Call = makeIntrinsicCall(M, Intrinsic::bpf_getelementptr_and_load, + {Load->getType()}, Args); + setParamElementType(Call, 0, GEP->getSourceElementType()); + Call->applyMergedLocation(GEP->getDebugLoc(), Load->getDebugLoc()); + Call->setName(GEP->getName()); + return Call; +} + +// TODO: what about nested access? +// TODO: syncscope, ! 
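The syncscope TODO above could plausibly be handled the same way the other load/store properties already are: encode the scope ID as one more immediate argument when the intrinsic call is built, and restore it when BPFCheckAndAdjustIR reconstructs the load/store. The following is only a minimal sketch of that idea, not part of the patch; it assumes the intrinsic signatures gain an extra i32 operand placed after the inbounds flag, which would also shift the index operands (and the argument numbers used by reconstructGEP/getOperandAsUnsigned) by one. appendSyncScope is an invented helper name.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Encoding side (next to fillCommonArgs): pass the scope ID as an immediate.
// A StoreInst overload would look identical.
static void appendSyncScope(SmallVector<Value *> &Args, LoadInst *Load) {
  auto *Int32Ty = Type::getInt32Ty(Load->getContext());
  Args.push_back(ConstantInt::get(Int32Ty, Load->getSyncScopeID()));
}

// Decoding side (next to unrollCommon in BPFCheckAndAdjustIR), assuming the
// scope ID sits right after the inbounds immediate:
//   Insn->setSyncScopeID((SyncScope::ID)getOperandAsUnsigned(Call, 5 + Delta));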
+static Instruction *makeGEPAndStore(GetElementPtrInst *GEP, StoreInst *Store) { + auto *M = GEP->getModule(); + SmallVector<Value *> Args; + Args.push_back(Store->getValueOperand()); + fillCommonArgs(Args, GEP, Store); + auto *Call = makeIntrinsicCall(M, Intrinsic::bpf_getelementptr_and_store, + {Store->getValueOperand()->getType()}, Args); + setParamElementType(Call, 1, GEP->getSourceElementType()); + if (Store->getValueOperand()->getType()->isPointerTy()) + setParamReadNone(Call, 0); + Call->applyMergedLocation(GEP->getDebugLoc(), Store->getDebugLoc()); + return Call; +} + +static bool rewriteVirtualOffsetAccess(Instruction *Marker) { + // marker -> bitcast1 -> gep -> bitcast2 -> [passthrough] -> load / store + // marker -> gep -> load / store + // ^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + // delete replace & put before gep passthrough + Instruction *Bitcast1 = nullptr; + Instruction *Bitcast2 = nullptr; + Instruction *PassThrough = nullptr; + Instruction *Replacement = nullptr; + + auto *MaybeGEP = skipBitcast(getUniqueInstructionUser(Marker), Bitcast1); + if (!MaybeGEP) + return false; + + auto *GEP = dyn_cast<GetElementPtrInst>(MaybeGEP); + if (!GEP) + return false; + + auto *GEPUse = skipBitcast(getUniqueInstructionUser(GEP), Bitcast2); + if (!GEPUse) + return false; + + auto *LoadOrStore = skipPassThrough(GEPUse, PassThrough); + if (!LoadOrStore) + return false; + + if (auto *Load = dyn_cast<LoadInst>(LoadOrStore)) + Replacement = makeGEPAndLoad(GEP, Load); + else if (auto *Store = dyn_cast<StoreInst>(LoadOrStore)) + Replacement = makeGEPAndStore(GEP, Store); + else + return false; + + Replacement->insertBefore(GEP); + if (isa<LoadInst>(LoadOrStore)) + LoadOrStore->replaceAllUsesWith(Replacement); + LoadOrStore->eraseFromParent(); + if (PassThrough) + PassThrough->eraseFromParent(); + if (Bitcast2) + Bitcast2->eraseFromParent(); + GEP->eraseFromParent(); + + if (PassThrough) + BPFCoreSharedInfo::insertPassThrough(Replacement->getModule(), + Replacement->getParent(), + Bitcast1 ? 
Bitcast1 : Marker, + Replacement); + + return true; +} + +static void replaceWithFirstOperand(std::set &Insns) { + for (auto *Insn : Insns) { + Insn->replaceAllUsesWith(Insn->getOperand(0)); + Insn->eraseFromParent(); + } +} + +static bool rewriteVirtualOffsetAccess(Function &F) { + LLVM_DEBUG(dbgs() << "********** Context Access Markers ************\n"); + + auto VirtualOffsetMarkers = collectVirtualOffsetMarkers(F); + + LLVM_DEBUG(dbgs() + << "There are " << VirtualOffsetMarkers.size() + << " virtual offset markers\n"); + + if (VirtualOffsetMarkers.empty()) + return false; + + unsigned Updated = 0; + for (auto *Marker : VirtualOffsetMarkers) + Updated += rewriteVirtualOffsetAccess(Marker); + + LLVM_DEBUG(dbgs() << "Modified " << Updated << " access chains\n"); + + if (Updated != VirtualOffsetMarkers.size()) + errs() << "warning: " << (VirtualOffsetMarkers.size() - Updated) + << " chains are not rewritten\n"; + + replaceWithFirstOperand(VirtualOffsetMarkers); + + return true; +} + +namespace { + +class BPFVirtualOffsetsLegacyPass final : public FunctionPass { +public: + static char ID; + + BPFVirtualOffsetsLegacyPass() : FunctionPass(ID) {} + + bool runOnFunction(Function &F) override { + return rewriteVirtualOffsetAccess(F); + } + + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.addRequired(); + } +}; + +} // End anonymous namespace + +char BPFVirtualOffsetsLegacyPass::ID = 0; +INITIALIZE_PASS(BPFVirtualOffsetsLegacyPass, DEBUG_TYPE, + "BPF Context Access Marker", false, false) + +FunctionPass *llvm::createBPFVirtualOffsetsPass() { + return new BPFVirtualOffsetsLegacyPass(); +} + +PreservedAnalyses +llvm::BPFVirtualOffsetsPass::run(Function &F, FunctionAnalysisManager &AM) { + return rewriteVirtualOffsetAccess(F) + ? PreservedAnalyses::none() + : PreservedAnalyses::all(); +} diff --git a/llvm/lib/Target/BPF/CMakeLists.txt b/llvm/lib/Target/BPF/CMakeLists.txt --- a/llvm/lib/Target/BPF/CMakeLists.txt +++ b/llvm/lib/Target/BPF/CMakeLists.txt @@ -16,6 +16,7 @@ add_llvm_target(BPFCodeGen BPFAbstractMemberAccess.cpp + BPFVirtualOffsets.cpp BPFAdjustOpt.cpp BPFAsmPrinter.cpp BPFCheckAndAdjustIR.cpp