diff --git a/llvm/docs/BitCodeFormat.rst b/llvm/docs/BitCodeFormat.rst --- a/llvm/docs/BitCodeFormat.rst +++ b/llvm/docs/BitCodeFormat.rst @@ -1079,6 +1079,11 @@ * code 77: ``elementtype`` * code 78: ``disable_sanitizer_instrumentation`` * code 79: ``nosanitize_bounds`` +* code 80: ``allocalign`` +* code 81: ``allocptr`` +* code 82: ``allockind`` +* code 83: ``presplitcoroutine`` +* code 84: ``fine_grained_bitfields`` .. note:: The ``allocsize`` attribute has a special encoding for its arguments. Its two diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst --- a/llvm/docs/LangRef.rst +++ b/llvm/docs/LangRef.rst @@ -1688,6 +1688,52 @@ * ``"non-leaf"`` - the frame pointer should be kept if the function calls other functions. * ``"all"`` - the frame pointer should be kept. +``fine_grained_bitfields`` + This attribute indicates that the function was compiled in a scope that + enabled fine grained bit-field accesses. This flag is used to prevent IPO + inlining of callee functions with different bit-field element addressing + schemes than that of the caller. This prevents data corruption during + bit-field initialization. + + For example: + + .. code-block:: c + + // File A: compiled with -ffine-grained-bitfield-accesses + struct X { + int a : 8; + int b : 24; + }; + + void callee(struct X*); + + int caller() { + struct X x; + x.a = 10; // Variable a is directly stored to. + callee(&x); + return x.a; + } + + .. code-block:: c + + // File B: compiled with -fno-fine-grained-bitfield-accesses + struct X { + int a : 8; + int b : 24; + }; + + void callee(struct X* x) { + x->b = 10; // Load occurs on struct object, followed by freeze, + // clear, set, and store sequence to assign b. + } + + Because the caller uses ``fine-grained-bitfield-accesses``, only the byte + associated with ``a`` is assigned and the value of ``b`` remains + ``poison``.
The callee does not have individual member variable addressing + and thus loads the full 32-bits (8-bits of value and 24-bits ``poison``) + resulting in a load of ``poison``. The subsequent ``freeze`` in the freeze, + clear, set, and store sequence will corrupt the already assigned value of + ``a``. ``hot`` This attribute indicates that this function is a hot spot of the program execution. The function will be optimized more aggressively and will be diff --git a/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/llvm/include/llvm/Bitcode/LLVMBitCodes.h --- a/llvm/include/llvm/Bitcode/LLVMBitCodes.h +++ b/llvm/include/llvm/Bitcode/LLVMBitCodes.h @@ -686,6 +686,7 @@ ATTR_KIND_ALLOCATED_POINTER = 81, ATTR_KIND_ALLOC_KIND = 82, ATTR_KIND_PRESPLIT_COROUTINE = 83, + ATTR_KIND_FINE_GRAINED_BITFIELDS = 84 }; enum ComdatSelectionKindCodes { diff --git a/llvm/include/llvm/IR/Attributes.td b/llvm/include/llvm/IR/Attributes.td --- a/llvm/include/llvm/IR/Attributes.td +++ b/llvm/include/llvm/IR/Attributes.td @@ -102,6 +102,9 @@ /// Provide pointer element type to intrinsic. def ElementType : TypeAttr<"elementtype", [ParamAttr]>; +/// Function was compiled in a scope requiring fine grained bit-field accesses. +def FineGrainedBitfields : EnumAttr<"fine_grained_bitfields", [FnAttr]>; + /// Function may only access memory that is inaccessible from IR.
def InaccessibleMemOnly : EnumAttr<"inaccessiblememonly", [FnAttr]>; diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp --- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -1636,6 +1636,8 @@ return Attribute::Hot; case bitc::ATTR_KIND_PRESPLIT_COROUTINE: return Attribute::PresplitCoroutine; + case bitc::ATTR_KIND_FINE_GRAINED_BITFIELDS: + return Attribute::FineGrainedBitfields; } } diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp --- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -636,6 +636,8 @@ return bitc::ATTR_KIND_HOT; case Attribute::ElementType: return bitc::ATTR_KIND_ELEMENTTYPE; + case Attribute::FineGrainedBitfields: + return bitc::ATTR_KIND_FINE_GRAINED_BITFIELDS; case Attribute::InaccessibleMemOnly: return bitc::ATTR_KIND_INACCESSIBLEMEM_ONLY; case Attribute::InaccessibleMemOrArgMemOnly: diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp --- a/llvm/lib/Transforms/Utils/InlineFunction.cpp +++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp @@ -1795,6 +1795,14 @@ return InlineResult::failure("incompatible strictfp attributes"); } + // Prevent inlining of a callee function with a different bit-field element + // addressing scheme than that of the caller. + if (CalledFunc->getAttributes().hasFnAttr(Attribute::FineGrainedBitfields) != + Caller->getAttributes().hasFnAttr(Attribute::FineGrainedBitfields)) { + return InlineResult::failure( + "incompatible fine_grained_bitfields attribute"); + } + // GC poses two hazards to inlining, which only occur when the callee has GC: // 1. If the caller has no GC, then the callee's GC must be propagated to the // caller. 
diff --git a/llvm/test/Bitcode/compatibility.ll b/llvm/test/Bitcode/compatibility.ll --- a/llvm/test/Bitcode/compatibility.ll +++ b/llvm/test/Bitcode/compatibility.ll @@ -1532,7 +1532,7 @@ ; CHECK: select <2 x i1> , <2 x i8> , <2 x i8> call void @f.nobuiltin() builtin - ; CHECK: call void @f.nobuiltin() #50 + ; CHECK: call void @f.nobuiltin() #51 call fastcc noalias i32* @f.noalias() noinline ; CHECK: call fastcc noalias i32* @f.noalias() #12 @@ -1958,6 +1958,9 @@ declare void @f.allockind() allockind("alloc,uninitialized") ; CHECK: declare void @f.allockind() #49 +declare void @f.finegrainedbitfields() fine_grained_bitfields +; CHECK: declare void @f.finegrainedbitfields() #50 + ; CHECK: attributes #0 = { alignstack=4 } ; CHECK: attributes #1 = { alignstack=8 } ; CHECK: attributes #2 = { alwaysinline } @@ -2008,7 +2011,8 @@ ; CHECK: attributes #47 = { allocsize(1,0) } ; CHECK: attributes #48 = { nosanitize_bounds } ; CHECK: attributes #49 = { allockind("alloc,uninitialized") } -; CHECK: attributes #50 = { builtin } +; CHECK: attributes #50 = { fine_grained_bitfields } +; CHECK: attributes #51 = { builtin } ;; Metadata diff --git a/llvm/test/Transforms/Inline/fine_grained_bitfields.ll b/llvm/test/Transforms/Inline/fine_grained_bitfields.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/Inline/fine_grained_bitfields.ll @@ -0,0 +1,420 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -passes=always-inline < %s | FileCheck %s -check-prefix=ALWAYS +; RUN: opt -S -passes='cgscc(inline)' < %s | FileCheck %s -check-prefix=INLINE +; RUN: opt -S -passes=partial-inliner -skip-partial-inlining-cost-analysis < %s | \ +; RUN: FileCheck %s -check-prefix=PARTIAL +; +; The following tests verify fine_grained_bitfields function attribute under +; different inlining passes. 
+ +define i32 @inner() { +; ALWAYS-LABEL: @inner( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: ret i32 0 +; +; INLINE-LABEL: @inner( +; INLINE-NEXT: entry: +; INLINE-NEXT: ret i32 0 +; +; PARTIAL-LABEL: @inner( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: ret i32 0 +; +entry: + ret i32 0 +} + +define i32 @inner_always() alwaysinline { +; ALWAYS-LABEL: @inner_always( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: ret i32 1 +; +; INLINE-LABEL: @inner_always( +; INLINE-NEXT: entry: +; INLINE-NEXT: ret i32 1 +; +; PARTIAL-LABEL: @inner_always( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: ret i32 1 +; +entry: + ret i32 1 +} + +define i32 @inner_fgb() fine_grained_bitfields { +; ALWAYS-LABEL: @inner_fgb( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: ret i32 2 +; +; INLINE-LABEL: @inner_fgb( +; INLINE-NEXT: entry: +; INLINE-NEXT: ret i32 2 +; +; PARTIAL-LABEL: @inner_fgb( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: ret i32 2 +; +entry: + ret i32 2 +} + +define i32 @inner_always_fgb() alwaysinline fine_grained_bitfields { +; ALWAYS-LABEL: @inner_always_fgb( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: ret i32 3 +; +; INLINE-LABEL: @inner_always_fgb( +; INLINE-NEXT: entry: +; INLINE-NEXT: ret i32 3 +; +; PARTIAL-LABEL: @inner_always_fgb( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: ret i32 3 +; +entry: + ret i32 3 +} + +; Outer functions without the fine_grained_bitfields attribute. 
+ +define i32 @outer_inner() { +; ALWAYS-LABEL: @outer_inner( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: [[RET:%.*]] = call i32 @inner() +; ALWAYS-NEXT: ret i32 [[RET]] +; +; INLINE-LABEL: @outer_inner( +; INLINE-NEXT: entry: +; INLINE-NEXT: ret i32 0 +; +; PARTIAL-LABEL: @outer_inner( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: [[RET:%.*]] = call i32 @inner() +; PARTIAL-NEXT: ret i32 [[RET]] +; +entry: + %ret = call i32 @inner() + ret i32 %ret +} + +define i32 @outer_inner_always() { +; ALWAYS-LABEL: @outer_inner_always( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: ret i32 1 +; +; INLINE-LABEL: @outer_inner_always( +; INLINE-NEXT: entry: +; INLINE-NEXT: ret i32 1 +; +; PARTIAL-LABEL: @outer_inner_always( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: [[RET:%.*]] = call i32 @inner_always() +; PARTIAL-NEXT: ret i32 [[RET]] +; +entry: + %ret = call i32 @inner_always() + ret i32 %ret +} + +define i32 @outer_inner_fbg() { +; ALWAYS-LABEL: @outer_inner_fbg( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: [[RET:%.*]] = call i32 @inner_fgb() +; ALWAYS-NEXT: ret i32 [[RET]] +; +; INLINE-LABEL: @outer_inner_fbg( +; INLINE-NEXT: entry: +; INLINE-NEXT: [[RET:%.*]] = call i32 @inner_fgb() +; INLINE-NEXT: ret i32 [[RET]] +; +; PARTIAL-LABEL: @outer_inner_fbg( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: [[RET:%.*]] = call i32 @inner_fgb() +; PARTIAL-NEXT: ret i32 [[RET]] +; +entry: + %ret = call i32 @inner_fgb() + ret i32 %ret +} + +define i32 @outer_inner_always_fbg() { +; ALWAYS-LABEL: @outer_inner_always_fbg( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: [[RET:%.*]] = call i32 @inner_always_fgb() +; ALWAYS-NEXT: ret i32 [[RET]] +; +; INLINE-LABEL: @outer_inner_always_fbg( +; INLINE-NEXT: entry: +; INLINE-NEXT: [[RET:%.*]] = call i32 @inner_always_fgb() +; INLINE-NEXT: ret i32 [[RET]] +; +; PARTIAL-LABEL: @outer_inner_always_fbg( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: [[RET:%.*]] = call i32 @inner_always_fgb() +; PARTIAL-NEXT: ret i32 [[RET]] +; +entry: + %ret = call i32 @inner_always_fgb() + ret i32 %ret +} + 
+; Outer functions with the fine_grained_bitfields attribute. + +define i32 @outer_fgb_inner() fine_grained_bitfields { +; ALWAYS-LABEL: @outer_fgb_inner( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: [[RET:%.*]] = call i32 @inner() +; ALWAYS-NEXT: ret i32 [[RET]] +; +; INLINE-LABEL: @outer_fgb_inner( +; INLINE-NEXT: entry: +; INLINE-NEXT: [[RET:%.*]] = call i32 @inner() +; INLINE-NEXT: ret i32 [[RET]] +; +; PARTIAL-LABEL: @outer_fgb_inner( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: [[RET:%.*]] = call i32 @inner() +; PARTIAL-NEXT: ret i32 [[RET]] +; +entry: + %ret = call i32 @inner() + ret i32 %ret +} + +define i32 @outer_fgb_inner_always() fine_grained_bitfields { +; ALWAYS-LABEL: @outer_fgb_inner_always( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: [[RET:%.*]] = call i32 @inner_always() +; ALWAYS-NEXT: ret i32 [[RET]] +; +; INLINE-LABEL: @outer_fgb_inner_always( +; INLINE-NEXT: entry: +; INLINE-NEXT: [[RET:%.*]] = call i32 @inner_always() +; INLINE-NEXT: ret i32 [[RET]] +; +; PARTIAL-LABEL: @outer_fgb_inner_always( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: [[RET:%.*]] = call i32 @inner_always() +; PARTIAL-NEXT: ret i32 [[RET]] +; +entry: + %ret = call i32 @inner_always() + ret i32 %ret +} + +define i32 @outer_fgb_inner_fbg() fine_grained_bitfields { +; ALWAYS-LABEL: @outer_fgb_inner_fbg( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: [[RET:%.*]] = call i32 @inner_fgb() +; ALWAYS-NEXT: ret i32 [[RET]] +; +; INLINE-LABEL: @outer_fgb_inner_fbg( +; INLINE-NEXT: entry: +; INLINE-NEXT: ret i32 2 +; +; PARTIAL-LABEL: @outer_fgb_inner_fbg( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: [[RET:%.*]] = call i32 @inner_fgb() +; PARTIAL-NEXT: ret i32 [[RET]] +; +entry: + %ret = call i32 @inner_fgb() + ret i32 %ret +} + +define i32 @outer_fgb_inner_always_fbg() fine_grained_bitfields { +; ALWAYS-LABEL: @outer_fgb_inner_always_fbg( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: ret i32 3 +; +; INLINE-LABEL: @outer_fgb_inner_always_fbg( +; INLINE-NEXT: entry: +; INLINE-NEXT: ret i32 3 +; +; PARTIAL-LABEL: 
@outer_fgb_inner_always_fbg( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: [[RET:%.*]] = call i32 @inner_always_fgb() +; PARTIAL-NEXT: ret i32 [[RET]] +; +entry: + %ret = call i32 @inner_always_fgb() + ret i32 %ret +} + +define i32 @inner_partial(i1 %cond, i32* align 4 %value) { +; ALWAYS-LABEL: @inner_partial( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[RETURN:%.*]] +; ALWAYS: if.then: +; ALWAYS-NEXT: store i32 10, i32* [[VALUE:%.*]], align 4 +; ALWAYS-NEXT: br label [[RETURN]] +; ALWAYS: return: +; ALWAYS-NEXT: ret i32 0 +; +; INLINE-LABEL: @inner_partial( +; INLINE-NEXT: entry: +; INLINE-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[RETURN:%.*]] +; INLINE: if.then: +; INLINE-NEXT: store i32 10, i32* [[VALUE:%.*]], align 4 +; INLINE-NEXT: br label [[RETURN]] +; INLINE: return: +; INLINE-NEXT: ret i32 0 +; +; PARTIAL-LABEL: @inner_partial( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[RETURN:%.*]] +; PARTIAL: if.then: +; PARTIAL-NEXT: store i32 10, i32* [[VALUE:%.*]], align 4 +; PARTIAL-NEXT: br label [[RETURN]] +; PARTIAL: return: +; PARTIAL-NEXT: ret i32 0 +; +entry: + br i1 %cond, label %if.then, label %return +if.then: + store i32 10, i32* %value, align 4 + br label %return +return: + ret i32 0 +} + +define i32 @inner_partial_fgb(i1 %cond, i32* align 4 %value) fine_grained_bitfields { +; ALWAYS-LABEL: @inner_partial_fgb( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[RETURN:%.*]] +; ALWAYS: if.then: +; ALWAYS-NEXT: store i32 10, i32* [[VALUE:%.*]], align 4 +; ALWAYS-NEXT: br label [[RETURN]] +; ALWAYS: return: +; ALWAYS-NEXT: ret i32 0 +; +; INLINE-LABEL: @inner_partial_fgb( +; INLINE-NEXT: entry: +; INLINE-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[RETURN:%.*]] +; INLINE: if.then: +; INLINE-NEXT: store i32 10, i32* [[VALUE:%.*]], align 4 +; INLINE-NEXT: br label [[RETURN]] +; INLINE: return: +; INLINE-NEXT: 
ret i32 0 +; +; PARTIAL-LABEL: @inner_partial_fgb( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[RETURN:%.*]] +; PARTIAL: if.then: +; PARTIAL-NEXT: store i32 10, i32* [[VALUE:%.*]], align 4 +; PARTIAL-NEXT: br label [[RETURN]] +; PARTIAL: return: +; PARTIAL-NEXT: ret i32 0 +; +entry: + br i1 %cond, label %if.then, label %return +if.then: + store i32 10, i32* %value, align 4 + br label %return +return: + ret i32 0 +} + +; Outer functions without fine_grained_bitfields and partial inners. + +define i32 @outer_inner_partial(i1 %cond, i32* align 4 %value) { +; ALWAYS-LABEL: @outer_inner_partial( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: [[RET:%.*]] = call i32 @inner_partial(i1 [[COND:%.*]], i32* [[VALUE:%.*]]) +; ALWAYS-NEXT: ret i32 [[RET]] +; +; INLINE-LABEL: @outer_inner_partial( +; INLINE-NEXT: entry: +; INLINE-NEXT: br i1 [[COND:%.*]], label [[IF_THEN_I:%.*]], label [[INNER_PARTIAL_EXIT:%.*]] +; INLINE: if.then.i: +; INLINE-NEXT: store i32 10, i32* [[VALUE:%.*]], align 4 +; INLINE-NEXT: br label [[INNER_PARTIAL_EXIT]] +; INLINE: inner_partial.exit: +; INLINE-NEXT: ret i32 0 +; +; PARTIAL-LABEL: @outer_inner_partial( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: br i1 [[COND:%.*]], label [[CODEREPL_I:%.*]], label [[INNER_PARTIAL_2_EXIT:%.*]] +; PARTIAL: codeRepl.i: +; PARTIAL-NEXT: call void @inner_partial.2.if.then(i32* [[VALUE:%.*]]) +; PARTIAL-NEXT: br label [[INNER_PARTIAL_2_EXIT]] +; PARTIAL: inner_partial.2.exit: +; PARTIAL-NEXT: ret i32 0 +; +entry: + %ret = call i32 @inner_partial(i1 %cond, i32* %value) + ret i32 %ret +} + +define i32 @outer_inner_partial_fgb(i1 %cond, i32* align 4 %value) { +; ALWAYS-LABEL: @outer_inner_partial_fgb( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: [[RET:%.*]] = call i32 @inner_partial_fgb(i1 [[COND:%.*]], i32* [[VALUE:%.*]]) +; ALWAYS-NEXT: ret i32 [[RET]] +; +; INLINE-LABEL: @outer_inner_partial_fgb( +; INLINE-NEXT: entry: +; INLINE-NEXT: [[RET:%.*]] = call i32 @inner_partial_fgb(i1 
[[COND:%.*]], i32* [[VALUE:%.*]]) +; INLINE-NEXT: ret i32 [[RET]] +; +; PARTIAL-LABEL: @outer_inner_partial_fgb( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: [[RET:%.*]] = call i32 @inner_partial_fgb(i1 [[COND:%.*]], i32* [[VALUE:%.*]]) +; PARTIAL-NEXT: ret i32 [[RET]] +; +entry: + %ret = call i32 @inner_partial_fgb(i1 %cond, i32* %value) + ret i32 %ret +} + +; Outer functions with fine_grained_bitfields and partial inners. + +define i32 @outer_fgb_inner_partial(i1 %cond, i32* align 4 %value) fine_grained_bitfields { +; ALWAYS-LABEL: @outer_fgb_inner_partial( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: [[RET:%.*]] = call i32 @inner_partial(i1 [[COND:%.*]], i32* [[VALUE:%.*]]) +; ALWAYS-NEXT: ret i32 [[RET]] +; +; INLINE-LABEL: @outer_fgb_inner_partial( +; INLINE-NEXT: entry: +; INLINE-NEXT: [[RET:%.*]] = call i32 @inner_partial(i1 [[COND:%.*]], i32* [[VALUE:%.*]]) +; INLINE-NEXT: ret i32 [[RET]] +; +; PARTIAL-LABEL: @outer_fgb_inner_partial( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: [[RET:%.*]] = call i32 @inner_partial(i1 [[COND:%.*]], i32* [[VALUE:%.*]]) +; PARTIAL-NEXT: ret i32 [[RET]] +; +entry: + %ret = call i32 @inner_partial(i1 %cond, i32* %value) + ret i32 %ret +} + +define i32 @outer_fgb_inner_partial_fgb(i1 %cond, i32* align 4 %value) fine_grained_bitfields { +; ALWAYS-LABEL: @outer_fgb_inner_partial_fgb( +; ALWAYS-NEXT: entry: +; ALWAYS-NEXT: [[RET:%.*]] = call i32 @inner_partial_fgb(i1 [[COND:%.*]], i32* [[VALUE:%.*]]) +; ALWAYS-NEXT: ret i32 [[RET]] +; +; INLINE-LABEL: @outer_fgb_inner_partial_fgb( +; INLINE-NEXT: entry: +; INLINE-NEXT: br i1 [[COND:%.*]], label [[IF_THEN_I:%.*]], label [[INNER_PARTIAL_FGB_EXIT:%.*]] +; INLINE: if.then.i: +; INLINE-NEXT: store i32 10, i32* [[VALUE:%.*]], align 4 +; INLINE-NEXT: br label [[INNER_PARTIAL_FGB_EXIT]] +; INLINE: inner_partial_fgb.exit: +; INLINE-NEXT: ret i32 0 +; +; PARTIAL-LABEL: @outer_fgb_inner_partial_fgb( +; PARTIAL-NEXT: entry: +; PARTIAL-NEXT: br i1 [[COND:%.*]], label [[CODEREPL_I:%.*]], label 
[[INNER_PARTIAL_FGB_1_EXIT:%.*]] +; PARTIAL: codeRepl.i: +; PARTIAL-NEXT: call void @inner_partial_fgb.1.if.then(i32* [[VALUE:%.*]]) +; PARTIAL-NEXT: br label [[INNER_PARTIAL_FGB_1_EXIT]] +; PARTIAL: inner_partial_fgb.1.exit: +; PARTIAL-NEXT: ret i32 0 +; +entry: + %ret = call i32 @inner_partial_fgb(i1 %cond, i32* %value) + ret i32 %ret +} diff --git a/llvm/test/Transforms/Inline/module-fine_grained_bitfield-both.ll b/llvm/test/Transforms/Inline/module-fine_grained_bitfield-both.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/Inline/module-fine_grained_bitfield-both.ll @@ -0,0 +1,32 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=module-inline -S < %s | FileCheck %s --check-prefix=CHECK +; +; Verify module inlining occurs when both caller and callee have a +; fine_grained_bitfields attribute. + +define void @modify_value({i32, float}* %v) fine_grained_bitfields { +; CHECK-LABEL: @modify_value( +; CHECK-NEXT: [[F:%.*]] = getelementptr { i32, float }, { i32, float }* [[V:%.*]], i64 0, i32 0 +; CHECK-NEXT: store i32 10, i32* [[F]], align 4 +; CHECK-NEXT: ret void +; + %f = getelementptr { i32, float }, { i32, float }* %v, i64 0, i32 0 + store i32 10, i32* %f + ret void +} + +define i32 @main() fine_grained_bitfields { +; CHECK-LABEL: @main( +; CHECK-NEXT: [[MY_VAL:%.*]] = alloca { i32, float }, align 8 +; CHECK-NEXT: [[F_I:%.*]] = getelementptr { i32, float }, { i32, float }* [[MY_VAL]], i64 0, i32 0 +; CHECK-NEXT: store i32 10, i32* [[F_I]], align 4 +; CHECK-NEXT: [[F:%.*]] = getelementptr { i32, float }, { i32, float }* [[MY_VAL]], i64 0, i32 0 +; CHECK-NEXT: [[RET:%.*]] = load i32, i32* [[F]], align 4 +; CHECK-NEXT: ret i32 [[RET]] +; + %my_val = alloca {i32, float} + call void @modify_value({i32, float}* %my_val) + %f = getelementptr { i32, float }, { i32, float }* %my_val, i64 0, i32 0 + %ret = load i32, i32* %f + ret i32 %ret +} diff --git 
a/llvm/test/Transforms/Inline/module-fine_grained_bitfield-callee.ll b/llvm/test/Transforms/Inline/module-fine_grained_bitfield-callee.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/Inline/module-fine_grained_bitfield-callee.ll @@ -0,0 +1,31 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=module-inline -S < %s | FileCheck %s --check-prefix=CHECK +; +; Verify module inlining does not occur when only the callee has a +; fine_grained_bitfields attribute. + +define void @modify_value({i32, float}* %v) fine_grained_bitfields { +; CHECK-LABEL: @modify_value( +; CHECK-NEXT: [[F:%.*]] = getelementptr { i32, float }, { i32, float }* [[V:%.*]], i64 0, i32 0 +; CHECK-NEXT: store i32 10, i32* [[F]], align 4 +; CHECK-NEXT: ret void +; + %f = getelementptr { i32, float }, { i32, float }* %v, i64 0, i32 0 + store i32 10, i32* %f + ret void +} + +define i32 @main() { +; CHECK-LABEL: @main( +; CHECK-NEXT: [[MY_VAL:%.*]] = alloca { i32, float }, align 8 +; CHECK-NEXT: call void @modify_value({ i32, float }* [[MY_VAL]]) +; CHECK-NEXT: [[F:%.*]] = getelementptr { i32, float }, { i32, float }* [[MY_VAL]], i64 0, i32 0 +; CHECK-NEXT: [[RET:%.*]] = load i32, i32* [[F]], align 4 +; CHECK-NEXT: ret i32 [[RET]] +; + %my_val = alloca {i32, float} + call void @modify_value({i32, float}* %my_val) + %f = getelementptr { i32, float }, { i32, float }* %my_val, i64 0, i32 0 + %ret = load i32, i32* %f + ret i32 %ret +} diff --git a/llvm/test/Transforms/Inline/module-fine_grained_bitfield-caller.ll b/llvm/test/Transforms/Inline/module-fine_grained_bitfield-caller.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/Inline/module-fine_grained_bitfield-caller.ll @@ -0,0 +1,31 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=module-inline -S < %s | FileCheck %s --check-prefix=CHECK +; +; Verify module inlining does not occur when only the caller has a +; 
fine_grained_bitfields attribute. + +define void @modify_value({i32, float}* %v) { +; CHECK-LABEL: @modify_value( +; CHECK-NEXT: [[F:%.*]] = getelementptr { i32, float }, { i32, float }* [[V:%.*]], i64 0, i32 0 +; CHECK-NEXT: store i32 10, i32* [[F]], align 4 +; CHECK-NEXT: ret void +; + %f = getelementptr { i32, float }, { i32, float }* %v, i64 0, i32 0 + store i32 10, i32* %f + ret void +} + +define i32 @main() fine_grained_bitfields { +; CHECK-LABEL: @main( +; CHECK-NEXT: [[MY_VAL:%.*]] = alloca { i32, float }, align 8 +; CHECK-NEXT: call void @modify_value({ i32, float }* [[MY_VAL]]) +; CHECK-NEXT: [[F:%.*]] = getelementptr { i32, float }, { i32, float }* [[MY_VAL]], i64 0, i32 0 +; CHECK-NEXT: [[RET:%.*]] = load i32, i32* [[F]], align 4 +; CHECK-NEXT: ret i32 [[RET]] +; + %my_val = alloca {i32, float} + call void @modify_value({i32, float}* %my_val) + %f = getelementptr { i32, float }, { i32, float }* %my_val, i64 0, i32 0 + %ret = load i32, i32* %f + ret i32 %ret +} diff --git a/llvm/utils/emacs/llvm-mode.el b/llvm/utils/emacs/llvm-mode.el --- a/llvm/utils/emacs/llvm-mode.el +++ b/llvm/utils/emacs/llvm-mode.el @@ -27,7 +27,7 @@ "nocallback" "nocf_check" "noduplicate" "nofree" "noimplicitfloat" "noinline" "nomerge" "nonlazybind" "noprofile" "noredzone" "noreturn" "norecurse" "nosync" "noundef" "nounwind" "nosanitize_bounds" "nosanitize_coverage" "null_pointer_is_valid" "optforfuzzing" "optnone" "optsize" "preallocated" "readnone" "readonly" "returned" "returns_twice" "shadowcallstack" "speculatable" "speculative_load_hardening" "ssp" "sspreq" "sspstrong" "safestack" "sanitize_address" "sanitize_hwaddress" "sanitize_memtag" - "sanitize_thread" "sanitize_memory" "strictfp" "swifterror" "uwtable" "vscale_range" "willreturn" "writeonly" "immarg") 'symbols) . font-lock-constant-face) + "sanitize_thread" "sanitize_memory" "strictfp" "swifterror" "uwtable" "vscale_range" "willreturn" "writeonly" "immarg" "fine_grained_bitfields") 'symbols) . 
font-lock-constant-face) ;; Variables '("%[-a-zA-Z$._][-a-zA-Z$._0-9]*" . font-lock-variable-name-face) ;; Labels diff --git a/llvm/utils/vim/syntax/llvm.vim b/llvm/utils/vim/syntax/llvm.vim --- a/llvm/utils/vim/syntax/llvm.vim +++ b/llvm/utils/vim/syntax/llvm.vim @@ -88,6 +88,7 @@ \ externally_initialized \ fastcc \ filter + \ fine_grained_bitfields \ from \ gc \ global diff --git a/llvm/utils/vscode/llvm/syntaxes/ll.tmLanguage.yaml b/llvm/utils/vscode/llvm/syntaxes/ll.tmLanguage.yaml --- a/llvm/utils/vscode/llvm/syntaxes/ll.tmLanguage.yaml +++ b/llvm/utils/vscode/llvm/syntaxes/ll.tmLanguage.yaml @@ -188,6 +188,7 @@ \\bexternally_initialized\\b|\ \\bfastcc\\b|\ \\bfilter\\b|\ + \\bfine_grained_bitfields\\b|\ \\bfrom\\b|\ \\bgc\\b|\ \\bglobal\\b|\