diff --git a/clang/include/clang/Basic/CodeGenOptions.def b/clang/include/clang/Basic/CodeGenOptions.def --- a/clang/include/clang/Basic/CodeGenOptions.def +++ b/clang/include/clang/Basic/CodeGenOptions.def @@ -388,6 +388,9 @@ /// Whether to emit unused static constants. CODEGENOPT(KeepStaticConsts, 1, 0) +/// Whether to follow the AAPCS rule that enforces at least one read before storing to a volatile bit-field +CODEGENOPT(ForceAAPCSBitfieldLoad, 1, 0) + #undef CODEGENOPT #undef ENUM_CODEGENOPT #undef VALUE_CODEGENOPT diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -2322,6 +2322,9 @@ def mcmse : Flag<["-"], "mcmse">, Group, Flags<[DriverOption,CC1Option]>, HelpText<"Allow use of CMSE (Armv8-M Security Extensions)">; +def ForceAAPCSBitfieldLoad : Flag<["-"], "fAAPCSBitfieldLoad">, Group, + Flags<[DriverOption,CC1Option]>, + HelpText<"Follows the AAPCS standard requirement that every volatile bit-field write generates at least one load. 
(ARM only).">; def mgeneral_regs_only : Flag<["-"], "mgeneral-regs-only">, Group, HelpText<"Generate code which only uses the general purpose registers (AArch64 only)">; diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -415,6 +415,11 @@ llvm_unreachable("unknown storage duration"); } +/// Helper method to check if the underlying ABI is AAPCS +static bool isAAPCS(const TargetInfo &TargetInfo) { + return TargetInfo.getABI().startswith("aapcs"); +} + LValue CodeGenFunction:: EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) { const Expr *E = M->getSubExpr(); @@ -2068,6 +2073,14 @@ SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set"); } else { assert(Info.Offset == 0); + // According to the AAPCS: + // When a volatile bit-field is written, and its container does not overlap + // with any non-bit-field member, its container must be read exactly once and + // written exactly once using the access width appropriate to the type of the + // container. The two accesses are not atomic. + if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) && + CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad) + Builder.CreateLoad(Ptr, true, "bf.load"); } // Write the new value back out. 
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -1437,6 +1437,7 @@ Opts.SymbolPartition = std::string(Args.getLastArgValue(OPT_fsymbol_partition_EQ)); + Opts.ForceAAPCSBitfieldLoad = Args.hasArg(OPT_ForceAAPCSBitfieldLoad); return Success; } diff --git a/clang/test/CodeGen/aapcs-bitfield.c b/clang/test/CodeGen/aapcs-bitfield.c --- a/clang/test/CodeGen/aapcs-bitfield.c +++ b/clang/test/CodeGen/aapcs-bitfield.c @@ -1,6 +1,8 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // RUN: %clang_cc1 -triple armv8-none-linux-eabi %s -emit-llvm -o - -O3 | FileCheck %s -check-prefix=LE // RUN: %clang_cc1 -triple armebv8-none-linux-eabi %s -emit-llvm -o - -O3 | FileCheck %s -check-prefix=BE +// RUN: %clang_cc1 -triple armv8-none-linux-eabi %s -emit-llvm -o - -O3 -fAAPCSBitfieldLoad | FileCheck %s -check-prefixes=LE,LENUMLOADS +// RUN: %clang_cc1 -triple armebv8-none-linux-eabi %s -emit-llvm -o - -O3 -fAAPCSBitfieldLoad | FileCheck %s -check-prefixes=BE,BENUMLOADS struct st0 { short c : 7; @@ -144,7 +146,7 @@ void st2_check_store(struct st2 *m) { m->c = 1; } - +// Volatile access is allowed to use 16 bits struct st3 { volatile short c : 7; }; @@ -191,7 +193,7 @@ void st3_check_store(struct st3 *m) { m->c = 1; } - +// Volatile access to st4.c should use a char ld/st struct st4 { int b : 9; volatile char c : 5; @@ -324,16 +326,16 @@ // LE-LABEL: @st6_check_load( // LE-NEXT: entry: // LE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0 -// LE-NEXT: [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4 +// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4 // LE-NEXT: [[BF_SHL:%.*]] = shl i16 [[BF_LOAD]], 4 // LE-NEXT: [[BF_ASHR:%.*]] = ashr exact i16 [[BF_SHL]], 4 // LE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32 // LE-NEXT: [[B:%.*]] = 
getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1 -// LE-NEXT: [[TMP1:%.*]] = load i8, i8* [[B]], align 2, !tbaa !3 +// LE-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2 // LE-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32 // LE-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]] // LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2 -// LE-NEXT: [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1 +// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[C]], align 1 // LE-NEXT: [[BF_SHL2:%.*]] = shl i8 [[BF_LOAD1]], 3 // LE-NEXT: [[BF_ASHR3:%.*]] = ashr exact i8 [[BF_SHL2]], 3 // LE-NEXT: [[BF_CAST4:%.*]] = sext i8 [[BF_ASHR3]] to i32 @@ -343,21 +345,21 @@ // BE-LABEL: @st6_check_load( // BE-NEXT: entry: // BE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST6:%.*]], %struct.st6* [[M:%.*]], i32 0, i32 0 -// BE-NEXT: [[BF_LOAD:%.*]] = load i16, i16* [[TMP0]], align 4 +// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[TMP0]], align 4 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i16 [[BF_LOAD]], 4 // BE-NEXT: [[BF_CAST:%.*]] = sext i16 [[BF_ASHR]] to i32 // BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1 -// BE-NEXT: [[TMP1:%.*]] = load i8, i8* [[B]], align 2, !tbaa !3 +// BE-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[B]], align 2 // BE-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32 // BE-NEXT: [[ADD:%.*]] = add nsw i32 [[BF_CAST]], [[CONV]] // BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2 -// BE-NEXT: [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1 +// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[C]], align 1 // BE-NEXT: [[BF_ASHR2:%.*]] = ashr i8 [[BF_LOAD1]], 3 // BE-NEXT: [[BF_CAST3:%.*]] = sext i8 [[BF_ASHR2]] to i32 // BE-NEXT: [[ADD4:%.*]] = add nsw i32 [[ADD]], [[BF_CAST3]] // BE-NEXT: ret i32 [[ADD4]] // -int st6_check_load(struct st6 *m) { +int st6_check_load(volatile struct st6 *m) { int x = m->a; x 
+= m->b; x += m->c; @@ -372,7 +374,7 @@ // LE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 1 // LE-NEXT: store i16 [[BF_SET]], i16* [[TMP0]], align 4 // LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1 -// LE-NEXT: store i8 2, i8* [[B]], align 2, !tbaa !3 +// LE-NEXT: store i8 2, i8* [[B]], align 2 // LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2 // LE-NEXT: [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1 // LE-NEXT: [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], -32 @@ -388,7 +390,7 @@ // BE-NEXT: [[BF_SET:%.*]] = or i16 [[BF_CLEAR]], 16 // BE-NEXT: store i16 [[BF_SET]], i16* [[TMP0]], align 4 // BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 1 -// BE-NEXT: store i8 2, i8* [[B]], align 2, !tbaa !3 +// BE-NEXT: store i8 2, i8* [[B]], align 2 // BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST6]], %struct.st6* [[M]], i32 0, i32 2 // BE-NEXT: [[BF_LOAD1:%.*]] = load i8, i8* [[C]], align 1 // BE-NEXT: [[BF_CLEAR2:%.*]] = and i8 [[BF_LOAD1]], 7 @@ -410,20 +412,20 @@ struct st7b { char x; - struct st7a y; + volatile struct st7a y; }; // LE-LABEL: @st7_check_load( // LE-NEXT: entry: // LE-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0 -// LE-NEXT: [[TMP0:%.*]] = load i8, i8* [[X]], align 4, !tbaa !8 +// LE-NEXT: [[TMP0:%.*]] = load i8, i8* [[X]], align 4 // LE-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32 // LE-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0 -// LE-NEXT: [[TMP1:%.*]] = load i8, i8* [[A]], align 4, !tbaa !11 +// LE-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4 // LE-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32 // LE-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]] // LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1 -// LE-NEXT: [[BF_LOAD:%.*]] = load i8, 
i8* [[B]], align 1 +// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1 // LE-NEXT: [[BF_SHL:%.*]] = shl i8 [[BF_LOAD]], 3 // LE-NEXT: [[BF_ASHR:%.*]] = ashr exact i8 [[BF_SHL]], 3 // LE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32 @@ -433,14 +435,14 @@ // BE-LABEL: @st7_check_load( // BE-NEXT: entry: // BE-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0 -// BE-NEXT: [[TMP0:%.*]] = load i8, i8* [[X]], align 4, !tbaa !8 +// BE-NEXT: [[TMP0:%.*]] = load i8, i8* [[X]], align 4 // BE-NEXT: [[CONV:%.*]] = sext i8 [[TMP0]] to i32 // BE-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0 -// BE-NEXT: [[TMP1:%.*]] = load i8, i8* [[A]], align 4, !tbaa !11 +// BE-NEXT: [[TMP1:%.*]] = load volatile i8, i8* [[A]], align 4 // BE-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32 // BE-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]] // BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1 -// BE-NEXT: [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 1 +// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1 // BE-NEXT: [[BF_ASHR:%.*]] = ashr i8 [[BF_LOAD]], 3 // BE-NEXT: [[BF_CAST:%.*]] = sext i8 [[BF_ASHR]] to i32 // BE-NEXT: [[ADD3:%.*]] = add nsw i32 [[ADD]], [[BF_CAST]] @@ -456,27 +458,27 @@ // LE-LABEL: @st7_check_store( // LE-NEXT: entry: // LE-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0 -// LE-NEXT: store i8 1, i8* [[X]], align 4, !tbaa !8 +// LE-NEXT: store i8 1, i8* [[X]], align 4 // LE-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0 -// LE-NEXT: store i8 2, i8* [[A]], align 4, !tbaa !11 +// LE-NEXT: store volatile i8 2, i8* [[A]], align 4 // LE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1 -// LE-NEXT: [[BF_LOAD:%.*]] = load i8, i8* [[B]], 
align 1 +// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1 // LE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], -32 // LE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 3 -// LE-NEXT: store i8 [[BF_SET]], i8* [[B]], align 1 +// LE-NEXT: store volatile i8 [[BF_SET]], i8* [[B]], align 1 // LE-NEXT: ret void // // BE-LABEL: @st7_check_store( // BE-NEXT: entry: // BE-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_ST7B:%.*]], %struct.st7b* [[M:%.*]], i32 0, i32 0 -// BE-NEXT: store i8 1, i8* [[X]], align 4, !tbaa !8 +// BE-NEXT: store i8 1, i8* [[X]], align 4 // BE-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 0 -// BE-NEXT: store i8 2, i8* [[A]], align 4, !tbaa !11 +// BE-NEXT: store volatile i8 2, i8* [[A]], align 4 // BE-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST7B]], %struct.st7b* [[M]], i32 0, i32 2, i32 1 -// BE-NEXT: [[BF_LOAD:%.*]] = load i8, i8* [[B]], align 1 +// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[B]], align 1 // BE-NEXT: [[BF_CLEAR:%.*]] = and i8 [[BF_LOAD]], 7 // BE-NEXT: [[BF_SET:%.*]] = or i8 [[BF_CLEAR]], 24 -// BE-NEXT: store i8 [[BF_SET]], i8* [[B]], align 1 +// BE-NEXT: store volatile i8 [[BF_SET]], i8* [[B]], align 1 // BE-NEXT: ret void // void st7_check_store(struct st7b *m) { @@ -531,12 +533,14 @@ // LE-LABEL: @store_st9( // LE-NEXT: entry: // LE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0 +// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4 // LE-NEXT: store volatile i8 1, i8* [[TMP0]], align 4 // LE-NEXT: ret void // // BE-LABEL: @store_st9( // BE-NEXT: entry: // BE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0 +// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4 // BE-NEXT: store volatile i8 1, i8* [[TMP0]], align 4 // BE-NEXT: ret void // @@ -549,6 +553,7 @@ // LE-NEXT: [[TMP0:%.*]] = getelementptr 
[[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4 // LE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 +// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 4 // LE-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 4 // LE-NEXT: ret void // @@ -557,6 +562,7 @@ // BE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST9:%.*]], %struct.st9* [[M:%.*]], i32 0, i32 0 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 4 // BE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 +// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 4 // BE-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 4 // BE-NEXT: ret void // @@ -667,12 +673,14 @@ // LE-LABEL: @store_st11( // LE-NEXT: entry: // LE-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1 +// LENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1 // LE-NEXT: store volatile i16 1, i16* [[F]], align 1 // LE-NEXT: ret void // // BE-LABEL: @store_st11( // BE-NEXT: entry: // BE-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1 +// BENUMLOADS-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1 // BE-NEXT: store volatile i16 1, i16* [[F]], align 1 // BE-NEXT: ret void // @@ -685,6 +693,7 @@ // LE-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1 // LE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1 // LE-NEXT: [[INC:%.*]] = add i16 [[BF_LOAD]], 1 +// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, i16* [[F]], align 1 // LE-NEXT: store volatile i16 [[INC]], i16* [[F]], align 1 // LE-NEXT: ret void // @@ -693,6 +702,7 @@ // BE-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 1 // BE-NEXT: [[BF_LOAD:%.*]] = load volatile i16, i16* [[F]], align 1 // BE-NEXT: 
[[INC:%.*]] = add i16 [[BF_LOAD]], 1 +// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i16, i16* [[F]], align 1 // BE-NEXT: store volatile i16 [[INC]], i16* [[F]], align 1 // BE-NEXT: ret void // @@ -703,17 +713,17 @@ // LE-LABEL: @increment_e_st11( // LE-NEXT: entry: // LE-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 0 -// LE-NEXT: [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4, !tbaa !12 +// LE-NEXT: [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4 // LE-NEXT: [[INC:%.*]] = add i8 [[TMP0]], 1 -// LE-NEXT: store volatile i8 [[INC]], i8* [[E]], align 4, !tbaa !12 +// LE-NEXT: store volatile i8 [[INC]], i8* [[E]], align 4 // LE-NEXT: ret void // // BE-LABEL: @increment_e_st11( // BE-NEXT: entry: // BE-NEXT: [[E:%.*]] = getelementptr inbounds [[STRUCT_ST11:%.*]], %struct.st11* [[M:%.*]], i32 0, i32 0 -// BE-NEXT: [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4, !tbaa !12 +// BE-NEXT: [[TMP0:%.*]] = load volatile i8, i8* [[E]], align 4 // BE-NEXT: [[INC:%.*]] = add i8 [[TMP0]], 1 -// BE-NEXT: store volatile i8 [[INC]], i8* [[E]], align 4, !tbaa !12 +// BE-NEXT: store volatile i8 [[INC]], i8* [[E]], align 4 // BE-NEXT: ret void // void increment_e_st11(volatile struct st11 *m) { @@ -822,3 +832,433 @@ void increment_e_st12(volatile struct st12 *m) { ++m->e; } + +struct st13 { + char a : 8; + int b : 32; +} __attribute__((packed)); + +// LE-LABEL: @increment_b_st13( +// LE-NEXT: entry: +// LE-NEXT: [[TMP0:%.*]] = bitcast %struct.st13* [[S:%.*]] to i40* +// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1 +// LE-NEXT: [[TMP1:%.*]] = lshr i40 [[BF_LOAD]], 8 +// LE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[TMP1]] to i32 +// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1 +// LE-NEXT: [[TMP2:%.*]] = zext i32 [[INC]] to i40 +// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1 +// LE-NEXT: [[BF_SHL:%.*]] = shl nuw i40 [[TMP2]], 8 +// LE-NEXT: [[BF_CLEAR:%.*]] = and i40 
[[BF_LOAD1]], 255 +// LE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]] +// LE-NEXT: store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1 +// LE-NEXT: ret void +// +// BE-LABEL: @increment_b_st13( +// BE-NEXT: entry: +// BE-NEXT: [[TMP0:%.*]] = bitcast %struct.st13* [[S:%.*]] to i40* +// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1 +// BE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_LOAD]] to i32 +// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1 +// BE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40 +// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1 +// BE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296 +// BE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]] +// BE-NEXT: store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1 +// BE-NEXT: ret void +// +void increment_b_st13(volatile struct st13 *s) { + s->b++; +} + +struct st14 { + char a : 8; +} __attribute__((packed)); + +// LE-LABEL: @increment_a_st14( +// LE-NEXT: entry: +// LE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0 +// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1 +// LE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 +// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1 +// LE-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1 +// LE-NEXT: ret void +// +// BE-LABEL: @increment_a_st14( +// BE-NEXT: entry: +// BE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST14:%.*]], %struct.st14* [[S:%.*]], i32 0, i32 0 +// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1 +// BE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 +// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1 +// BE-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1 +// BE-NEXT: ret void +// +void increment_a_st14(volatile struct st14 *s) { + s->a++; +} + +struct st15 { + short a : 8; +} __attribute__((packed)); + +// LE-LABEL: @increment_a_st15( +// 
LE-NEXT: entry: +// LE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0 +// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1 +// LE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 +// LENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1 +// LE-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1 +// LE-NEXT: ret void +// +// BE-LABEL: @increment_a_st15( +// BE-NEXT: entry: +// BE-NEXT: [[TMP0:%.*]] = getelementptr [[STRUCT_ST15:%.*]], %struct.st15* [[S:%.*]], i32 0, i32 0 +// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i8, i8* [[TMP0]], align 1 +// BE-NEXT: [[INC:%.*]] = add i8 [[BF_LOAD]], 1 +// BENUMLOADS-NEXT: [[BF_LOAD1:%.*]] = load volatile i8, i8* [[TMP0]], align 1 +// BE-NEXT: store volatile i8 [[INC]], i8* [[TMP0]], align 1 +// BE-NEXT: ret void +// +void increment_a_st15(volatile struct st15 *s) { + s->a++; +} + +struct st16 { + int a : 32; + int b : 16; + int c : 32; + int d : 16; +}; + +// LE-LABEL: @increment_a_st16( +// LE-NEXT: entry: +// LE-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64* +// LE-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4 +// LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32 +// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1 +// LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64 +// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294967296 +// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]] +// LE-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4 +// LE-NEXT: ret void +// +// BE-LABEL: @increment_a_st16( +// BE-NEXT: entry: +// BE-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64* +// BE-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4 +// BE-NEXT: [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32 +// BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32 +// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1 +// BE-NEXT: [[TMP2:%.*]] = zext i32 [[INC]] to i64 +// BE-NEXT: [[BF_SHL:%.*]] = shl nuw i64 
[[TMP2]], 32 +// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], 4294967295 +// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]] +// BE-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4 +// BE-NEXT: ret void +// +void increment_a_st16(struct st16 *s) { + s->a++; +} + +// LE-LABEL: @increment_b_st16( +// LE-NEXT: entry: +// LE-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64* +// LE-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4 +// LE-NEXT: [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32 +// LE-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32 +// LE-NEXT: [[INC:%.*]] = add i32 [[TMP2]], 1 +// LE-NEXT: [[TMP3:%.*]] = and i32 [[INC]], 65535 +// LE-NEXT: [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64 +// LE-NEXT: [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32 +// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -281470681743361 +// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]] +// LE-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4 +// LE-NEXT: ret void +// +// BE-LABEL: @increment_b_st16( +// BE-NEXT: entry: +// BE-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64* +// BE-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4 +// BE-NEXT: [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32 +// BE-NEXT: [[INC4:%.*]] = add i32 [[TMP1]], 65536 +// BE-NEXT: [[TMP2:%.*]] = and i32 [[INC4]], -65536 +// BE-NEXT: [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64 +// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294901761 +// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]] +// BE-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4 +// BE-NEXT: ret void +// +void increment_b_st16(struct st16 *s) { + s->b++; +} + +// LE-LABEL: @increment_c_st16( +// LE-NEXT: entry: +// LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1 +// LE-NEXT: [[TMP0:%.*]] = bitcast i48* [[C]] to i64* +// LE-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4 +// LE-NEXT: [[BF_CAST:%.*]] = trunc i64 
[[BF_LOAD]] to i32 +// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1 +// LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64 +// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294967296 +// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]] +// LE-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4 +// LE-NEXT: ret void +// +// BE-LABEL: @increment_c_st16( +// BE-NEXT: entry: +// BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1 +// BE-NEXT: [[TMP0:%.*]] = bitcast i48* [[C]] to i64* +// BE-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4 +// BE-NEXT: [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32 +// BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32 +// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1 +// BE-NEXT: [[TMP2:%.*]] = zext i32 [[INC]] to i64 +// BE-NEXT: [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32 +// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], 4294967295 +// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]] +// BE-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4 +// BE-NEXT: ret void +// +void increment_c_st16(struct st16 *s) { + s->c++; +} + +// LE-LABEL: @increment_d_st16( +// LE-NEXT: entry: +// LE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1 +// LE-NEXT: [[TMP0:%.*]] = bitcast i48* [[D]] to i64* +// LE-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4 +// LE-NEXT: [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32 +// LE-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32 +// LE-NEXT: [[INC:%.*]] = add i32 [[TMP2]], 1 +// LE-NEXT: [[TMP3:%.*]] = and i32 [[INC]], 65535 +// LE-NEXT: [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64 +// LE-NEXT: [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32 +// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -281470681743361 +// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]] +// LE-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4 +// LE-NEXT: ret void +// +// BE-LABEL: 
@increment_d_st16( +// BE-NEXT: entry: +// BE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1 +// BE-NEXT: [[TMP0:%.*]] = bitcast i48* [[D]] to i64* +// BE-NEXT: [[BF_LOAD:%.*]] = load i64, i64* [[TMP0]], align 4 +// BE-NEXT: [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32 +// BE-NEXT: [[INC4:%.*]] = add i32 [[TMP1]], 65536 +// BE-NEXT: [[TMP2:%.*]] = and i32 [[INC4]], -65536 +// BE-NEXT: [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64 +// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD]], -4294901761 +// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]] +// BE-NEXT: store i64 [[BF_SET]], i64* [[TMP0]], align 4 +// BE-NEXT: ret void +// +void increment_d_st16(struct st16 *s) { + s->d++; +} + +// LE-LABEL: @increment_v_a_st16( +// LE-NEXT: entry: +// LE-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64* +// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4 +// LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32 +// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1 +// LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64 +// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4 +// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296 +// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]] +// LE-NEXT: store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4 +// LE-NEXT: ret void +// +// BE-LABEL: @increment_v_a_st16( +// BE-NEXT: entry: +// BE-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64* +// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4 +// BE-NEXT: [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32 +// BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32 +// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1 +// BE-NEXT: [[TMP2:%.*]] = zext i32 [[INC]] to i64 +// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4 +// BE-NEXT: [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32 +// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 
[[BF_LOAD1]], 4294967295 +// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]] +// BE-NEXT: store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4 +// BE-NEXT: ret void +// +void increment_v_a_st16(volatile struct st16 *s) { + s->a++; +} + +// LE-LABEL: @increment_v_b_st16( +// LE-NEXT: entry: +// LE-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64* +// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4 +// LE-NEXT: [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32 +// LE-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32 +// LE-NEXT: [[INC:%.*]] = add i32 [[TMP2]], 1 +// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4 +// LE-NEXT: [[TMP3:%.*]] = and i32 [[INC]], 65535 +// LE-NEXT: [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64 +// LE-NEXT: [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32 +// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361 +// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]] +// LE-NEXT: store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4 +// LE-NEXT: ret void +// +// BE-LABEL: @increment_v_b_st16( +// BE-NEXT: entry: +// BE-NEXT: [[TMP0:%.*]] = bitcast %struct.st16* [[S:%.*]] to i64* +// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4 +// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4 +// BE-NEXT: [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32 +// BE-NEXT: [[INC4:%.*]] = add i32 [[TMP1]], 65536 +// BE-NEXT: [[TMP2:%.*]] = and i32 [[INC4]], -65536 +// BE-NEXT: [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64 +// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761 +// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]] +// BE-NEXT: store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4 +// BE-NEXT: ret void +// +void increment_v_b_st16(volatile struct st16 *s) { + s->b++; +} + +// LE-LABEL: @increment_v_c_st16( +// LE-NEXT: entry: +// LE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 
0, i32 1 +// LE-NEXT: [[TMP0:%.*]] = bitcast i48* [[C]] to i64* +// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4 +// LE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[BF_LOAD]] to i32 +// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1 +// LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i64 +// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4 +// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294967296 +// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[TMP1]] +// LE-NEXT: store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4 +// LE-NEXT: ret void +// +// BE-LABEL: @increment_v_c_st16( +// BE-NEXT: entry: +// BE-NEXT: [[C:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1 +// BE-NEXT: [[TMP0:%.*]] = bitcast i48* [[C]] to i64* +// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4 +// BE-NEXT: [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32 +// BE-NEXT: [[BF_CAST:%.*]] = trunc i64 [[TMP1]] to i32 +// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1 +// BE-NEXT: [[TMP2:%.*]] = zext i32 [[INC]] to i64 +// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4 +// BE-NEXT: [[BF_SHL:%.*]] = shl nuw i64 [[TMP2]], 32 +// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], 4294967295 +// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL]], [[BF_CLEAR]] +// BE-NEXT: store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4 +// BE-NEXT: ret void +// +void increment_v_c_st16(volatile struct st16 *s) { + s->c++; +} + +// LE-LABEL: @increment_v_d_st16( +// LE-NEXT: entry: +// LE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1 +// LE-NEXT: [[TMP0:%.*]] = bitcast i48* [[D]] to i64* +// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4 +// LE-NEXT: [[TMP1:%.*]] = lshr i64 [[BF_LOAD]], 32 +// LE-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32 +// LE-NEXT: [[INC:%.*]] = add i32 [[TMP2]], 1 +// LE-NEXT: [[BF_LOAD1:%.*]] = load 
volatile i64, i64* [[TMP0]], align 4 +// LE-NEXT: [[TMP3:%.*]] = and i32 [[INC]], 65535 +// LE-NEXT: [[BF_VALUE:%.*]] = zext i32 [[TMP3]] to i64 +// LE-NEXT: [[BF_SHL2:%.*]] = shl nuw nsw i64 [[BF_VALUE]], 32 +// LE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -281470681743361 +// LE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_SHL2]], [[BF_CLEAR]] +// LE-NEXT: store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4 +// LE-NEXT: ret void +// +// BE-LABEL: @increment_v_d_st16( +// BE-NEXT: entry: +// BE-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT_ST16:%.*]], %struct.st16* [[S:%.*]], i32 0, i32 1 +// BE-NEXT: [[TMP0:%.*]] = bitcast i48* [[D]] to i64* +// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i64, i64* [[TMP0]], align 4 +// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i64, i64* [[TMP0]], align 4 +// BE-NEXT: [[TMP1:%.*]] = trunc i64 [[BF_LOAD]] to i32 +// BE-NEXT: [[INC4:%.*]] = add i32 [[TMP1]], 65536 +// BE-NEXT: [[TMP2:%.*]] = and i32 [[INC4]], -65536 +// BE-NEXT: [[BF_SHL2:%.*]] = zext i32 [[TMP2]] to i64 +// BE-NEXT: [[BF_CLEAR:%.*]] = and i64 [[BF_LOAD1]], -4294901761 +// BE-NEXT: [[BF_SET:%.*]] = or i64 [[BF_CLEAR]], [[BF_SHL2]] +// BE-NEXT: store volatile i64 [[BF_SET]], i64* [[TMP0]], align 4 +// BE-NEXT: ret void +// +void increment_v_d_st16(volatile struct st16 *s) { + s->d++; +} +// st17 has alignment = 1, the AAPCS defines nothing for the +// accessing of b, but accessing c should use char +struct st17 { +int b : 32; +char c : 8; +} __attribute__((packed)); + +// LE-LABEL: @increment_v_b_st17( +// LE-NEXT: entry: +// LE-NEXT: [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40* +// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1 +// LE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_LOAD]] to i32 +// LE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1 +// LE-NEXT: [[TMP1:%.*]] = zext i32 [[INC]] to i40 +// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1 +// LE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -4294967296 +// 
LE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]] +// LE-NEXT: store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1 +// LE-NEXT: ret void +// +// BE-LABEL: @increment_v_b_st17( +// BE-NEXT: entry: +// BE-NEXT: [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40* +// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1 +// BE-NEXT: [[TMP1:%.*]] = lshr i40 [[BF_LOAD]], 8 +// BE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[TMP1]] to i32 +// BE-NEXT: [[INC:%.*]] = add nsw i32 [[BF_CAST]], 1 +// BE-NEXT: [[TMP2:%.*]] = zext i32 [[INC]] to i40 +// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1 +// BE-NEXT: [[BF_SHL:%.*]] = shl nuw i40 [[TMP2]], 8 +// BE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 255 +// BE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]] +// BE-NEXT: store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1 +// BE-NEXT: ret void +// +void increment_v_b_st17(volatile struct st17 *s) { + s->b++; +} + +// LE-LABEL: @increment_v_c_st17( +// LE-NEXT: entry: +// LE-NEXT: [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40* +// LE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1 +// LE-NEXT: [[TMP1:%.*]] = lshr i40 [[BF_LOAD]], 32 +// LE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[TMP1]] to i8 +// LE-NEXT: [[INC:%.*]] = add i8 [[BF_CAST]], 1 +// LE-NEXT: [[TMP2:%.*]] = zext i8 [[INC]] to i40 +// LE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1 +// LE-NEXT: [[BF_SHL:%.*]] = shl nuw i40 [[TMP2]], 32 +// LE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], 4294967295 +// LE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_SHL]], [[BF_CLEAR]] +// LE-NEXT: store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1 +// LE-NEXT: ret void +// +// BE-LABEL: @increment_v_c_st17( +// BE-NEXT: entry: +// BE-NEXT: [[TMP0:%.*]] = bitcast %struct.st17* [[S:%.*]] to i40* +// BE-NEXT: [[BF_LOAD:%.*]] = load volatile i40, i40* [[TMP0]], align 1 +// BE-NEXT: [[BF_CAST:%.*]] = trunc i40 [[BF_LOAD]] to i8 +// BE-NEXT: 
[[INC:%.*]] = add i8 [[BF_CAST]], 1 +// BE-NEXT: [[TMP1:%.*]] = zext i8 [[INC]] to i40 +// BE-NEXT: [[BF_LOAD1:%.*]] = load volatile i40, i40* [[TMP0]], align 1 +// BE-NEXT: [[BF_CLEAR:%.*]] = and i40 [[BF_LOAD1]], -256 +// BE-NEXT: [[BF_SET:%.*]] = or i40 [[BF_CLEAR]], [[TMP1]] +// BE-NEXT: store volatile i40 [[BF_SET]], i40* [[TMP0]], align 1 +// BE-NEXT: ret void +// +void increment_v_c_st17(volatile struct st17 *s) { + s->c++; +}