Index: include/clang/Driver/CLCompatOptions.td
===================================================================
--- include/clang/Driver/CLCompatOptions.td
+++ include/clang/Driver/CLCompatOptions.td
@@ -201,7 +201,8 @@
 def _SLASH_Tp : CLCompileJoinedOrSeparate<"Tp">,
   HelpText<"Specify a C++ source file">, MetaVarName<"<filename>">;
 def _SLASH_TP : CLCompileFlag<"TP">, HelpText<"Treat all source files as C++">;
-
+def _SLASH_volatile : CLCompileJoined<"volatile:">,
+  HelpText<"Set semantics for volatile loads and stores">;
 
 // Ignored:
 
@@ -220,7 +221,6 @@
 def _SLASH_RTC : CLIgnoredJoined<"RTC">;
 def _SLASH_sdl : CLIgnoredFlag<"sdl">;
 def _SLASH_sdl_ : CLIgnoredFlag<"sdl-">;
-def _SLASH_volatile_iso : CLIgnoredFlag<"volatile:iso">;
 def _SLASH_w : CLIgnoredJoined<"w">;
 def _SLASH_Zc_auto : CLIgnoredFlag<"Zc:auto">;
 def _SLASH_Zc_forScope : CLIgnoredFlag<"Zc:forScope">;
@@ -281,7 +281,6 @@
 def _SLASH_Qvec_report : CLJoined<"Qvec-report">;
 def _SLASH_u : CLFlag<"u">;
 def _SLASH_V : CLFlag<"V">;
-def _SLASH_volatile_ms : CLFlag<"volatile:ms">;
 def _SLASH_WL : CLFlag<"WL">;
 def _SLASH_Wp64 : CLFlag<"Wp64">;
 def _SLASH_X : CLFlag<"X">;
Index: include/clang/Driver/Options.td
===================================================================
--- include/clang/Driver/Options.td
+++ include/clang/Driver/Options.td
@@ -628,6 +628,7 @@
   HelpText<"Accept some non-standard constructs supported by the Microsoft compiler">;
 def fms_compatibility : Flag<["-"], "fms-compatibility">, Group<f_Group>, Flags<[CC1Option]>,
   HelpText<"Enable full Microsoft Visual C++ compatibility">;
+def fms_volatile : Flag<["-"], "fms-volatile">, Group<f_Group>, Flags<[CC1Option]>;
 def fmsc_version : Joined<["-"], "fmsc-version=">, Group<f_Group>, Flags<[DriverOption, CoreOption]>,
   HelpText<"Microsoft compiler version number to report in _MSC_VER (0 = don't define it (default))">;
 def fms_compatibility_version
Index: include/clang/Frontend/CodeGenOptions.def
===================================================================
--- include/clang/Frontend/CodeGenOptions.def
+++ include/clang/Frontend/CodeGenOptions.def
@@ -68,6 +68,7 @@
                                      ///< be generated.
 CODEGENOPT(MergeAllConstants , 1, 1) ///< Merge identical constants.
 CODEGENOPT(MergeFunctions , 1, 0) ///< Set when -fmerge-functions is enabled.
+CODEGENOPT(MSVolatile , 1, 0) ///< Set when /volatile:ms is enabled.
 CODEGENOPT(NoCommon , 1, 0) ///< Set when -fno-common or C++ is enabled.
 CODEGENOPT(NoDwarfDirectoryAsm , 1, 0) ///< Set when -fno-dwarf-directory-asm is
                                        ///< enabled.
Index: lib/CodeGen/CGAtomic.cpp
===================================================================
--- lib/CodeGen/CGAtomic.cpp
+++ lib/CodeGen/CGAtomic.cpp
@@ -1006,9 +1006,28 @@
   return convertTempToRValue(Temp, ResultSlot, Loc);
 }
 
+/// Returns true iff a volatile access to \p LV should be emitted as a native
+/// atomic operation under /volatile:ms (-fms-volatile).
+bool CodeGenFunction::volatileIsAtomic(LValue LV) {
+  AtomicInfo AI(*this, LV);
+  // Accesses the target cannot perform with a single instruction would need
+  // an atomic libcall; MS volatile semantics only apply to native widths.
+  return CGM.getCodeGenOpts().MSVolatile && !AI.shouldUseLibcall() &&
+         (LV.isVolatile() || hasVolatileMember(LV.getType()));
+}
+
+/// Type-only variant for the scalar load/store paths, where no LValue has
+/// been formed yet.
+bool CodeGenFunction::volatileIsAtomic(QualType Ty) {
+  return CGM.getCodeGenOpts().MSVolatile &&
+         getContext().getTargetInfo().hasBuiltinAtomic(
+             getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty));
+}
+
 /// Emit a load from an l-value of atomic type. Note that the r-value
 /// we produce is an r-value of the atomic *value* type.
 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
+                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                        AggValueSlot resultSlot) {
   AtomicInfo atomics(*this, src);
   LValue LVal = atomics.getAtomicLValue();
@@ -1060,11 +1079,11 @@
   // Okay, we're doing this natively.
   llvm::Value *addr = atomics.emitCastToAtomicIntPointer(SrcAddr);
   llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
-  load->setAtomic(llvm::SequentiallyConsistent);
+  load->setAtomic(AO);
 
   // Other decoration.
   load->setAlignment(src.getAlignment().getQuantity());
-  if (src.isVolatileQualified())
+  if (IsVolatile)
     load->setVolatile(true);
   if (src.getTBAAInfo())
     CGM.DecorateInstruction(load, src.getTBAAInfo());
@@ -1166,7 +1185,9 @@
 /// Note that the r-value is expected to be an r-value *of the atomic
 /// type*; this means that for aggregate r-values, it should include
 /// storage for any padding that was necessary.
-void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
+void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
+                                      llvm::AtomicOrdering AO, bool IsVolatile,
+                                      bool isInit) {
   // If this is an aggregate r-value, it should agree in type except
   // maybe for address-space qualification.
   assert(!rvalue.isAggregate() ||
@@ -1209,11 +1230,11 @@
     llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
 
     // Initializations don't need to be atomic.
-    if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);
+    if (!isInit) store->setAtomic(AO);
 
     // Other decoration.
     store->setAlignment(dest.getAlignment().getQuantity());
-    if (dest.isVolatileQualified())
+    if (IsVolatile)
       store->setVolatile(true);
     if (dest.getTBAAInfo())
       CGM.DecorateInstruction(store, dest.getTBAAInfo());
Index: lib/CodeGen/CGExpr.cpp
===================================================================
--- lib/CodeGen/CGExpr.cpp
+++ lib/CodeGen/CGExpr.cpp
@@ -1135,8 +1135,10 @@
     }
   }
 
+  bool VolatileIsAtomic = Volatile && volatileIsAtomic(Ty);
+
   // Atomic operations have to be done on integral types.
-  if (Ty->isAtomicType()) {
+  if (Ty->isAtomicType() || VolatileIsAtomic) {
     LValue lvalue = LValue::MakeAddr(Addr, Ty,
                                      CharUnits::fromQuantity(Alignment),
                                      getContext(), TBAAInfo);
@@ -1255,7 +1257,9 @@
 
   Value = EmitToMemory(Value, Ty);
 
-  if (Ty->isAtomicType()) {
+  bool VolatileIsAtomic = !isInit && Volatile && volatileIsAtomic(Ty);
+
+  if (Ty->isAtomicType() || VolatileIsAtomic) {
     EmitAtomicStore(RValue::get(Value),
                     LValue::MakeAddr(Addr, Ty,
                                      CharUnits::fromQuantity(Alignment),
Index: lib/CodeGen/CGExprAgg.cpp
===================================================================
--- lib/CodeGen/CGExprAgg.cpp
+++ lib/CodeGen/CGExprAgg.cpp
@@ -212,7 +212,7 @@
   LValue LV = CGF.EmitLValue(E);
 
   // If the type of the l-value is atomic, then do an atomic load.
-  if (LV.getType()->isAtomicType()) {
+  if (LV.getType()->isAtomicType() || CGF.volatileIsAtomic(LV)) {
     CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
     return;
   }
@@ -865,7 +865,7 @@
   LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
 
   // That copy is an atomic copy if the LHS is atomic.
-  if (LHS.getType()->isAtomicType()) {
+  if (LHS.getType()->isAtomicType() || CGF.volatileIsAtomic(LHS)) {
     CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
     return;
   }
@@ -882,7 +882,7 @@
 
   // If we have an atomic type, evaluate into the destination and then
   // do an atomic copy.
-  if (LHS.getType()->isAtomicType()) {
+  if (LHS.getType()->isAtomicType() || CGF.volatileIsAtomic(LHS)) {
     EnsureDest(E->getRHS()->getType());
     Visit(E->getRHS());
     CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
Index: lib/CodeGen/CGExprComplex.cpp
===================================================================
--- lib/CodeGen/CGExprComplex.cpp
+++ lib/CodeGen/CGExprComplex.cpp
@@ -336,7 +336,8 @@
 /// specified value pointer.
 void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
                                             bool isInit) {
-  if (lvalue.getType()->isAtomicType())
+  if (lvalue.getType()->isAtomicType() ||
+      (!isInit && CGF.volatileIsAtomic(lvalue)))
     return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit);
 
   llvm::Value *Ptr = lvalue.getAddress();
Index: lib/CodeGen/CGStmtOpenMP.cpp
===================================================================
--- lib/CodeGen/CGStmtOpenMP.cpp
+++ lib/CodeGen/CGStmtOpenMP.cpp
@@ -829,8 +829,14 @@
   assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
   LValue XLValue = CGF.EmitLValue(X);
   LValue VLValue = CGF.EmitLValue(V);
-  RValue Res = XLValue.isGlobalReg() ? CGF.EmitLoadOfLValue(XLValue, Loc)
-                                     : CGF.EmitAtomicLoad(XLValue, Loc);
+  // A non-seq_cst 'omp atomic read' only needs atomicity, not ordering, so
+  // monotonic suffices; preserve any volatile qualification on x.
+  RValue Res = XLValue.isGlobalReg()
+                   ? CGF.EmitLoadOfLValue(XLValue, Loc)
+                   : CGF.EmitAtomicLoad(XLValue, Loc,
+                                        IsSeqCst ? llvm::SequentiallyConsistent
+                                                 : llvm::Monotonic,
+                                        XLValue.isVolatileQualified());
   // OpenMP, 2.12.6, atomic Construct
   // Any atomic construct with a seq_cst clause forces the atomically
   // performed operation to include an implicit flush operation without a
Index: lib/CodeGen/CodeGenFunction.h
===================================================================
--- lib/CodeGen/CodeGenFunction.h
+++ lib/CodeGen/CodeGenFunction.h
@@ -2146,10 +2146,44 @@
 
   void EmitAtomicInit(Expr *E, LValue lvalue);
 
+  bool volatileIsAtomic(LValue Src);
+  bool volatileIsAtomic(QualType Ty);
+
+  /// Emit an atomic load, choosing the ordering from the l-value: seq_cst
+  /// for true _Atomic l-values, acquire for MS-volatile ones.
+  RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
+                        AggValueSlot Slot = AggValueSlot::ignored()) {
+    llvm::AtomicOrdering AO;
+    bool IsVolatile = LV.isVolatileQualified();
+    if (LV.getType()->isAtomicType()) {
+      AO = llvm::SequentiallyConsistent;
+    } else {
+      AO = llvm::Acquire;
+      IsVolatile = true;
+    }
+    return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
+  }
+
   RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
+                        llvm::AtomicOrdering AO, bool IsVolatile = false,
                         AggValueSlot slot = AggValueSlot::ignored());
 
-  void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
+  void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
+                       bool IsVolatile, bool isInit);
+
+  /// Emit an atomic store, choosing the ordering from the l-value: seq_cst
+  /// for true _Atomic l-values, release for MS-volatile ones.
+  void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit) {
+    bool IsVolatile = lvalue.isVolatileQualified();
+    llvm::AtomicOrdering AO;
+    if (lvalue.getType()->isAtomicType()) {
+      AO = llvm::SequentiallyConsistent;
+    } else {
+      AO = llvm::Release;
+      IsVolatile = true;
+    }
+    return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
+  }
 
   std::pair<RValue, RValue> EmitAtomicCompareExchange(
       LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
Index: lib/Driver/Tools.cpp
===================================================================
--- lib/Driver/Tools.cpp
+++ lib/Driver/Tools.cpp
@@ -4888,6 +4888,21 @@
     CmdArgs.push_back("-P");
   }
 
+  // Translate /volatile:<mode>. 'ms' gives volatile accesses acquire/release
+  // semantics; 'iso' requests standard C++ semantics and maps to nothing.
+  // MSVC defaults to /volatile:ms on x86 and x86-64, so match that default.
+  if (Arg *A = Args.getLastArg(options::OPT__SLASH_volatile)) {
+    StringRef Mode = A->getValue();
+    if (Mode == "ms") {
+      CmdArgs.push_back("-fms-volatile");
+    } else if (Mode != "iso") {
+      D.Diag(clang::diag::warn_drv_unused_argument) << A->getAsString(Args);
+    }
+  } else if (getToolChain().getTriple().getArch() == llvm::Triple::x86_64 ||
+             getToolChain().getTriple().getArch() == llvm::Triple::x86) {
+    CmdArgs.push_back("-fms-volatile");
+  }
+
   Arg *MostGeneralArg = Args.getLastArg(options::OPT__SLASH_vmg);
   Arg *BestCaseArg = Args.getLastArg(options::OPT__SLASH_vmb);
   if (MostGeneralArg && BestCaseArg)
Index: lib/Frontend/CompilerInvocation.cpp
===================================================================
--- lib/Frontend/CompilerInvocation.cpp
+++ lib/Frontend/CompilerInvocation.cpp
@@ -477,6 +477,8 @@
                                        OPT_fno_data_sections, false);
   Opts.MergeFunctions = Args.hasArg(OPT_fmerge_functions);
 
+  Opts.MSVolatile = Args.hasArg(OPT_fms_volatile);
+
   Opts.VectorizeBB = Args.hasArg(OPT_vectorize_slp_aggressive);
   Opts.VectorizeLoop = Args.hasArg(OPT_vectorize_loops);
   Opts.VectorizeSLP = Args.hasArg(OPT_vectorize_slp);
Index: test/CodeGen/ms-volatile.c
===================================================================
--- /dev/null
+++ test/CodeGen/ms-volatile.c
@@ -0,0 +1,62 @@
+// RUN: %clang_cc1 -triple i386-pc-win32 -emit-llvm -fms-volatile -o - < %s | FileCheck %s
+struct foo {
+  volatile int x;
+};
+struct bar {
+  int x;
+};
+typedef _Complex float __declspec(align(8)) baz;
+
+void test1(struct foo *p, struct foo *q) {
+  *p = *q;
+  // CHECK-LABEL: @test1
+  // CHECK: load atomic volatile {{.*}} acquire
+  // CHECK: store atomic volatile {{.*}}, {{.*}} release
+}
+void test2(volatile int *p, volatile int *q) {
+  *p = *q;
+  // CHECK-LABEL: @test2
+  // CHECK: load atomic volatile {{.*}} acquire
+  // CHECK: store atomic volatile {{.*}}, {{.*}} release
+}
+void test3(struct foo *p, struct foo *q) {
+  p->x = q->x;
+  // CHECK-LABEL: @test3
+  // CHECK: load atomic volatile {{.*}} acquire
+  // CHECK: store atomic volatile {{.*}}, {{.*}} release
+}
+void test4(volatile struct foo *p, volatile struct foo *q) {
+  p->x = q->x;
+  // CHECK-LABEL: @test4
+  // CHECK: load atomic volatile {{.*}} acquire
+  // CHECK: store atomic volatile {{.*}}, {{.*}} release
+}
+void test5(volatile struct foo *p, volatile struct foo *q) {
+  *p = *q;
+  // CHECK-LABEL: @test5
+  // CHECK: load atomic volatile {{.*}} acquire
+  // CHECK: store atomic volatile {{.*}}, {{.*}} release
+}
+void test6(struct bar *p, struct bar *q) {
+  *p = *q;
+  // CHECK-LABEL: @test6
+  // CHECK-NOT: load atomic volatile {{.*}}
+  // CHECK-NOT: store atomic volatile {{.*}}, {{.*}}
+}
+void test7(volatile struct bar *p, volatile struct bar *q) {
+  *p = *q;
+  // CHECK-LABEL: @test7
+  // CHECK: load atomic volatile {{.*}} acquire
+  // CHECK: store atomic volatile {{.*}}, {{.*}} release
+}
+void test8(volatile double *p, volatile double *q) {
+  *p = *q;
+  // CHECK-LABEL: @test8
+  // CHECK: load atomic volatile {{.*}} acquire
+  // CHECK: store atomic volatile {{.*}}, {{.*}} release
+}
+void test9(volatile baz *p, baz *q) {
+  *p = *q;
+  // CHECK-LABEL: @test9
+  // CHECK: store atomic volatile {{.*}}, {{.*}} release
+}