@@ -200,6 +200,9 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
/// cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
+ ///
+ /// Note: to lower Microsoft's _InterlockedCompareExchange* intrinsics, invoke
+ /// EmitAtomicCmpXchgForMSIntrin instead.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                                     bool ReturnBool) {
  QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
@@ -230,6 +233,45 @@ static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                     ValueType);
}

+ /// This function should be invoked to emit atomic cmpxchg for Microsoft's
+ /// _InterlockedCompareExchange* intrinsics, which have the following signature:
+ ///   T _InterlockedCompareExchange(T volatile *Destination,
+ ///                                 T Exchange,
+ ///                                 T Comparand);
+ ///
+ /// whereas the LLVM 'cmpxchg' instruction takes its operands in the order
+ ///   cmpxchg *Destination, Comparand, Exchange
+ /// so we need to swap Comparand and Exchange when invoking
+ /// CreateAtomicCmpXchg. That is also why we cannot reuse the utility function
+ /// MakeAtomicCmpXchgValue above: it expects the arguments to be already
+ /// swapped.
+
+ static
+ Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
+     AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
+   auto T = E->getType();
+   assert(E->getArg(0)->getType()->isPointerType());
+   assert(CGF.getContext().hasSameUnqualifiedType(T,
+          E->getArg(0)->getType()->getPointeeType()));
+   assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
+   assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(2)->getType()));
+
+   auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
+   auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
+   auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
+
+   // For Release ordering, the failure ordering should be Monotonic.
+   auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
+                          AtomicOrdering::Monotonic :
+                          SuccessOrdering;
+
+   auto *Result = CGF.Builder.CreateAtomicCmpXchg(
+       Destination, Comparand, Exchange,
+       SuccessOrdering, FailureOrdering);
+   Result->setVolatile(true);
+   return CGF.Builder.CreateExtractValue(Result, 0);
+ }
+
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
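
To make the operand swap concrete, a source-level call like _InterlockedCompareExchange(&Value, Exchange, Comparand) should come out of the helper above as IR roughly like the following (a hand-written sketch with invented value names, assuming a 32-bit operand and the default SequentiallyConsistent ordering):

  ; %dest = Destination (arg 0), %exch = Exchange (arg 1), %comp = Comparand (arg 2)
  %pair = cmpxchg volatile i32* %dest, i32 %comp, i32 %exch seq_cst seq_cst
  ; the intrinsic returns the old value, i.e. field 0 of the {i32, i1} result pair
  %old = extractvalue { i32, i1 } %pair, 0

Note that the IR operand order is (pointer, expected, new), so Comparand precedes Exchange even though the intrinsic takes Exchange first.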
@@ -754,6 +796,9 @@ enum class CodeGenFunction::MSVCIntrin {
  _InterlockedExchange_acq,
  _InterlockedExchange_rel,
  _InterlockedExchange_nf,
+ _InterlockedCompareExchange_acq,
+ _InterlockedCompareExchange_rel,
+ _InterlockedCompareExchange_nf,
  __fastfail,
};

@@ -838,6 +883,12 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
  case MSVCIntrin::_InterlockedExchange_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedCompareExchange_acq:
+   return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedCompareExchange_rel:
+   return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedCompareExchange_nf:
+   return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);

  case MSVCIntrin::_InterlockedDecrement: {
    llvm::Type *IntTy = ConvertType(E->getType());
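
The _rel mapping above is the interesting one: LLVM rejects Release as a cmpxchg failure ordering (the failure path performs no store), which is why EmitAtomicCmpXchgForMSIntrin downgrades the failure ordering to Monotonic in that case. A sketch of the expected IR for each variant (invented names, 32-bit operand):

  ; _InterlockedCompareExchange_acq -> acquire on both paths
  %p1 = cmpxchg volatile i32* %dest, i32 %comp, i32 %exch acquire acquire
  ; _InterlockedCompareExchange_rel -> release on success, monotonic on failure
  %p2 = cmpxchg volatile i32* %dest, i32 %comp, i32 %exch release monotonic
  ; _InterlockedCompareExchange_nf ("no fence") -> monotonic on both paths
  %p3 = cmpxchg volatile i32* %dest, i32 %comp, i32 %exch monotonic monotonic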
@@ -3059,16 +3110,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
  case Builtin::BI_InterlockedCompareExchange8:
  case Builtin::BI_InterlockedCompareExchange16:
  case Builtin::BI_InterlockedCompareExchange:
- case Builtin::BI_InterlockedCompareExchange64: {
-   AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
-       EmitScalarExpr(E->getArg(0)),
-       EmitScalarExpr(E->getArg(2)),
-       EmitScalarExpr(E->getArg(1)),
-       AtomicOrdering::SequentiallyConsistent,
-       AtomicOrdering::SequentiallyConsistent);
-   CXI->setVolatile(true);
-   return RValue::get(Builder.CreateExtractValue(CXI, 0));
- }
+ case Builtin::BI_InterlockedCompareExchange64:
+   return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
  case Builtin::BI_InterlockedIncrement16:
  case Builtin::BI_InterlockedIncrement:
    return RValue::get(
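
In terms of emitted IR, this hunk is a pure refactoring: with no explicit ordering argument, EmitAtomicCmpXchgForMSIntrin defaults both the success and failure orderings to SequentiallyConsistent and still marks the instruction volatile, so these builtins keep producing the same IR as the deleted inline code, e.g. (sketch, invented names):

  %pair = cmpxchg volatile i64* %dest, i64 %comp, i64 %exch seq_cst seq_cst
  %old = extractvalue { i64, i1 } %pair, 0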
@@ -6159,6 +6202,21 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
  case ARM::BI_InterlockedExchange_nf:
  case ARM::BI_InterlockedExchange64_nf:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
+ case ARM::BI_InterlockedCompareExchange8_acq:
+ case ARM::BI_InterlockedCompareExchange16_acq:
+ case ARM::BI_InterlockedCompareExchange_acq:
+ case ARM::BI_InterlockedCompareExchange64_acq:
+   return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
+ case ARM::BI_InterlockedCompareExchange8_rel:
+ case ARM::BI_InterlockedCompareExchange16_rel:
+ case ARM::BI_InterlockedCompareExchange_rel:
+ case ARM::BI_InterlockedCompareExchange64_rel:
+   return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
+ case ARM::BI_InterlockedCompareExchange8_nf:
+ case ARM::BI_InterlockedCompareExchange16_nf:
+ case ARM::BI_InterlockedCompareExchange_nf:
+ case ARM::BI_InterlockedCompareExchange64_nf:
+   return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
  }

  // Get the last argument, which specifies the vector type.
@@ -8675,6 +8733,21 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
  case AArch64::BI_InterlockedExchange_nf:
  case AArch64::BI_InterlockedExchange64_nf:
    return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
+ case AArch64::BI_InterlockedCompareExchange8_acq:
+ case AArch64::BI_InterlockedCompareExchange16_acq:
+ case AArch64::BI_InterlockedCompareExchange_acq:
+ case AArch64::BI_InterlockedCompareExchange64_acq:
+   return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
+ case AArch64::BI_InterlockedCompareExchange8_rel:
+ case AArch64::BI_InterlockedCompareExchange16_rel:
+ case AArch64::BI_InterlockedCompareExchange_rel:
+ case AArch64::BI_InterlockedCompareExchange64_rel:
+   return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
+ case AArch64::BI_InterlockedCompareExchange8_nf:
+ case AArch64::BI_InterlockedCompareExchange16_nf:
+ case AArch64::BI_InterlockedCompareExchange_nf:
+ case AArch64::BI_InterlockedCompareExchange64_nf:
+   return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);

  case AArch64::BI_InterlockedAdd: {
    Value *Arg0 = EmitScalarExpr(E->getArg(0));