Implement the missing pieces needed to support libstdc++4.7's <atomic>:

__atomic_test_and_set, __atomic_clear, plus a pile of undocumented __GCC_*
predefined macros.
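For reference, the macros this patch predefines (see the InitializePredefinedMacros hunk below):

  __GCC_ATOMIC_TEST_AND_SET_TRUEVAL  // the value __atomic_test_and_set writes (1)
  __GCC_ATOMIC_<TYPE>_LOCK_FREE      // for BOOL, CHAR, CHAR16_T, CHAR32_T, WCHAR_T,
                                     // SHORT, INT, LONG, LLONG and POINTER;
                                     // 1 = sometimes lock free, 2 = always lock free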

Implement library fallback for __atomic_is_lock_free and
__c11_atomic_is_lock_free, and implement __atomic_always_lock_free.

Contrary to their documentation, GCC's __atomic_fetch_add family doesn't
multiply the operand by sizeof(T) when operating on a pointer type.
libstdc++ relies on this quirk. Remove this handling for all but the
__c11_atomic_fetch_add and __c11_atomic_fetch_sub builtins.
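A minimal sketch of the difference, assuming a 4-byte int (hypothetical
example, not taken from this patch's tests):

  #include <stdatomic.h> /* for the memory_order enumerators */

  void bump(_Atomic(int *) *ap, int **gp) {
    /* C11 builtin: the operand is scaled like pointer arithmetic, so this
       advances the stored pointer by sizeof(int) bytes. */
    __c11_atomic_fetch_add(ap, 1, memory_order_relaxed);
    /* GNU builtin: the operand is added as-is, so the caller must scale it,
       matching GCC's actual behavior and libstdc++'s expectations. */
    __atomic_fetch_add(gp, sizeof(int), memory_order_relaxed);
  }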

Contrary to their documentation, __atomic_test_and_set and __atomic_clear
take a first argument of type 'volatile void *', not 'void *' or 'bool *',
and __atomic_is_lock_free and __atomic_always_lock_free have an argument
of type 'const volatile void *', not 'void *'.
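Expressed as prototypes, the four builtins now behave as if declared like this
(a sketch for exposition; they are compiler builtins and appear in no header):

  bool __atomic_test_and_set(volatile void *ptr, int memory_order);
  void __atomic_clear(volatile void *ptr, int memory_order);
  int  __atomic_always_lock_free(size_t size, const volatile void *ptr);
  int  __atomic_is_lock_free(size_t size, const volatile void *ptr);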

With this change, libstdc++4.7's <atomic> passes libc++'s atomic test suite,
except for a couple of libstdc++ bugs and some cases where libc++'s test
suite checks properties that implementations have latitude to vary.

llvm-svn: 154640
Author: Richard Smith, 2012-04-13 00:45:38 +00:00
parent: 18104239b4
commit: 01ba47d7b6
7 changed files with 375 additions and 46 deletions


@@ -635,12 +635,12 @@ ATOMIC_BUILTIN(__atomic_sub_fetch, "v.", "t")
ATOMIC_BUILTIN(__atomic_and_fetch, "v.", "t")
ATOMIC_BUILTIN(__atomic_or_fetch, "v.", "t")
ATOMIC_BUILTIN(__atomic_xor_fetch, "v.", "t")
-BUILTIN(__atomic_test_and_set, "vv*i", "n")
-BUILTIN(__atomic_clear, "vb*i", "n")
+BUILTIN(__atomic_test_and_set, "bvD*i", "n")
+BUILTIN(__atomic_clear, "vvD*i", "n")
BUILTIN(__atomic_thread_fence, "vi", "n")
BUILTIN(__atomic_signal_fence, "vi", "n")
-BUILTIN(__atomic_always_lock_free, "izv*", "n")
-BUILTIN(__atomic_is_lock_free, "izv*", "n")
+BUILTIN(__atomic_always_lock_free, "izvCD*", "n")
+BUILTIN(__atomic_is_lock_free, "izvCD*", "n")
#undef ATOMIC_BUILTIN
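Decoding the signature strings above, per the legend at the top of Builtins.def
(b = bool, v = void, i = int, z = size_t, * = pointer, with C/const and
D/volatile qualifying the pointee; the "n" flag marks the builtin nothrow):

  // "bvD*i"  = bool (volatile void *, int)          __atomic_test_and_set
  // "vvD*i"  = void (volatile void *, int)          __atomic_clear
  // "izvCD*" = int  (size_t, const volatile void *) __atomic_{is,always}_lock_free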


@@ -4306,7 +4306,7 @@ bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E) {
}
bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
-switch (E->isBuiltinCall()) {
+switch (unsigned BuiltinOp = E->isBuiltinCall()) {
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
@@ -4365,6 +4365,7 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Error(E);
+case Builtin::BI__atomic_always_lock_free:
case Builtin::BI__atomic_is_lock_free:
case Builtin::BI__c11_atomic_is_lock_free: {
APSInt SizeVal;
@@ -4382,32 +4383,31 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
// Check power-of-two.
CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
-if (!Size.isPowerOfTwo())
-#if 0
-// FIXME: Suppress this folding until the ABI for the promotion width
-// settles.
-return Success(0, E);
-#else
-return Error(E);
-#endif
+if (Size.isPowerOfTwo()) {
+// Check against inlining width.
+unsigned InlineWidthBits =
+Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
+if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits)) {
+if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
+Size == CharUnits::One() ||
+E->getArg(1)->isNullPointerConstant(Info.Ctx,
+Expr::NPC_NeverValueDependent))
+// OK, we will inline appropriately-aligned operations of this size,
+// and _Atomic(T) is appropriately-aligned.
+return Success(1, E);
-#if 0
-// Check against promotion width.
-// FIXME: Suppress this folding until the ABI for the promotion width
-// settles.
-unsigned PromoteWidthBits =
-Info.Ctx.getTargetInfo().getMaxAtomicPromoteWidth();
-if (Size > Info.Ctx.toCharUnitsFromBits(PromoteWidthBits))
-return Success(0, E);
-#endif
+QualType PointeeType = E->getArg(1)->IgnoreImpCasts()->getType()->
+castAs<PointerType>()->getPointeeType();
+if (!PointeeType->isIncompleteType() &&
+Info.Ctx.getTypeAlignInChars(PointeeType) >= Size) {
+// OK, we will inline operations on this object.
+return Success(1, E);
+}
+}
+}
-// Check against inlining width.
-unsigned InlineWidthBits =
-Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
-if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits))
-return Success(1, E);
-return Error(E);
+return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
+Success(0, E) : Error(E);
}
}
}
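In summary (my reading of the new logic, not text from the patch): the size
must be a power of two no wider than the target's maximum inline atomic width,
and the object must be known suitably aligned, for the builtin to fold to 1.
On failure the builtins diverge:

  //                              provably inlined   not provable
  // __atomic_always_lock_free       fold to 1        fold to 0
  // __atomic_is_lock_free           fold to 1        not a constant;
  // __c11_atomic_is_lock_free       fold to 1        runtime library call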


@@ -966,6 +966,179 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(0);
}
case Builtin::BI__c11_atomic_is_lock_free:
case Builtin::BI__atomic_is_lock_free: {
// Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
// __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
// _Atomic(T) is always properly-aligned.
const char *LibCallName = "__atomic_is_lock_free";
CallArgList Args;
Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
getContext().getSizeType());
if (BuiltinID == Builtin::BI__atomic_is_lock_free)
Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
getContext().VoidPtrTy);
else
Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
getContext().VoidPtrTy);
const CGFunctionInfo &FuncInfo =
CGM.getTypes().arrangeFunctionCall(E->getType(), Args,
FunctionType::ExtInfo(),
RequiredArgs::All);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
}
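// For example, __c11_atomic_is_lock_free(3) lowers to roughly
//   call i32 @__atomic_is_lock_free(i32 3, i8* null)
// while the GNU form passes the object's address in place of null; see the
// updated CodeGen test below for the checked output.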
case Builtin::BI__atomic_test_and_set: {
// Look at the argument type to determine whether this is a volatile
// operation. The parameter type is always volatile.
QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
bool Volatile =
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Value *Ptr = EmitScalarExpr(E->getArg(0));
unsigned AddrSpace =
cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
Value *NewVal = Builder.getInt8(1);
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
AtomicRMWInst *Result = 0;
switch (ord) {
case 0: // memory_order_relaxed
default: // invalid order
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
Ptr, NewVal,
llvm::Monotonic);
break;
case 1: // memory_order_consume
case 2: // memory_order_acquire
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
Ptr, NewVal,
llvm::Acquire);
break;
case 3: // memory_order_release
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
Ptr, NewVal,
llvm::Release);
break;
case 4: // memory_order_acq_rel
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
Ptr, NewVal,
llvm::AcquireRelease);
break;
case 5: // memory_order_seq_cst
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
Ptr, NewVal,
llvm::SequentiallyConsistent);
break;
}
Result->setVolatile(Volatile);
return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
}
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
llvm::BasicBlock *BBs[5] = {
createBasicBlock("monotonic", CurFn),
createBasicBlock("acquire", CurFn),
createBasicBlock("release", CurFn),
createBasicBlock("acqrel", CurFn),
createBasicBlock("seqcst", CurFn)
};
llvm::AtomicOrdering Orders[5] = {
llvm::Monotonic, llvm::Acquire, llvm::Release,
llvm::AcquireRelease, llvm::SequentiallyConsistent
};
Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
Builder.SetInsertPoint(ContBB);
PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
for (unsigned i = 0; i < 5; ++i) {
Builder.SetInsertPoint(BBs[i]);
AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
Ptr, NewVal, Orders[i]);
RMW->setVolatile(Volatile);
Result->addIncoming(RMW, BBs[i]);
Builder.CreateBr(ContBB);
}
SI->addCase(Builder.getInt32(0), BBs[0]);
SI->addCase(Builder.getInt32(1), BBs[1]);
SI->addCase(Builder.getInt32(2), BBs[1]);
SI->addCase(Builder.getInt32(3), BBs[2]);
SI->addCase(Builder.getInt32(4), BBs[3]);
SI->addCase(Builder.getInt32(5), BBs[4]);
Builder.SetInsertPoint(ContBB);
return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
}
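// Sketch of the IR the non-constant-order path above produces:
//   switch i32 %order, label %monotonic [ i32 1, label %acquire
//                                         i32 2, label %acquire
//                                         i32 3, label %release
//                                         i32 4, label %acqrel
//                                         i32 5, label %seqcst ]
// with one 'atomicrmw xchg i8* %ptr, i8 1 <ordering>' per destination block
// and a phi in %atomic.continue collecting the previous flag value.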
case Builtin::BI__atomic_clear: {
QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
bool Volatile =
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Value *Ptr = EmitScalarExpr(E->getArg(0));
unsigned AddrSpace =
cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
Value *NewVal = Builder.getInt8(0);
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
Store->setAlignment(1);
switch (ord) {
case 0: // memory_order_relaxed
default: // invalid order
Store->setOrdering(llvm::Monotonic);
break;
case 3: // memory_order_release
Store->setOrdering(llvm::Release);
break;
case 5: // memory_order_seq_cst
Store->setOrdering(llvm::SequentiallyConsistent);
break;
}
return RValue::get(0);
}
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
llvm::BasicBlock *BBs[3] = {
createBasicBlock("monotonic", CurFn),
createBasicBlock("release", CurFn),
createBasicBlock("seqcst", CurFn)
};
llvm::AtomicOrdering Orders[3] = {
llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
};
Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
for (unsigned i = 0; i < 3; ++i) {
Builder.SetInsertPoint(BBs[i]);
StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
Store->setAlignment(1);
Store->setOrdering(Orders[i]);
Builder.CreateBr(ContBB);
}
SI->addCase(Builder.getInt32(0), BBs[0]);
SI->addCase(Builder.getInt32(3), BBs[1]);
SI->addCase(Builder.getInt32(5), BBs[2]);
Builder.SetInsertPoint(ContBB);
return RValue::get(0);
}
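// Together, __atomic_test_and_set and __atomic_clear suffice to build a spin
// lock; a usage sketch (hypothetical, assuming the memory_order enum):
//   static volatile char flag;
//   while (__atomic_test_and_set(&flag, memory_order_acquire))
//     ; // spin until the previous value was clear
//   /* ... critical section ... */
//   __atomic_clear(&flag, memory_order_release);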
case Builtin::BI__atomic_thread_fence:
case Builtin::BI__atomic_signal_fence:
case Builtin::BI__c11_atomic_thread_fence:


@@ -2904,13 +2904,11 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_sub:
-case AtomicExpr::AO__atomic_fetch_add:
-case AtomicExpr::AO__atomic_fetch_sub:
-case AtomicExpr::AO__atomic_add_fetch:
-case AtomicExpr::AO__atomic_sub_fetch:
if (MemTy->isPointerType()) {
// For pointer arithmetic, we're required to do a bit of math:
// adding 1 to an int* is not the same as adding 1 to a uintptr_t.
+// ... but only for the C11 builtins. The GNU builtins expect the
+// user to multiply by sizeof(T).
QualType Val1Ty = E->getVal1()->getType();
llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
CharUnits PointeeIncAmt =
@@ -2921,6 +2919,10 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
break;
}
// Fall through.
+case AtomicExpr::AO__atomic_fetch_add:
+case AtomicExpr::AO__atomic_fetch_sub:
+case AtomicExpr::AO__atomic_add_fetch:
+case AtomicExpr::AO__atomic_sub_fetch:
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__atomic_store_n:
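(Net effect of this file's two hunks, as I read them: the GNU
__atomic_fetch_add/sub and __atomic_add/sub_fetch cases now bypass the
pointer-scaling block and fall through to the generic path, so their operand
reaches the atomicrmw unscaled.)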


@@ -202,6 +202,20 @@ static void DefineExactWidthIntType(TargetInfo::IntType Ty,
ConstSuffix);
}
/// Get the value the ATOMIC_*_LOCK_FREE macro should have for a type with
/// the specified properties.
static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign,
unsigned InlineWidth) {
// Fully-aligned, power-of-2 sizes no larger than the inline
// width will be inlined as lock-free operations.
if (TypeWidth == TypeAlign && (TypeWidth & (TypeWidth - 1)) == 0 &&
TypeWidth <= InlineWidth)
return "2"; // "always lock free"
// We cannot be certain what operations the lib calls might be
// able to implement as lock-free on future processors.
return "1"; // "sometimes lock free"
}
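// Example: on i686-linux-gnu, long long is 64 bits wide but only 32-bit
// aligned, so the TypeWidth == TypeAlign check fails and LLONG gets "1"
// (sometimes lock free) while INT gets "2"; the updated Sema test below
// asserts exactly this split.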
/// \brief Add definitions required for a smooth interaction between
/// Objective-C++ automated reference counting and libstdc++ (4.2).
static void AddObjCXXARCLibstdcxxDefines(const LangOptions &LangOpts,
@@ -521,6 +535,32 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
else
Builder.defineMacro("__GNUC_STDC_INLINE__");
// The value written by __atomic_test_and_set.
// FIXME: This is target-dependent.
Builder.defineMacro("__GCC_ATOMIC_TEST_AND_SET_TRUEVAL", "1");
// Used by libstdc++ to implement ATOMIC_<foo>_LOCK_FREE.
unsigned InlineWidthBits = TI.getMaxAtomicInlineWidth();
#define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \
Builder.defineMacro("__GCC_ATOMIC_" #TYPE "_LOCK_FREE", \
getLockFreeValue(TI.get##Type##Width(), \
TI.get##Type##Align(), \
InlineWidthBits));
DEFINE_LOCK_FREE_MACRO(BOOL, Bool);
DEFINE_LOCK_FREE_MACRO(CHAR, Char);
DEFINE_LOCK_FREE_MACRO(CHAR16_T, Char16);
DEFINE_LOCK_FREE_MACRO(CHAR32_T, Char32);
DEFINE_LOCK_FREE_MACRO(WCHAR_T, WChar);
DEFINE_LOCK_FREE_MACRO(SHORT, Short);
DEFINE_LOCK_FREE_MACRO(INT, Int);
DEFINE_LOCK_FREE_MACRO(LONG, Long);
DEFINE_LOCK_FREE_MACRO(LLONG, LongLong);
Builder.defineMacro("__GCC_ATOMIC_POINTER_LOCK_FREE",
getLockFreeValue(TI.getPointerWidth(0),
TI.getPointerAlign(0),
InlineWidthBits));
#undef DEFINE_LOCK_FREE_MACRO
if (LangOpts.NoInlineDefine)
Builder.defineMacro("__NO_INLINE__");


@@ -7,9 +7,7 @@
#ifndef ALREADY_INCLUDED
#define ALREADY_INCLUDED
-// Basic IRGen tests for __c11_atomic_*
-// FIXME: Need to implement __c11_atomic_is_lock_free
+// Basic IRGen tests for __c11_atomic_* and GNU __atomic_*
typedef enum memory_order {
memory_order_relaxed, memory_order_consume, memory_order_acquire,
@@ -131,7 +129,8 @@ int *fp2a(int **p) {
// CHECK: @fp2a
// CHECK: store i32 4
// CHECK: atomicrmw sub {{.*}} monotonic
-return __atomic_fetch_sub(p, 1, memory_order_relaxed);
+// Note, the GNU builtins do not multiply by sizeof(T)!
+return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}
_Complex float fc(_Atomic(_Complex float) *c) {
@@ -161,8 +160,55 @@ _Bool fsb(_Bool *c) {
return __atomic_exchange_n(c, 1, memory_order_seq_cst);
}
-int lock_free() {
char flag1;
volatile char flag2;
void test_and_set() {
// CHECK: atomicrmw xchg i8* @flag1, i8 1 seq_cst
__atomic_test_and_set(&flag1, memory_order_seq_cst);
// CHECK: atomicrmw volatile xchg i8* @flag2, i8 1 acquire
__atomic_test_and_set(&flag2, memory_order_acquire);
// CHECK: store atomic volatile i8 0, i8* @flag2 release
__atomic_clear(&flag2, memory_order_release);
// CHECK: store atomic i8 0, i8* @flag1 seq_cst
__atomic_clear(&flag1, memory_order_seq_cst);
}
struct Sixteen {
char c[16];
} sixteen;
struct Seventeen {
char c[17];
} seventeen;
int lock_free(struct Incomplete *incomplete) {
// CHECK: @lock_free
// CHECK: call i32 @__atomic_is_lock_free(i32 3, i8* null)
__c11_atomic_is_lock_free(3);
// CHECK: call i32 @__atomic_is_lock_free(i32 16, i8* {{.*}}@sixteen{{.*}})
__atomic_is_lock_free(16, &sixteen);
// CHECK: call i32 @__atomic_is_lock_free(i32 17, i8* {{.*}}@seventeen{{.*}})
__atomic_is_lock_free(17, &seventeen);
// CHECK: call i32 @__atomic_is_lock_free(i32 4, {{.*}})
__atomic_is_lock_free(4, incomplete);
char cs[20];
// CHECK: call i32 @__atomic_is_lock_free(i32 4, {{.*}})
__atomic_is_lock_free(4, cs+1);
// CHECK-NOT: call
__atomic_always_lock_free(3, 0);
__atomic_always_lock_free(16, 0);
__atomic_always_lock_free(17, 0);
__atomic_always_lock_free(16, &sixteen);
__atomic_always_lock_free(17, &seventeen);
int n;
__atomic_is_lock_free(4, &n);
// CHECK: ret i32 1
return __c11_atomic_is_lock_free(sizeof(_Atomic(int)));
}
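// Worth noting (my reading of the test above): __atomic_is_lock_free(4, cs+1)
// cannot be folded because a char-buffer offset has no known 4-byte alignment,
// so it becomes a library call, whereas __atomic_is_lock_free(4, &n) folds to
// 1 and the function returns the constant i32 1.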


@@ -1,14 +1,7 @@
-// RUN: %clang_cc1 %s -verify -fsyntax-only
+// RUN: %clang_cc1 %s -verify -fsyntax-only -triple=i686-linux-gnu
// Basic parsing/Sema tests for __c11_atomic_*
-// FIXME: Need to implement:
-// __c11_atomic_is_lock_free
-// __atomic_is_lock_free
-// __atomic_always_lock_free
-// __atomic_test_and_set
-// __atomic_clear
typedef enum memory_order {
memory_order_relaxed, memory_order_consume, memory_order_acquire,
memory_order_release, memory_order_acq_rel, memory_order_seq_cst
@@ -16,6 +9,73 @@ typedef enum memory_order {
struct S { char c[3]; };
_Static_assert(__GCC_ATOMIC_BOOL_LOCK_FREE == 2, "");
_Static_assert(__GCC_ATOMIC_CHAR_LOCK_FREE == 2, "");
_Static_assert(__GCC_ATOMIC_CHAR16_T_LOCK_FREE == 2, "");
_Static_assert(__GCC_ATOMIC_CHAR32_T_LOCK_FREE == 2, "");
_Static_assert(__GCC_ATOMIC_WCHAR_T_LOCK_FREE == 2, "");
_Static_assert(__GCC_ATOMIC_SHORT_LOCK_FREE == 2, "");
_Static_assert(__GCC_ATOMIC_INT_LOCK_FREE == 2, "");
_Static_assert(__GCC_ATOMIC_LONG_LOCK_FREE == 2, "");
_Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == 1, "");
_Static_assert(__GCC_ATOMIC_POINTER_LOCK_FREE == 2, "");
_Static_assert(__c11_atomic_is_lock_free(1), "");
_Static_assert(__c11_atomic_is_lock_free(2), "");
_Static_assert(__c11_atomic_is_lock_free(3), ""); // expected-error {{not an integral constant expression}}
_Static_assert(__c11_atomic_is_lock_free(4), "");
_Static_assert(__c11_atomic_is_lock_free(8), "");
_Static_assert(__c11_atomic_is_lock_free(16), ""); // expected-error {{not an integral constant expression}}
_Static_assert(__c11_atomic_is_lock_free(17), ""); // expected-error {{not an integral constant expression}}
_Static_assert(__atomic_is_lock_free(1, 0), "");
_Static_assert(__atomic_is_lock_free(2, 0), "");
_Static_assert(__atomic_is_lock_free(3, 0), ""); // expected-error {{not an integral constant expression}}
_Static_assert(__atomic_is_lock_free(4, 0), "");
_Static_assert(__atomic_is_lock_free(8, 0), "");
_Static_assert(__atomic_is_lock_free(16, 0), ""); // expected-error {{not an integral constant expression}}
_Static_assert(__atomic_is_lock_free(17, 0), ""); // expected-error {{not an integral constant expression}}
char i8;
short i16;
int i32;
int __attribute__((vector_size(8))) i64;
struct Incomplete *incomplete;
_Static_assert(__atomic_is_lock_free(1, &i8), "");
_Static_assert(__atomic_is_lock_free(1, &i64), "");
_Static_assert(__atomic_is_lock_free(2, &i8), ""); // expected-error {{not an integral constant expression}}
_Static_assert(__atomic_is_lock_free(2, &i16), "");
_Static_assert(__atomic_is_lock_free(2, &i64), "");
_Static_assert(__atomic_is_lock_free(4, &i16), ""); // expected-error {{not an integral constant expression}}
_Static_assert(__atomic_is_lock_free(4, &i32), "");
_Static_assert(__atomic_is_lock_free(4, &i64), "");
_Static_assert(__atomic_is_lock_free(8, &i32), ""); // expected-error {{not an integral constant expression}}
_Static_assert(__atomic_is_lock_free(8, &i64), "");
_Static_assert(__atomic_always_lock_free(1, 0), "");
_Static_assert(__atomic_always_lock_free(2, 0), "");
_Static_assert(!__atomic_always_lock_free(3, 0), "");
_Static_assert(__atomic_always_lock_free(4, 0), "");
_Static_assert(__atomic_always_lock_free(8, 0), "");
_Static_assert(!__atomic_always_lock_free(16, 0), "");
_Static_assert(!__atomic_always_lock_free(17, 0), "");
_Static_assert(__atomic_always_lock_free(1, incomplete), "");
_Static_assert(!__atomic_always_lock_free(2, incomplete), "");
_Static_assert(!__atomic_always_lock_free(4, incomplete), "");
_Static_assert(__atomic_always_lock_free(1, &i8), "");
_Static_assert(__atomic_always_lock_free(1, &i64), "");
_Static_assert(!__atomic_always_lock_free(2, &i8), "");
_Static_assert(__atomic_always_lock_free(2, &i16), "");
_Static_assert(__atomic_always_lock_free(2, &i64), "");
_Static_assert(!__atomic_always_lock_free(4, &i16), "");
_Static_assert(__atomic_always_lock_free(4, &i32), "");
_Static_assert(__atomic_always_lock_free(4, &i64), "");
_Static_assert(!__atomic_always_lock_free(8, &i32), "");
_Static_assert(__atomic_always_lock_free(8, &i64), "");
void f(_Atomic(int) *i, _Atomic(int*) *p, _Atomic(float) *d,
int *I, int **P, float *D, struct S *s1, struct S *s2) {
__c11_atomic_init(I, 5); // expected-error {{pointer to _Atomic}}
@@ -94,4 +154,12 @@ void f(_Atomic(int) *i, _Atomic(int*) *p, _Atomic(float) *d,
_Bool cmpexch_7 = __atomic_compare_exchange(I, I, 5, 1, memory_order_seq_cst, memory_order_seq_cst); // expected-warning {{passing 'int' to parameter of type 'int *'}}
_Bool cmpexch_8 = __atomic_compare_exchange(I, P, I, 0, memory_order_seq_cst, memory_order_seq_cst); // expected-warning {{; dereference with *}}
_Bool cmpexch_9 = __atomic_compare_exchange(I, I, I, 0, memory_order_seq_cst, memory_order_seq_cst);
const volatile int flag_k = 0;
volatile int flag = 0;
(void)(int)__atomic_test_and_set(&flag_k, memory_order_seq_cst); // expected-warning {{passing 'const volatile int *' to parameter of type 'volatile void *'}}
(void)(int)__atomic_test_and_set(&flag, memory_order_seq_cst);
__atomic_clear(&flag_k, memory_order_seq_cst); // expected-warning {{passing 'const volatile int *' to parameter of type 'volatile void *'}}
__atomic_clear(&flag, memory_order_seq_cst);
(int)__atomic_clear(&flag, memory_order_seq_cst); // expected-error {{operand of type 'void'}}
}