re PR libstdc++/40297 ([C++0x] debug mode vs atomics)
2009-06-24  Jonathan Wakely  <jwakely.gcc@gmail.com>

	PR libstdc++/40297
	* include/bits/atomic_0.h: Reverse debug assertions.
	* include/bits/atomic_2.h: Likewise.

From-SVN: r148893
This commit is contained in:
parent fb6234e0f8
commit 27f41cfea5
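What went wrong: __glibcxx_assert(cond) is a debug-mode check that aborts when cond is false, but these assertions were written with == where != was intended. Since no memory order can equal two different enumerators at once, the consecutive == assertions failed on every call: debug mode rejected exactly the valid orders and let the invalid ones through. The reversed assertions below encode the C++0x rules: a store may not use acquire, acq_rel or consume; a load may not use release or acq_rel; and the failure order of a compare-exchange may not be release or acq_rel, nor stronger than the success order.

A quick illustration of what the corrected checks permit and reject. This is hypothetical user code, not part of the patch; the header was named <cstdatomic> in the C++0x era and <atomic> in later standards.

    #include <cstdatomic>

    std::atomic<int> counter(0);

    void demo()
    {
      counter.store(1, std::memory_order_release);      // valid store order
      int v = counter.load(std::memory_order_acquire);  // valid load order
      (void)v;

      // Invalid per C++0x -- with the fix these calls trip the debug
      // assertions, instead of the valid calls above tripping them:
      // counter.store(2, std::memory_order_acquire);
      // counter.load(std::memory_order_release);
    }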
libstdc++-v3/ChangeLog:

@@ -1,3 +1,9 @@
+2009-06-24  Jonathan Wakely  <jwakely.gcc@gmail.com>
+
+	PR libstdc++/40297
+	* include/bits/atomic_0.h: Reverse debug assertions.
+	* include/bits/atomic_2.h: Likewise.
+
 2009-06-23  DJ Delorie  <dj@redhat.com>
 
 	Add MeP port.
libstdc++-v3/include/bits/atomic_0.h:

@@ -119,17 +119,17 @@ namespace __atomic0
       void
       store(void* __v, memory_order __m = memory_order_seq_cst) volatile
       {
-        __glibcxx_assert(__m == memory_order_acquire);
-        __glibcxx_assert(__m == memory_order_acq_rel);
-        __glibcxx_assert(__m == memory_order_consume);
+        __glibcxx_assert(__m != memory_order_acquire);
+        __glibcxx_assert(__m != memory_order_acq_rel);
+        __glibcxx_assert(__m != memory_order_consume);
         _ATOMIC_STORE_(this, __v, __m);
       }
 
       void*
       load(memory_order __m = memory_order_seq_cst) const volatile
       {
-        __glibcxx_assert(__m == memory_order_release);
-        __glibcxx_assert(__m == memory_order_acq_rel);
+        __glibcxx_assert(__m != memory_order_release);
+        __glibcxx_assert(__m != memory_order_acq_rel);
         return _ATOMIC_LOAD_(this, __m);
       }
 
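The polarity is everything here: with the old form, __glibcxx_assert(__m == memory_order_acquire) aborted for relaxed, release and seq_cst, the three orders that are actually legal for a store. A minimal stand-alone sketch of the corrected store-side rule; the enum and function names are illustrative, not the library's internals:

    #include <cassert>

    // Illustrative copy of the enumerators (the real type is
    // std::memory_order); declaration order matches the standard.
    enum memory_order { memory_order_relaxed, memory_order_consume,
                        memory_order_acquire, memory_order_release,
                        memory_order_acq_rel, memory_order_seq_cst };

    // The fixed assertions: abort only on the three orders that a
    // store may not use.
    void check_store_order(memory_order m)
    {
      assert(m != memory_order_acquire);
      assert(m != memory_order_acq_rel);
      assert(m != memory_order_consume);
    }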
@@ -141,8 +141,8 @@ namespace __atomic0
       compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
                             memory_order __m2) volatile
       {
-        __glibcxx_assert(__m2 == memory_order_release);
-        __glibcxx_assert(__m2 == memory_order_acq_rel);
+        __glibcxx_assert(__m2 != memory_order_release);
+        __glibcxx_assert(__m2 != memory_order_acq_rel);
         __glibcxx_assert(__m2 <= __m1);
         return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
       }
@@ -159,8 +159,8 @@ namespace __atomic0
       compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                               memory_order __m2) volatile
       {
-        __glibcxx_assert(__m2 == memory_order_release);
-        __glibcxx_assert(__m2 == memory_order_acq_rel);
+        __glibcxx_assert(__m2 != memory_order_release);
+        __glibcxx_assert(__m2 != memory_order_acq_rel);
         __glibcxx_assert(__m2 <= __m1);
         return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
       }
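The unchanged third assertion, __glibcxx_assert(__m2 <= __m1), exploits the declaration order of the memory_order enumerators to express "failure order no stronger than success order" as a plain integer comparison. Pulling the whole failure-order rule into one predicate, again as an illustrative sketch using the enum above:

    // A failed compare-exchange performs only a load, so the failure
    // order may not contain a release component, and it may not be
    // stronger than the order requested for success.
    bool valid_failure_order(memory_order success, memory_order failure)
    {
      return failure != memory_order_release
          && failure != memory_order_acq_rel
          && failure <= success;          // relies on enumerator order
    }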
@@ -310,17 +310,17 @@ namespace __atomic0
       store(__integral_type __i,
             memory_order __m = memory_order_seq_cst) volatile
       {
-        __glibcxx_assert(__m == memory_order_acquire);
-        __glibcxx_assert(__m == memory_order_acq_rel);
-        __glibcxx_assert(__m == memory_order_consume);
+        __glibcxx_assert(__m != memory_order_acquire);
+        __glibcxx_assert(__m != memory_order_acq_rel);
+        __glibcxx_assert(__m != memory_order_consume);
         _ATOMIC_STORE_(this, __i, __m);
       }
 
       __integral_type
       load(memory_order __m = memory_order_seq_cst) const volatile
       {
-        __glibcxx_assert(__m == memory_order_release);
-        __glibcxx_assert(__m == memory_order_acq_rel);
+        __glibcxx_assert(__m != memory_order_release);
+        __glibcxx_assert(__m != memory_order_acq_rel);
         return _ATOMIC_LOAD_(this, __m);
       }
 
@@ -333,8 +333,8 @@ namespace __atomic0
       compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                             memory_order __m1, memory_order __m2) volatile
       {
-        __glibcxx_assert(__m2 == memory_order_release);
-        __glibcxx_assert(__m2 == memory_order_acq_rel);
+        __glibcxx_assert(__m2 != memory_order_release);
+        __glibcxx_assert(__m2 != memory_order_acq_rel);
         __glibcxx_assert(__m2 <= __m1);
         return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
       }
@@ -351,8 +351,8 @@ namespace __atomic0
       compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                               memory_order __m1, memory_order __m2) volatile
       {
-        __glibcxx_assert(__m2 == memory_order_release);
-        __glibcxx_assert(__m2 == memory_order_acq_rel);
+        __glibcxx_assert(__m2 != memory_order_release);
+        __glibcxx_assert(__m2 != memory_order_acq_rel);
         __glibcxx_assert(__m2 <= __m1);
         return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
       }
libstdc++-v3/include/bits/atomic_2.h:

@@ -65,6 +65,10 @@ namespace __atomic2
       void
       clear(memory_order __m = memory_order_seq_cst) volatile
       {
+        __glibcxx_assert(__m != memory_order_consume);
+        __glibcxx_assert(__m != memory_order_acquire);
+        __glibcxx_assert(__m != memory_order_acq_rel);
+
         __sync_lock_release(&_M_i);
         if (__m != memory_order_acquire && __m != memory_order_acq_rel)
           __sync_synchronize();
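In atomic_2.h the clear path gains the analogous checks rather than having existing ones flipped: clearing a flag is a store operation, so consume, acquire and acq_rel are disallowed. Hypothetical usage, not from the patch:

    #include <cstdatomic>   // <atomic> in later standards

    std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;

    void unlock()
    {
      lock_flag.clear(std::memory_order_release);    // allowed
      // lock_flag.clear(std::memory_order_acquire); // would now assert
    }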
@@ -93,9 +97,9 @@ namespace __atomic2
       void
       store(void* __v, memory_order __m = memory_order_seq_cst) volatile
       {
-        __glibcxx_assert(__m == memory_order_acquire);
-        __glibcxx_assert(__m == memory_order_acq_rel);
-        __glibcxx_assert(__m == memory_order_consume);
+        __glibcxx_assert(__m != memory_order_acquire);
+        __glibcxx_assert(__m != memory_order_acq_rel);
+        __glibcxx_assert(__m != memory_order_consume);
 
         if (__m == memory_order_relaxed)
           _M_i = __v;
@@ -111,8 +115,8 @@ namespace __atomic2
       void*
       load(memory_order __m = memory_order_seq_cst) const volatile
       {
-        __glibcxx_assert(__m == memory_order_release);
-        __glibcxx_assert(__m == memory_order_acq_rel);
+        __glibcxx_assert(__m != memory_order_release);
+        __glibcxx_assert(__m != memory_order_acq_rel);
 
         __sync_synchronize();
         void* __ret = _M_i;
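These __atomic2 hunks also show how the lock-free path maps onto GCC's __sync builtins: a relaxed store is a plain assignment, and a non-relaxed load issues __sync_synchronize(), the full-barrier builtin, before the plain read. A free-standing illustration of that load pattern; the hunk above is truncated mid-function, so this is a sketch, not the library's code:

    // __sync_synchronize() is GCC's full memory barrier builtin.
    int load_after_full_barrier(const volatile int* p)
    {
      __sync_synchronize();   // order all earlier accesses before the read
      return *p;              // plain read of the atomic's storage
    }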
@@ -144,8 +148,8 @@ namespace __atomic2
       compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                               memory_order __m2) volatile
       {
-        __glibcxx_assert(__m2 == memory_order_release);
-        __glibcxx_assert(__m2 == memory_order_acq_rel);
+        __glibcxx_assert(__m2 != memory_order_release);
+        __glibcxx_assert(__m2 != memory_order_acq_rel);
         __glibcxx_assert(__m2 <= __m1);
 
         void* __v1o = __v1;
@@ -284,9 +288,9 @@ namespace __atomic2
       store(__integral_type __i,
             memory_order __m = memory_order_seq_cst) volatile
       {
-        __glibcxx_assert(__m == memory_order_acquire);
-        __glibcxx_assert(__m == memory_order_acq_rel);
-        __glibcxx_assert(__m == memory_order_consume);
+        __glibcxx_assert(__m != memory_order_acquire);
+        __glibcxx_assert(__m != memory_order_acq_rel);
+        __glibcxx_assert(__m != memory_order_consume);
 
         if (__m == memory_order_relaxed)
           _M_i = __i;
@@ -302,8 +306,8 @@ namespace __atomic2
       __integral_type
       load(memory_order __m = memory_order_seq_cst) const volatile
       {
-        __glibcxx_assert(__m == memory_order_release);
-        __glibcxx_assert(__m == memory_order_acq_rel);
+        __glibcxx_assert(__m != memory_order_release);
+        __glibcxx_assert(__m != memory_order_acq_rel);
 
         __sync_synchronize();
         __integral_type __ret = _M_i;
@@ -336,8 +340,8 @@ namespace __atomic2
       compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                               memory_order __m1, memory_order __m2) volatile
       {
-        __glibcxx_assert(__m2 == memory_order_release);
-        __glibcxx_assert(__m2 == memory_order_acq_rel);
+        __glibcxx_assert(__m2 != memory_order_release);
+        __glibcxx_assert(__m2 != memory_order_acq_rel);
         __glibcxx_assert(__m2 <= __m1);
 
         __integral_type __i1o = __i1;