mirror of git://gcc.gnu.org/git/gcc.git
Rename some internal atomic macros to have a less generic prefix.
From-SVN: r185074
commit ef6c2c5316
parent ca538e973f
ChangeLog
@@ -1,3 +1,15 @@
+2012-03-07  Walter Lee  <walt@tilera.com>
+
+	* config/tilepro/atomic.c: Rename "atomic_" prefix to
+	"arch_atomic_".
+	(atomic_xor): Rename and move definition to
+	config/tilepro/atomic.h.
+	(atomic_nand): Ditto.
+	* config/tilepro/atomic.h: Rename "atomic_" prefix to
+	"arch_atomic_".
+	(arch_atomic_xor): Move from config/tilepro/atomic.c.
+	(arch_atomic_nand): Ditto.
+
 2012-03-07  Georg-Johann Lay  <avr@gjlay.de>
 
 	PR target/52507
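For orientation (an editorial sketch, not code from this commit): only the arch-internal helper names gain the arch_ prefix, while the compiler-facing __atomic_fetch_<op>_<size> entry points that atomic.c builds keep their names. Hand-expanding the __atomic_fetch_and_do wrapper from the first atomic.c hunk below for a hypothetical 4-byte add gives roughly the following, with generic __sync builtins standing in for the TILE-specific barrier and fetch-add helpers so the sketch is self-contained:

/* Editorial sketch: hand-expanded __atomic_fetch_and_do (int, 4, add)
   after the rename.  The real pre/post barriers honor the memory-model
   argument and the real helper is the arch_atomic_add macro from
   atomic.h; generic __sync builtins stand in for both here.  */
static void sketch_pre_barrier (int model)  { (void) model; __sync_synchronize (); }
static void sketch_post_barrier (int model) { (void) model; __sync_synchronize (); }
static int sketch_arch_atomic_add (int *p, int i) { return __sync_fetch_and_add (p, i); }

int
sketch_atomic_fetch_add_4 (int *p, int i, int model)  /* mirrors __atomic_fetch_add_4 */
{
  sketch_pre_barrier (model);
  int rv = sketch_arch_atomic_add (p, i);  /* was atomic_add (p, i) before the rename */
  sketch_post_barrier (model);
  return rv;
}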

config/tilepro/atomic.c
@@ -63,18 +63,12 @@ post_atomic_barrier (int model)
 #define __unused __attribute__((unused))
 
-/* Provide additional methods not implemented by atomic.h.  */
-#define atomic_xor(mem, mask) \
-  __atomic_update_cmpxchg(mem, mask, __old ^ __value)
-#define atomic_nand(mem, mask) \
-  __atomic_update_cmpxchg(mem, mask, ~(__old & __value))
-
 #define __atomic_fetch_and_do(type, size, opname) \
 type \
 __atomic_fetch_##opname##_##size(type* p, type i, int model) \
 { \
   pre_atomic_barrier(model); \
-  type rv = atomic_##opname(p, i); \
+  type rv = arch_atomic_##opname(p, i); \
   post_atomic_barrier(model); \
   return rv; \
 }
 
@@ -96,7 +90,7 @@ type \
 __atomic_##opname##_fetch_##size(type* p, type i, int model) \
 { \
   pre_atomic_barrier(model); \
-  type rv = atomic_##opname(p, i) op i; \
+  type rv = arch_atomic_##opname(p, i) op i; \
   post_atomic_barrier(model); \
   return rv; \
 }
@@ -120,7 +114,7 @@ __atomic_compare_exchange_##size(volatile type* ptr, type* oldvalp, \
 { \
   type oldval = *oldvalp; \
   pre_atomic_barrier(models); \
-  type retval = atomic_val_compare_and_exchange(ptr, oldval, newval); \
+  type retval = arch_atomic_val_compare_and_exchange(ptr, oldval, newval); \
   post_atomic_barrier(models); \
   bool success = (retval == oldval); \
   *oldvalp = retval; \
@@ -131,7 +125,7 @@ type \
 __atomic_exchange_##size(volatile type* ptr, type val, int model) \
 { \
   pre_atomic_barrier(model); \
-  type retval = atomic_exchange(ptr, val); \
+  type retval = arch_atomic_exchange(ptr, val); \
   post_atomic_barrier(model); \
   return retval; \
 }
@@ -159,7 +153,7 @@ __atomic_compare_exchange_##size(volatile type* ptr, type* guess, \
   type oldval = (oldword >> shift) & valmask; \
   if (__builtin_expect((oldval == *guess), 1)) { \
     unsigned int word = (oldword & bgmask) | ((val & valmask) << shift); \
-    oldword = atomic_val_compare_and_exchange(p, oldword, word); \
+    oldword = arch_atomic_val_compare_and_exchange(p, oldword, word); \
     oldval = (oldword >> shift) & valmask; \
   } \
   post_atomic_barrier(models); \
@@ -187,7 +181,7 @@ proto \
     oldval = (oldword >> shift) & valmask; \
     val = expr; \
     unsigned int word = (oldword & bgmask) | ((val & valmask) << shift); \
-    xword = atomic_val_compare_and_exchange(p, oldword, word); \
+    xword = arch_atomic_val_compare_and_exchange(p, oldword, word); \
   } while (__builtin_expect(xword != oldword, 0)); \
   bottom \
 }

config/tilepro/atomic.h
@@ -104,8 +104,8 @@
 
 /* 32-bit integer compare-and-exchange.  */
 static __inline __attribute__ ((always_inline))
-int atomic_val_compare_and_exchange_4 (volatile int *mem,
-                                       int oldval, int newval)
+int arch_atomic_val_compare_and_exchange_4 (volatile int *mem,
+                                            int oldval, int newval)
 {
 #ifdef __tilegx__
   __insn_mtspr (SPR_CMPEXCH_VALUE, oldval);
@@ -123,9 +123,9 @@ static __inline __attribute__ ((always_inline))
 
 /* 64-bit integer compare-and-exchange.  */
 static __inline __attribute__ ((always_inline))
-int64_t atomic_val_compare_and_exchange_8 (volatile int64_t * mem,
-                                           int64_t oldval,
-                                           int64_t newval)
+int64_t arch_atomic_val_compare_and_exchange_8 (volatile int64_t * mem,
+                                                int64_t oldval,
+                                                int64_t newval)
 {
 #ifdef __tilegx__
   __insn_mtspr (SPR_CMPEXCH_VALUE, oldval);
@@ -146,41 +146,41 @@ static __inline __attribute__ ((always_inline))
 
 /* This non-existent symbol is called for sizes other than "4" and "8",
    indicating a bug in the caller.  */
-extern int __atomic_error_bad_argument_size (void)
+extern int __arch_atomic_error_bad_argument_size (void)
   __attribute__ ((warning ("sizeof atomic argument not 4 or 8")));
 
 
-#define atomic_val_compare_and_exchange(mem, o, n) \
+#define arch_atomic_val_compare_and_exchange(mem, o, n) \
   ({ \
     (__typeof(*(mem)))(__typeof(*(mem)-*(mem))) \
       ((sizeof(*(mem)) == 8) ? \
-       atomic_val_compare_and_exchange_8( \
+       arch_atomic_val_compare_and_exchange_8( \
         (volatile int64_t*)(mem), (__typeof((o)-(o)))(o), \
         (__typeof((n)-(n)))(n)) : \
       (sizeof(*(mem)) == 4) ? \
-       atomic_val_compare_and_exchange_4( \
+       arch_atomic_val_compare_and_exchange_4( \
         (volatile int*)(mem), (__typeof((o)-(o)))(o), \
         (__typeof((n)-(n)))(n)) : \
-       __atomic_error_bad_argument_size()); \
+       __arch_atomic_error_bad_argument_size()); \
   })
 
-#define atomic_bool_compare_and_exchange(mem, o, n) \
+#define arch_atomic_bool_compare_and_exchange(mem, o, n) \
   ({ \
     __typeof(o) __o = (o); \
     __builtin_expect( \
-      __o == atomic_val_compare_and_exchange((mem), __o, (n)), 1); \
+      __o == arch_atomic_val_compare_and_exchange((mem), __o, (n)), 1); \
   })
 
 
 /* Loop with compare_and_exchange until we guess the correct value.
    Normally "expr" will be an expression using __old and __value.  */
-#define __atomic_update_cmpxchg(mem, value, expr) \
+#define __arch_atomic_update_cmpxchg(mem, value, expr) \
   ({ \
     __typeof(value) __value = (value); \
     __typeof(*(mem)) *__mem = (mem), __old = *__mem, __guess; \
     do { \
       __guess = __old; \
-      __old = atomic_val_compare_and_exchange(__mem, __old, (expr)); \
+      __old = arch_atomic_val_compare_and_exchange(__mem, __old, (expr)); \
     } while (__builtin_expect(__old != __guess, 0)); \
     __old; \
   })
@@ -189,14 +189,14 @@ extern int __atomic_error_bad_argument_size (void)
 
 /* Generic atomic op with 8- or 4-byte variant.
    The _mask, _addend, and _expr arguments are ignored on tilegx.  */
-#define __atomic_update(mem, value, op, _mask, _addend, _expr) \
+#define __arch_atomic_update(mem, value, op, _mask, _addend, _expr) \
   ({ \
     ((__typeof(*(mem))) \
      ((sizeof(*(mem)) == 8) ? (__typeof(*(mem)-*(mem)))__insn_##op( \
        (void *)(mem), (int64_t)(__typeof((value)-(value)))(value)) : \
       (sizeof(*(mem)) == 4) ? (int)__insn_##op##4( \
        (void *)(mem), (int32_t)(__typeof((value)-(value)))(value)) : \
-      __atomic_error_bad_argument_size())); \
+      __arch_atomic_error_bad_argument_size())); \
   })
 
 #else
@@ -211,7 +211,7 @@ extern int __atomic_error_bad_argument_size (void)
    Only 32-bit support is provided.  */
 static __inline __attribute__ ((always_inline))
 int
-__atomic_update_4 (volatile int *mem, int mask, int addend)
+__arch_atomic_update_4 (volatile int *mem, int mask, int addend)
 {
   int result;
   __asm__ __volatile__ ("swint1":"=R00" (result),
@@ -224,48 +224,55 @@ static __inline __attribute__ ((always_inline))
 
 /* Generic atomic op with 8- or 4-byte variant.
    The _op argument is ignored on tilepro.  */
-#define __atomic_update(mem, value, _op, mask, addend, expr) \
+#define __arch_atomic_update(mem, value, _op, mask, addend, expr) \
   ({ \
     (__typeof(*(mem)))(__typeof(*(mem)-*(mem))) \
       ((sizeof(*(mem)) == 8) ? \
-       __atomic_update_cmpxchg((mem), (value), (expr)) : \
+       __arch_atomic_update_cmpxchg((mem), (value), (expr)) : \
       (sizeof(*(mem)) == 4) ? \
-       __atomic_update_4((volatile int*)(mem), (__typeof((mask)-(mask)))(mask), \
-                         (__typeof((addend)-(addend)))(addend)) : \
-       __atomic_error_bad_argument_size()); \
+       __arch_atomic_update_4((volatile int*)(mem), \
+                              (__typeof((mask)-(mask)))(mask), \
+                              (__typeof((addend)-(addend)))(addend)) : \
+       __arch_atomic_error_bad_argument_size()); \
   })
 
 #endif /* __tilegx__ */
 
 
-#define atomic_exchange(mem, newvalue) \
-  __atomic_update(mem, newvalue, exch, 0, newvalue, __value)
+#define arch_atomic_exchange(mem, newvalue) \
+  __arch_atomic_update(mem, newvalue, exch, 0, newvalue, __value)
 
-#define atomic_add(mem, value) \
-  __atomic_update(mem, value, fetchadd, -1, value, __old + __value)
+#define arch_atomic_add(mem, value) \
+  __arch_atomic_update(mem, value, fetchadd, -1, value, __old + __value)
 
-#define atomic_sub(mem, value) atomic_add((mem), -(value))
+#define arch_atomic_sub(mem, value) arch_atomic_add((mem), -(value))
 
-#define atomic_increment(mem) atomic_add((mem), 1)
+#define arch_atomic_increment(mem) arch_atomic_add((mem), 1)
 
-#define atomic_decrement(mem) atomic_add((mem), -1)
+#define arch_atomic_decrement(mem) arch_atomic_add((mem), -1)
 
-#define atomic_and(mem, mask) \
-  __atomic_update(mem, mask, fetchand, mask, 0, __old & __value)
+#define arch_atomic_and(mem, mask) \
+  __arch_atomic_update(mem, mask, fetchand, mask, 0, __old & __value)
 
-#define atomic_or(mem, mask) \
-  __atomic_update(mem, mask, fetchor, ~mask, mask, __old | __value)
+#define arch_atomic_or(mem, mask) \
+  __arch_atomic_update(mem, mask, fetchor, ~mask, mask, __old | __value)
 
-#define atomic_bit_set(mem, bit) \
+#define arch_atomic_xor(mem, mask) \
+  __arch_atomic_update_cmpxchg(mem, mask, __old ^ __value)
+
+#define arch_atomic_nand(mem, mask) \
+  __arch_atomic_update_cmpxchg(mem, mask, ~(__old & __value))
+
+#define arch_atomic_bit_set(mem, bit) \
   ({ \
     __typeof(*(mem)) __mask = (__typeof(*(mem)))1 << (bit); \
-    __mask & atomic_or((mem), __mask); \
+    __mask & arch_atomic_or((mem), __mask); \
   })
 
-#define atomic_bit_clear(mem, bit) \
+#define arch_atomic_bit_clear(mem, bit) \
   ({ \
     __typeof(*(mem)) __mask = (__typeof(*(mem)))1 << (bit); \
-    __mask & atomic_and((mem), ~__mask); \
+    __mask & arch_atomic_and((mem), ~__mask); \
  })
 
 #ifdef __tilegx__
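The arch_atomic_xor and arch_atomic_nand macros added in the hunk above (moved here from atomic.c) are built on the compare-and-exchange retry loop of __arch_atomic_update_cmpxchg. A self-contained editorial sketch of that pattern, with GCC's generic __sync_val_compare_and_swap builtin standing in for the TILE-specific arch_atomic_val_compare_and_exchange helper:

#include <stdint.h>

/* Editorial sketch of the __arch_atomic_update_cmpxchg pattern: guess
   the current value, compute the new one, and retry the CAS until no
   other core has raced with us.  Returns the value observed before the
   update, like __old in the real macro.  */
static inline int32_t
sketch_atomic_xor (volatile int32_t *mem, int32_t mask)
{
  int32_t old = *mem, guess;
  do
    {
      guess = old;
      old = __sync_val_compare_and_swap (mem, guess, guess ^ mask);
    }
  while (old != guess);
  return old;
}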
@@ -275,9 +282,9 @@ static __inline __attribute__ ((always_inline))
    This accessor is provided for compatibility with TILEPro, which
    required an explicit atomic operation for stores that needed
    to be atomic with respect to other atomic methods in this header.  */
-#define atomic_write(mem, value) ((void) (*(mem) = (value)))
+#define arch_atomic_write(mem, value) ((void) (*(mem) = (value)))
 #else
-#define atomic_write(mem, value) \
+#define arch_atomic_write(mem, value) \
   do { \
     __typeof(mem) __aw_mem = (mem); \
     __typeof(value) __aw_val = (value); \
@@ -285,26 +292,26 @@ static __inline __attribute__ ((always_inline))
     __aw_intval = (__typeof((value) - (value)))__aw_val; \
     switch (sizeof(*__aw_mem)) { \
     case 8: \
-      __atomic_update_cmpxchg(__aw_mem, __aw_val, __value); \
+      __arch_atomic_update_cmpxchg(__aw_mem, __aw_val, __value); \
       break; \
     case 4: \
-      __atomic_update_4((int *)__aw_mem, 0, __aw_intval); \
+      __arch_atomic_update_4((int *)__aw_mem, 0, __aw_intval); \
       break; \
     case 2: \
       __aw_off = 8 * ((long)__aw_mem & 0x2); \
       __aw_mask = 0xffffU << __aw_off; \
       __aw_mem32 = (unsigned int *)((long)__aw_mem & ~0x2); \
       __aw_val32 = (__aw_intval << __aw_off) & __aw_mask; \
-      __atomic_update_cmpxchg(__aw_mem32, __aw_val32, \
-                              (__old & ~__aw_mask) | __value); \
+      __arch_atomic_update_cmpxchg(__aw_mem32, __aw_val32, \
+                                   (__old & ~__aw_mask) | __value); \
       break; \
     case 1: \
       __aw_off = 8 * ((long)__aw_mem & 0x3); \
       __aw_mask = 0xffU << __aw_off; \
       __aw_mem32 = (unsigned int *)((long)__aw_mem & ~0x3); \
       __aw_val32 = (__aw_intval << __aw_off) & __aw_mask; \
-      __atomic_update_cmpxchg(__aw_mem32, __aw_val32, \
-                              (__old & ~__aw_mask) | __value); \
+      __arch_atomic_update_cmpxchg(__aw_mem32, __aw_val32, \
+                                   (__old & ~__aw_mask) | __value); \
       break; \
     } \
   } while (0)
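For 1- and 2-byte objects the hunk above emulates the store with a read-modify-write of the enclosing aligned 32-bit word. A self-contained editorial sketch of that masking scheme for a 16-bit store (little-endian byte numbering, as on TILEPro), with __sync_val_compare_and_swap standing in for __arch_atomic_update_cmpxchg:

#include <stdint.h>

/* Editorial sketch of the sub-word case of arch_atomic_write: merge the
   16-bit value into the aligned 32-bit word that contains it and retry
   the CAS until no other core has touched the rest of the word.  */
static void
sketch_atomic_write_2 (volatile uint16_t *mem, uint16_t val)
{
  unsigned long addr = (unsigned long) mem;
  unsigned int off = 8 * (addr & 0x2);       /* bit offset of the halfword */
  unsigned int mask = 0xffffU << off;
  volatile unsigned int *mem32 = (volatile unsigned int *) (addr & ~0x2UL);
  unsigned int val32 = ((unsigned int) val << off) & mask;
  unsigned int old = *mem32, guess;
  do
    {
      guess = old;
      old = __sync_val_compare_and_swap (mem32, guess, (guess & ~mask) | val32);
    }
  while (old != guess);
}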
@@ -315,15 +322,15 @@ static __inline __attribute__ ((always_inline))
    This macro prevents loads or stores from being moved by the compiler
    across the macro.  Any loaded value that was loaded before this
    macro must then be reloaded by the compiler.  */
-#define atomic_compiler_barrier() __asm__ __volatile__("" ::: "memory")
+#define arch_atomic_compiler_barrier() __asm__ __volatile__("" ::: "memory")
 
 /* Full memory barrier.
 
-   This macro has the semantics of atomic_compiler_barrer(), but also
+   This macro has the semantics of arch_atomic_compiler_barrer(), but also
    ensures that previous stores are visible to other cores, and that
    all previous loaded values have been placed into their target
    register on this core.  */
-#define atomic_full_barrier() __insn_mf()
+#define arch_atomic_full_barrier() __insn_mf()
 
 /* Read memory barrier.
 
@@ -335,9 +342,9 @@ static __inline __attribute__ ((always_inline))
    On current TILE chips a read barrier is implemented as a full barrier,
    but this may not be true in later versions of the architecture.
 
-   See also atomic_acquire_barrier() for the appropriate idiom to use
+   See also arch_atomic_acquire_barrier() for the appropriate idiom to use
    to ensure no reads are lifted above an atomic lock instruction.  */
-#define atomic_read_barrier() atomic_full_barrier()
+#define arch_atomic_read_barrier() arch_atomic_full_barrier()
 
 /* Write memory barrier.
 
@@ -349,9 +356,9 @@ static __inline __attribute__ ((always_inline))
    On current TILE chips a write barrier is implemented as a full barrier,
    but this may not be true in later versions of the architecture.
 
-   See also atomic_release_barrier() for the appropriate idiom to use
+   See also arch_atomic_release_barrier() for the appropriate idiom to use
    to ensure all writes are complete prior to an atomic unlock instruction.  */
-#define atomic_write_barrier() atomic_full_barrier()
+#define arch_atomic_write_barrier() arch_atomic_full_barrier()
 
 /* Lock acquisition barrier.
 
@@ -367,10 +374,10 @@ static __inline __attribute__ ((always_inline))
    This should be done after the atomic operation that actually
    acquires the lock, and in conjunction with a "control dependency"
    that checks the atomic operation result to see if the lock was
-   in fact acquired.  See the atomic_read_barrier() macro
+   in fact acquired.  See the arch_atomic_read_barrier() macro
    for a heavier-weight barrier to use in certain unusual constructs,
-   or atomic_acquire_barrier_value() if no control dependency exists.  */
-#define atomic_acquire_barrier() atomic_compiler_barrier()
+   or arch_atomic_acquire_barrier_value() if no control dependency exists.  */
+#define arch_atomic_acquire_barrier() arch_atomic_compiler_barrier()
 
 /* Lock release barrier.
 
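The comment above spells out the intended idiom: perform the acquiring atomic operation, branch on its result (the control dependency), then apply the compiler barrier so later reads cannot be hoisted above it. A self-contained editorial sketch of a lock built that way, with GCC's __sync_lock_test_and_set builtin and __sync_synchronize() standing in for the TILE-specific exchange and release barrier; relying on a control dependency plus a compiler barrier for acquire is the TILE-specific reasoning from this header, not a portable recipe:

/* Editorial sketch of the acquire/release idiom described above.  */
static volatile int sketch_lock;

static void
sketch_spin_lock (void)
{
  while (__sync_lock_test_and_set (&sketch_lock, 1) != 0)
    ;                                        /* control dependency on the result */
  __asm__ __volatile__ ("" ::: "memory");    /* like arch_atomic_acquire_barrier () */
}

static void
sketch_spin_unlock (void)
{
  __sync_synchronize ();                     /* like arch_atomic_release_barrier () */
  sketch_lock = 0;                           /* the store that actually releases the lock */
}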
@@ -383,7 +390,7 @@ static __inline __attribute__ ((always_inline))
    for locking, that is, when leaving a critical section.  This should
    be done before the operation (such as a store of zero) that
    actually releases the lock.  */
-#define atomic_release_barrier() atomic_write_barrier()
+#define arch_atomic_release_barrier() arch_atomic_write_barrier()
 
 /* Barrier until the read of a particular value is complete.
 
@@ -400,7 +407,7 @@ static __inline __attribute__ ((always_inline))
    atomic instruction, even if the value itself is not checked.  This
    guarantees that if the atomic instruction succeeded in taking the lock,
    the lock was held before any reads in the critical section issued.  */
-#define atomic_acquire_barrier_value(val) \
+#define arch_atomic_acquire_barrier_value(val) \
   __asm__ __volatile__("move %0, %0" :: "r"(val))
 
 /* Access the given variable in memory exactly once.
@@ -421,8 +428,9 @@ static __inline __attribute__ ((always_inline))
 
    Note that multiple uses of this macro are guaranteed to be ordered,
    i.e. the compiler will not reorder stores or loads that are wrapped
-   in atomic_access_once().  */
-#define atomic_access_once(x) (*(volatile __typeof(x) *)&(x))
+   in arch_atomic_access_once().  */
+#define arch_atomic_access_once(x) (*(volatile __typeof(x) *)&(x))
+
 
 
 #endif /* !_ATOMIC_H_ */