linux-atomic.c (__kernel_cmpxchg): Reorder arguments to better match light-weight syscall argument order.

	* config/pa/linux-atomic.c (__kernel_cmpxchg): Reorder arguments to
	better match light-weight syscall argument order.
	(__kernel_cmpxchg2): Likewise.
	Adjust callers.

From-SVN: r225267
Author: John David Anglin <danglin@gcc.gnu.org>
Date:   2015-07-01 17:42:20 +00:00
Commit: f9a12f7b8f (parent: 5747290f51)
2 changed files with 33 additions and 26 deletions
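
For context: the parisc light-weight syscall (LWS) compare-and-exchange helper is entered with ble 0xb0(%sr2, %r0) and, as the register asm bindings in the helpers show, takes the memory address in r26, the old value in r25 and the new value in r24. After this change the C argument order (mem, oldval, newval) mirrors that register order. A minimal sketch of a caller written against the reordered helper (the function name counter_fetch_add is hypothetical, not part of this patch):

    /* Hypothetical illustration only: a word-sized CAS retry loop using
       the reordered helper.  The argument order now matches the LWS
       register order r26 (mem), r25 (oldval), r24 (newval).  */
    static int
    counter_fetch_add (int *counter, int delta)
    {
      int oldval, newval;
      long failure;

      do
        {
          oldval = __atomic_load_n (counter, __ATOMIC_SEQ_CST);
          newval = oldval + delta;
          failure = __kernel_cmpxchg (counter, oldval, newval);
        }
      while (failure != 0);

      return oldval;  /* value observed before the update */
    }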

File: ChangeLog

@@ -1,3 +1,10 @@
+2015-07-01  John David Anglin  <danglin@gcc.gnu.org>
+
+	* config/pa/linux-atomic.c (__kernel_cmpxchg): Reorder arguments to
+	better match light-weight syscall argument order.
+	(__kernel_cmpxchg2): Likewise.
+	Adjust callers.
+
 2015-06-30  H.J. Lu  <hongjiu.lu@intel.com>
 
 	* config.host: Support i[34567]86-*-elfiamcu target.

File: config/pa/linux-atomic.c

@@ -46,18 +46,17 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
 /* Kernel helper for compare-and-exchange a 32-bit value.  */
 static inline long
-__kernel_cmpxchg (int oldval, int newval, int *mem)
+__kernel_cmpxchg (int *mem, int oldval, int newval)
 {
   register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
-  register long lws_ret   asm("r28");
-  register long lws_errno asm("r21");
   register int lws_old asm("r25") = oldval;
   register int lws_new asm("r24") = newval;
+  register long lws_ret   asm("r28");
+  register long lws_errno asm("r21");
   asm volatile (	"ble	0xb0(%%sr2, %%r0)	\n\t"
-			"ldi	%5, %%r20		\n\t"
-	: "=r" (lws_ret), "=r" (lws_errno), "=r" (lws_mem),
-	  "=r" (lws_old), "=r" (lws_new)
-	: "i" (LWS_CAS), "2" (lws_mem), "3" (lws_old), "4" (lws_new)
+			"ldi	%2, %%r20		\n\t"
+	: "=r" (lws_ret), "=r" (lws_errno)
+	: "i" (LWS_CAS), "r" (lws_mem), "r" (lws_old), "r" (lws_new)
 	: "r1", "r20", "r22", "r23", "r29", "r31", "memory"
   );
   if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
@@ -73,19 +72,20 @@ __kernel_cmpxchg (int oldval, int newval, int *mem)
 }
 static inline long
-__kernel_cmpxchg2 (const void *oldval, const void *newval, void *mem,
+__kernel_cmpxchg2 (void *mem, const void *oldval, const void *newval,
 		   int val_size)
 {
   register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
-  register long lws_ret   asm("r28");
-  register long lws_errno asm("r21");
   register unsigned long lws_old asm("r25") = (unsigned long) oldval;
   register unsigned long lws_new asm("r24") = (unsigned long) newval;
   register int lws_size asm("r23") = val_size;
+  register long lws_ret   asm("r28");
+  register long lws_errno asm("r21");
   asm volatile (	"ble	0xb0(%%sr2, %%r0)	\n\t"
-			"ldi	%2, %%r20		\n\t"
-	: "=r" (lws_ret), "=r" (lws_errno)
-	: "i" (2), "r" (lws_mem), "r" (lws_old), "r" (lws_new), "r" (lws_size)
+			"ldi	%6, %%r20		\n\t"
+	: "=r" (lws_ret), "=r" (lws_errno), "+r" (lws_mem),
+	  "+r" (lws_old), "+r" (lws_new), "+r" (lws_size)
+	: "i" (2)
 	: "r1", "r20", "r22", "r29", "r31", "fr4", "memory"
   );
   if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
@@ -116,7 +116,7 @@ __kernel_cmpxchg2 (const void *oldval, const void *newval, void *mem,
     do {								\
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);			\
       newval = PFX_OP (tmp INF_OP val);					\
-      failure = __kernel_cmpxchg2 (&tmp, &newval, ptr, INDEX);		\
+      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);		\
     } while (failure != 0);						\
 									\
     return tmp;								\
@@ -146,7 +146,7 @@ FETCH_AND_OP_2 (nand, ~, &, signed char, 1, 0)
     do {								\
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);			\
       newval = PFX_OP (tmp INF_OP val);					\
-      failure = __kernel_cmpxchg2 (&tmp, &newval, ptr, INDEX);		\
+      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);		\
     } while (failure != 0);						\
 									\
     return PFX_OP (tmp INF_OP val);					\
@@ -174,7 +174,7 @@ OP_AND_FETCH_2 (nand, ~, &, signed char, 1, 0)
 									\
     do {								\
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);			\
-      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);	\
+      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val));	\
     } while (failure != 0);						\
 									\
     return tmp;								\
@@ -195,7 +195,7 @@ FETCH_AND_OP_WORD (nand, ~, &)
 									\
     do {								\
       tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);			\
-      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr);	\
+      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val));	\
     } while (failure != 0);						\
 									\
     return PFX_OP (tmp INF_OP val);					\
@@ -225,7 +225,7 @@ typedef unsigned char bool;
     if (__builtin_expect (oldval != actual_oldval, 0))			\
       return actual_oldval;						\
 									\
-    fail = __kernel_cmpxchg2 (&actual_oldval, &newval, ptr, INDEX);	\
+    fail = __kernel_cmpxchg2 (ptr, &actual_oldval, &newval, INDEX);	\
 									\
     if (__builtin_expect (!fail, 1))					\
       return actual_oldval;						\
@@ -236,7 +236,7 @@ typedef unsigned char bool;
   __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,	\
 					TYPE newval)			\
   {									\
-    int failure = __kernel_cmpxchg2 (&oldval, &newval, ptr, INDEX);	\
+    int failure = __kernel_cmpxchg2 (ptr, &oldval, &newval, INDEX);	\
     return (failure != 0);						\
   }
@@ -255,7 +255,7 @@ __sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
       if (__builtin_expect (oldval != actual_oldval, 0))
 	return actual_oldval;
-      fail = __kernel_cmpxchg (actual_oldval, newval, ptr);
+      fail = __kernel_cmpxchg (ptr, actual_oldval, newval);
       if (__builtin_expect (!fail, 1))
 	return actual_oldval;
@@ -265,7 +265,7 @@ __sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
 bool HIDDEN
 __sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
 {
-  int failure = __kernel_cmpxchg (oldval, newval, ptr);
+  int failure = __kernel_cmpxchg (ptr, oldval, newval);
   return (failure == 0);
 }
@@ -278,7 +278,7 @@ TYPE HIDDEN \
 									\
     do {								\
       oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);		\
-      failure = __kernel_cmpxchg2 (&oldval, &val, ptr, INDEX);		\
+      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX);		\
     } while (failure != 0);						\
 									\
     return oldval;							\
@@ -294,7 +294,7 @@ __sync_lock_test_and_set_4 (int *ptr, int val)
   do {
     oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
-    failure = __kernel_cmpxchg (oldval, val, ptr);
+    failure = __kernel_cmpxchg (ptr, oldval, val);
   } while (failure != 0);
   return oldval;
@@ -308,7 +308,7 @@ __sync_lock_test_and_set_4 (int *ptr, int val)
 									\
     do {								\
       oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);		\
-      failure = __kernel_cmpxchg2 (&oldval, &zero, ptr, INDEX);	\
+      failure = __kernel_cmpxchg2 (ptr, &oldval, &zero, INDEX);	\
     } while (failure != 0);						\
   }
@@ -321,7 +321,7 @@ __sync_lock_release_4 (int *ptr)
   int failure, oldval;
   do {
-    oldval = *ptr;
-    failure = __kernel_cmpxchg (oldval, 0, ptr);
+    oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
+    failure = __kernel_cmpxchg (ptr, oldval, 0);
   } while (failure != 0);
 }
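
To make "Adjust callers" concrete, here is a rough hand expansion of one word-sized caller after the change, corresponding to the FETCH_AND_OP_WORD hunk above instantiated as, e.g., FETCH_AND_OP_WORD (add, , +); declarations are abbreviated, and HIDDEN is the file's hidden-visibility attribute macro:

    /* Approximate hand expansion, for illustration only; the real code
       stays in macro form.  */
    int HIDDEN
    __sync_fetch_and_add_4 (int *ptr, int val)
    {
      int tmp, failure;

      do {
        tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
        /* The pointer now comes first, then the old and new values.  */
        failure = __kernel_cmpxchg (ptr, tmp, (tmp + val));
      } while (failure != 0);

      return tmp;
    }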