[PATCH] s390: atomic primitives
Hugh Dickins <hugh@veritas.com>

Fix the broken atomic_cmpxchg primitive. Add atomic_sub_and_test, atomic64_sub_return, atomic64_sub_and_test, atomic64_cmpxchg, atomic64_add_unless and atomic64_inc_not_zero. Replace the old-style atomic_compare_and_swap by atomic_cmpxchg. Shorten the whole header by defining most primitives with the two inline functions atomic_add_return and atomic_sub_return.

In addition, this patch contains the s390-related fixes of Hugh's "mm: fill arch atomic64 gaps" patch.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent 8d93c700a4
commit 973bd99375
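The caller-side conversion repeated throughout the diff below is mechanical, but the success test is inverted: atomic_compare_and_swap() returned 0 when the swap succeeded, whereas atomic_cmpxchg() returns the value the counter held before the operation, so callers compare against the expected old value. A stand-alone sketch of the resulting "claim once" pattern, using the GCC/Clang __atomic built-ins as a stand-in for the kernel primitive (cmpxchg_int() and claim() are illustrative names, not code from this patch):

/*
 * Stand-alone sketch (not kernel code) of the "claim once" pattern that
 * the patch converts from atomic_compare_and_swap() to atomic_cmpxchg().
 * cmpxchg_int() emulates the kernel primitive with GCC/Clang __atomic
 * built-ins: it returns the value seen in *ptr before the swap attempt.
 */
#include <stdio.h>

static int cpuid = -1;          /* -1 means "shutdown path not claimed yet" */

static int cmpxchg_int(int *ptr, int old, int new)
{
        int expected = old;

        __atomic_compare_exchange_n(ptr, &expected, new, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return expected;        /* previous value, whether or not we swapped */
}

static void claim(int this_cpu)
{
        /* Same shape as the converted callers: only the caller that still
         * sees the initial value (-1) wins; everyone else backs off. */
        if (cmpxchg_int(&cpuid, -1, this_cpu) != -1) {
                printf("cpu %d lost the race\n", this_cpu);
                return;
        }
        printf("cpu %d claimed the shutdown path\n", this_cpu);
}

int main(void)
{
        claim(0);       /* first caller wins */
        claim(1);       /* cpuid is already 0, so this one loses */
        return 0;
}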
@@ -85,7 +85,7 @@ kexec_halt_all_cpus(void *kernel_image)
                pfault_fini();
 #endif

-       if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+       if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
                signal_processor(smp_processor_id(), sigp_stop);

        /* Wait for all other cpus to enter stopped state */
@@ -263,7 +263,7 @@ static void do_machine_restart(void * __unused)
        int cpu;
        static atomic_t cpuid = ATOMIC_INIT(-1);

-       if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+       if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
                signal_processor(smp_processor_id(), sigp_stop);

        /* Wait for all other cpus to enter stopped state */
@@ -313,7 +313,7 @@ static void do_machine_halt(void * __unused)
 {
        static atomic_t cpuid = ATOMIC_INIT(-1);

-       if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
+       if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
                smp_send_stop();
                if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
                        cpcmd(vmhalt_cmd, NULL, 0, NULL);
@@ -332,7 +332,7 @@ static void do_machine_power_off(void * __unused)
 {
        static atomic_t cpuid = ATOMIC_INIT(-1);

-       if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
+       if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
                smp_send_stop();
                if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
                        cpcmd(vmpoff_cmd, NULL, 0, NULL);
@@ -7,7 +7,7 @@
  * Bugreports.to..: <Linux390@de.ibm.com>
  * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
  *
- * $Revision: 1.167 $
+ * $Revision: 1.169 $
  */

 #include <linux/config.h>
@@ -1323,7 +1323,7 @@ void
 dasd_schedule_bh(struct dasd_device * device)
 {
        /* Protect against rescheduling. */
-       if (atomic_compare_and_swap (0, 1, &device->tasklet_scheduled))
+       if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
                return;
        dasd_get_device(device);
        tasklet_hi_schedule(&device->tasklet);
@@ -32,7 +32,7 @@ do_load_quiesce_psw(void * __unused)
        psw_t quiesce_psw;
        int cpu;

-       if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+       if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
                signal_processor(smp_processor_id(), sigp_stop);
        /* Wait for all other cpus to enter stopped state */
        for_each_online_cpu(cpu) {
@@ -65,7 +65,7 @@ static void
 tapeblock_trigger_requeue(struct tape_device *device)
 {
        /* Protect against rescheduling. */
-       if (atomic_compare_and_swap(0, 1, &device->blk_data.requeue_scheduled))
+       if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
                return;
        schedule_work(&device->blk_data.requeue_task);
 }
@@ -1,7 +1,7 @@
 /*
  *  drivers/s390/cio/ccwgroup.c
  *  bus driver for ccwgroup
- *   $Revision: 1.32 $
+ *   $Revision: 1.33 $
  *
  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
  *                       IBM Corporation
@@ -263,7 +263,7 @@ ccwgroup_set_online(struct ccwgroup_device *gdev)
        struct ccwgroup_driver *gdrv;
        int ret;

-       if (atomic_compare_and_swap(0, 1, &gdev->onoff))
+       if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
                return -EAGAIN;
        if (gdev->state == CCWGROUP_ONLINE) {
                ret = 0;
@@ -289,7 +289,7 @@ ccwgroup_set_offline(struct ccwgroup_device *gdev)
        struct ccwgroup_driver *gdrv;
        int ret;

-       if (atomic_compare_and_swap(0, 1, &gdev->onoff))
+       if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
                return -EAGAIN;
        if (gdev->state == CCWGROUP_OFFLINE) {
                ret = 0;
@@ -1,7 +1,7 @@
 /*
  *  drivers/s390/cio/device.c
  *  bus driver for ccw devices
- *   $Revision: 1.131 $
+ *   $Revision: 1.137 $
  *
  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
  *                       IBM Corporation
@@ -374,7 +374,7 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf
        int i, force, ret;
        char *tmp;

-       if (atomic_compare_and_swap(0, 1, &cdev->private->onoff))
+       if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
                return -EAGAIN;

        if (cdev->drv && !try_module_get(cdev->drv->owner)) {
@@ -1,5 +1,5 @@
 /*
- * $Id: iucv.c,v 1.45 2005/04/26 22:59:06 braunu Exp $
+ * $Id: iucv.c,v 1.47 2005/11/21 11:35:22 mschwide Exp $
  *
  * IUCV network driver
  *
@@ -29,7 +29,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  *
- * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.45 $
+ * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.47 $
  *
  */

@@ -355,7 +355,7 @@ do { \
 static void
 iucv_banner(void)
 {
-       char vbuf[] = "$Revision: 1.45 $";
+       char vbuf[] = "$Revision: 1.47 $";
        char *version = vbuf;

        if ((version = strchr(version, ':'))) {
@@ -477,7 +477,7 @@ grab_param(void)
                ptr++;
                if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
                        ptr = iucv_param_pool;
-       } while (atomic_compare_and_swap(0, 1, &ptr->in_use));
+       } while (atomic_cmpxchg(&ptr->in_use, 0, 1) != 0);
        hint = ptr - iucv_param_pool;

        memset(&ptr->param, 0, sizeof(ptr->param));
@@ -1396,7 +1396,7 @@ qeth_idx_activate_get_answer(struct qeth_channel *channel,
        channel->ccw.cda = (__u32) __pa(iob->data);

        wait_event(card->wait_q,
-                  atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
+                  atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
        QETH_DBF_TEXT(setup, 6, "noirqpnd");
        spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
        rc = ccw_device_start(channel->ccwdev,
@@ -1463,7 +1463,7 @@ qeth_idx_activate_channel(struct qeth_channel *channel,
        memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);

        wait_event(card->wait_q,
-                  atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
+                  atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
        QETH_DBF_TEXT(setup, 6, "noirqpnd");
        spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
        rc = ccw_device_start(channel->ccwdev,
@@ -1616,7 +1616,7 @@ qeth_issue_next_read(struct qeth_card *card)
        }
        qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
        wait_event(card->wait_q,
-                  atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0);
+                  atomic_cmpxchg(&card->read.irq_pending, 0, 1) == 0);
        QETH_DBF_TEXT(trace, 6, "noirqpnd");
        rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
                              (addr_t) iob, 0, 0);
@@ -1882,7 +1882,7 @@ qeth_send_control_data(struct qeth_card *card, int len,
        spin_unlock_irqrestore(&card->lock, flags);
        QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
        wait_event(card->wait_q,
-                  atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
+                  atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
        qeth_prepare_control_data(card, len, iob);
        if (IS_IPA(iob->data))
                timer.expires = jiffies + QETH_IPA_TIMEOUT;
@@ -1924,7 +1924,7 @@ qeth_osn_send_control_data(struct qeth_card *card, int len,
        QETH_DBF_TEXT(trace, 5, "osndctrd");

        wait_event(card->wait_q,
-                  atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
+                  atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
        qeth_prepare_control_data(card, len, iob);
        QETH_DBF_TEXT(trace, 6, "osnoirqp");
        spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
@@ -4236,9 +4236,8 @@ qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
        QETH_DBF_TEXT(trace, 6, "dosndpfa");

        /* spin until we get the queue ... */
-       while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
-                                      QETH_OUT_Q_LOCKED,
-                                      &queue->state));
+       while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
+                             QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
        /* ... now we've got the queue */
        index = queue->next_buf_to_fill;
        buffer = &queue->bufs[queue->next_buf_to_fill];
@@ -4292,9 +4291,8 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
        QETH_DBF_TEXT(trace, 6, "dosndpkt");

        /* spin until we get the queue ... */
-       while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
-                                      QETH_OUT_Q_LOCKED,
-                                      &queue->state));
+       while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
+                             QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
        start_index = queue->next_buf_to_fill;
        buffer = &queue->bufs[queue->next_buf_to_fill];
        /*
@@ -5,7 +5,7 @@
  *  include/asm-s390/atomic.h
  *
  *  S390 version
- *    Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *               Denis Joseph Barrow,
  *               Arnd Bergmann (arndb@de.ibm.com)
@@ -45,59 +45,57 @@ typedef struct {
 #define atomic_read(v)          ((v)->counter)
 #define atomic_set(v,i)         (((v)->counter) = (i))

-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-       __CS_LOOP(v, i, "ar");
-}
 static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
        return __CS_LOOP(v, i, "ar");
 }
-static __inline__ int atomic_add_negative(int i, atomic_t * v)
-{
-       return __CS_LOOP(v, i, "ar") < 0;
-}
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-       __CS_LOOP(v, i, "sr");
-}
+#define atomic_add(_i, _v)              atomic_add_return(_i, _v)
+#define atomic_add_negative(_i, _v)     (atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v)                  atomic_add_return(1, _v)
+#define atomic_inc_return(_v)           atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v)         (atomic_add_return(1, _v) == 0)
+
 static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
        return __CS_LOOP(v, i, "sr");
 }
-static __inline__ void atomic_inc(volatile atomic_t * v)
-{
-       __CS_LOOP(v, 1, "ar");
-}
-static __inline__ int atomic_inc_return(volatile atomic_t * v)
-{
-       return __CS_LOOP(v, 1, "ar");
-}
+#define atomic_sub(_i, _v)              atomic_sub_return(_i, _v)
+#define atomic_sub_and_test(_i, _v)     (atomic_sub_return(_i, _v) == 0)
+#define atomic_dec(_v)                  atomic_sub_return(1, _v)
+#define atomic_dec_return(_v)           atomic_sub_return(1, _v)
+#define atomic_dec_and_test(_v)         (atomic_sub_return(1, _v) == 0)

-static __inline__ int atomic_inc_and_test(volatile atomic_t * v)
-{
-       return __CS_LOOP(v, 1, "ar") == 0;
-}
-static __inline__ void atomic_dec(volatile atomic_t * v)
-{
-       __CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_return(volatile atomic_t * v)
-{
-       return __CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_and_test(volatile atomic_t * v)
-{
-       return __CS_LOOP(v, 1, "sr") == 0;
-}
 static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
 {
        __CS_LOOP(v, ~mask, "nr");
 }
+
 static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
 {
        __CS_LOOP(v, mask, "or");
 }
+
+static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+       __asm__ __volatile__(" cs %0,%3,0(%2)\n"
+                            : "+d" (old), "=m" (v->counter)
+                            : "a" (v), "d" (new), "m" (v->counter)
+                            : "cc", "memory" );
+       return old;
+}
+
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+
+       c = atomic_read(v);
+       while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+               c = old;
+       return c != u;
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #undef __CS_LOOP

 #ifdef __s390x__
@@ -123,92 +121,61 @@ typedef struct {
 #define atomic64_read(v)          ((v)->counter)
 #define atomic64_set(v,i)         (((v)->counter) = (i))

-static __inline__ void atomic64_add(long long i, atomic64_t * v)
-{
-       __CSG_LOOP(v, i, "agr");
-}
 static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
 {
        return __CSG_LOOP(v, i, "agr");
 }
-static __inline__ long long atomic64_add_negative(long long i, atomic64_t * v)
-{
-       return __CSG_LOOP(v, i, "agr") < 0;
-}
-static __inline__ void atomic64_sub(long long i, atomic64_t * v)
-{
-       __CSG_LOOP(v, i, "sgr");
-}
-static __inline__ void atomic64_inc(volatile atomic64_t * v)
-{
-       __CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_return(volatile atomic64_t * v)
-{
-       return __CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v)
-{
-       return __CSG_LOOP(v, 1, "agr") == 0;
-}
-static __inline__ void atomic64_dec(volatile atomic64_t * v)
-{
-       __CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_return(volatile atomic64_t * v)
-{
-       return __CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v)
-{
-       return __CSG_LOOP(v, 1, "sgr") == 0;
-}
+#define atomic64_add(_i, _v)            atomic64_add_return(_i, _v)
+#define atomic64_add_negative(_i, _v)   (atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v)                atomic64_add_return(1, _v)
+#define atomic64_inc_return(_v)         atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v)       (atomic64_add_return(1, _v) == 0)
+
+static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
+{
+       return __CSG_LOOP(v, i, "sgr");
+}
+#define atomic64_sub(_i, _v)            atomic64_sub_return(_i, _v)
+#define atomic64_sub_and_test(_i, _v)   (atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v)                atomic64_sub_return(1, _v)
+#define atomic64_dec_return(_v)         atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v)       (atomic64_sub_return(1, _v) == 0)
+
 static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
 {
        __CSG_LOOP(v, ~mask, "ngr");
 }
+
 static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
 {
        __CSG_LOOP(v, mask, "ogr");
 }

-#undef __CSG_LOOP
-#endif
-
-/*
-  returns 0  if expected_oldval==value in *v ( swap was successful )
-  returns 1  if unsuccessful.
-
-  This is non-portable, use bitops or spinlocks instead!
-*/
-static __inline__ int
-atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
+static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
+                                             long long old, long long new)
 {
-       int retval;
-
-       __asm__ __volatile__(
-               " lr %0,%3\n"
-               " cs %0,%4,0(%2)\n"
-               " ipm %0\n"
-               " srl %0,28\n"
-               "0:"
-               : "=&d" (retval), "=m" (v->counter)
-               : "a" (v), "d" (expected_oldval) , "d" (new_val),
-                 "m" (v->counter) : "cc", "memory" );
-       return retval;
+       __asm__ __volatile__(" csg %0,%3,0(%2)\n"
+                            : "+d" (old), "=m" (v->counter)
+                            : "a" (v), "d" (new), "m" (v->counter)
+                            : "cc", "memory" );
+       return old;
 }

-#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter)))
-
-#define atomic_add_unless(v, a, u) \
-({ \
-       int c, old; \
-       c = atomic_read(v); \
-       while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-               c = old; \
-       c != (u); \
-})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+static __inline__ int atomic64_add_unless(atomic64_t *v,
+                                          long long a, long long u)
+{
+       long long c, old;
+
+       c = atomic64_read(v);
+       while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
+               c = old;
+       return c != u;
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+#undef __CSG_LOOP
+#endif

 #define smp_mb__before_atomic_dec()     smp_mb()
 #define smp_mb__after_atomic_dec()      smp_mb()
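The atomic_add_unless()/atomic64_add_unless() helpers introduced above are ordinary cmpxchg retry loops, and the same logic can be exercised outside the kernel. A user-space sketch assuming the GCC/Clang __atomic built-ins (add_unless() here is an illustrative stand-in for the kernel inline, not code from this patch):

/*
 * User-space sketch of the cmpxchg retry loop behind atomic_add_unless()
 * and atomic_inc_not_zero(); mirrors the inline functions added to
 * include/asm-s390/atomic.h, but uses GCC/Clang __atomic built-ins.
 */
#include <stdio.h>

static int add_unless(int *v, int a, int u)
{
        int c = __atomic_load_n(v, __ATOMIC_SEQ_CST);

        /* Retry until v reaches the forbidden value u, or until the
         * compare-and-swap installs c + a on top of the value we read. */
        while (c != u) {
                int old = c;

                if (__atomic_compare_exchange_n(v, &old, c + a, 0,
                                                __ATOMIC_SEQ_CST,
                                                __ATOMIC_SEQ_CST))
                        break;
                c = old;        /* lost a race; old holds the fresh value */
        }
        return c != u;          /* non-zero if the add was performed */
}

int main(void)
{
        int refcount = 1;
        int done;

        /* inc_not_zero succeeds while the counter is non-zero ... */
        done = add_unless(&refcount, 1, 0);
        printf("added=%d refcount=%d\n", done, refcount);   /* added=1 refcount=2 */

        /* ... and refuses to resurrect a counter that already hit zero. */
        refcount = 0;
        done = add_unless(&refcount, 1, 0);
        printf("added=%d refcount=%d\n", done, refcount);   /* added=0 refcount=0 */
        return 0;
}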