mirror of https://github.com/FEX-Emu/linux.git
h8300: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.

Also rework the atomic implementation in terms of CPP macros to avoid
the typical repetition -- I seem to have missed this arch the last time
around when I did that.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent b0d8003ef4
commit 73ada3700b
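For orientation before the diff: the patch generates each operation from a
single CPP template. As an illustrative expansion (not part of the patch),
ATOMIC_OP(or, |=) from the first hunk produces the following function; h8300
is a uniprocessor architecture, so masking interrupts around the plain C
read-modify-write is sufficient to make it atomic:

/* Expansion of ATOMIC_OP(or, |=) -- illustrative only */
static inline void atomic_or(int i, atomic_t *v)
{
	h8300flags flags;		/* saved interrupt state */

	flags = arch_local_irq_save();	/* disable interrupts */
	v->counter |= i;		/* plain RMW, now uninterruptible */
	arch_local_irq_restore(flags);	/* restore previous state */
}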
arch/h8300/include/asm/atomic.h
@@ -16,83 +16,54 @@
 
 #include <linux/kernel.h>
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	ret = v->counter += i;
-	arch_local_irq_restore(flags);
-	return ret;
+#define ATOMIC_OP_RETURN(op, c_op)				\
+static inline int atomic_##op##_return(int i, atomic_t *v)	\
+{								\
+	h8300flags flags;					\
+	int ret;						\
+								\
+	flags = arch_local_irq_save();				\
+	ret = v->counter c_op i;				\
+	arch_local_irq_restore(flags);				\
+	return ret;						\
 }
 
-#define atomic_add(i, v) atomic_add_return(i, v)
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+#define ATOMIC_OP(op, c_op)					\
+static inline void atomic_##op(int i, atomic_t *v)		\
+{								\
+	h8300flags flags;					\
+								\
+	flags = arch_local_irq_save();				\
+	v->counter c_op i;					\
+	arch_local_irq_restore(flags);				\
+}
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP_RETURN(sub, -=)
 
-	flags = arch_local_irq_save();
-	ret = v->counter -= i;
-	arch_local_irq_restore(flags);
-	return ret;
-}
+#define CONFIG_ARCH_HAS_ATOMIC_OR
 
-#define atomic_sub(i, v) atomic_sub_return(i, v)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or,  |=)
+ATOMIC_OP(xor, ^=)
 
-static inline int atomic_inc_return(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
-	flags = arch_local_irq_save();
-	v->counter++;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret;
-}
+#define atomic_add(i, v)		(void)atomic_add_return(i, v)
+#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
-#define atomic_inc(v) atomic_inc_return(v)
+#define atomic_sub(i, v)		(void)atomic_sub_return(i, v)
+#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)
 
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+#define atomic_inc_return(v)		atomic_add_return(1, v)
+#define atomic_dec_return(v)		atomic_sub_return(1, v)
 
-static inline int atomic_dec_return(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	--v->counter;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret;
-}
+#define atomic_inc(v)			(void)atomic_inc_return(v)
+#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
 
-#define atomic_dec(v) atomic_dec_return(v)
-
-static inline int atomic_dec_and_test(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	--v->counter;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret == 0;
-}
+#define atomic_dec(v)			(void)atomic_dec_return(v)
+#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
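Note that the macro-generated return variants are functionally identical to
the open-coded versions deleted above; for example, ATOMIC_OP_RETURN(sub, -=)
expands to:

/* Expansion of ATOMIC_OP_RETURN(sub, -=) -- matches the removed
 * open-coded atomic_sub_return line for line. */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	h8300flags flags;
	int ret;

	flags = arch_local_irq_save();
	ret = v->counter -= i;		/* returns the post-subtraction value */
	arch_local_irq_restore(flags);
	return ret;
}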
@@ -120,40 +91,14 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return ret;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-	unsigned char ccr;
-	unsigned long tmp;
-
-	__asm__ __volatile__("stc ccr,%w3\n\t"
-			     "orc #0x80,ccr\n\t"
-			     "mov.l %0,%1\n\t"
-			     "and.l %2,%1\n\t"
-			     "mov.l %1,%0\n\t"
-			     "ldc %w3,ccr"
-			     : "=m"(*v), "=r"(tmp)
-			     : "g"(~(mask)), "r"(ccr));
+	atomic_and(~mask, v);
 }
 
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-	unsigned char ccr;
-	unsigned long tmp;
-
-	__asm__ __volatile__("stc ccr,%w3\n\t"
-			     "orc #0x80,ccr\n\t"
-			     "mov.l %0,%1\n\t"
-			     "or.l %2,%1\n\t"
-			     "mov.l %1,%0\n\t"
-			     "ldc %w3,ccr"
-			     : "=m"(*v), "=r"(tmp)
-			     : "g"(~(mask)), "r"(ccr));
+	atomic_or(mask, v);
 }
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* __ARCH_H8300_ATOMIC __ */
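The second hunk shows the intended migration path: atomic_{set,clear}_mask
become deprecated wrappers over the new logic ops, so callers can move to
atomic_or/atomic_and directly. A hypothetical caller (names invented for
illustration) would change as follows:

#define FLAG_BUSY	0x02		/* hypothetical flag bit */

static void mark_busy(atomic_t *status)
{
	/* old: atomic_set_mask(FLAG_BUSY, status); */
	atomic_or(FLAG_BUSY, status);
}

static void clear_busy(atomic_t *status)
{
	/* old: atomic_clear_mask(FLAG_BUSY, status); */
	atomic_and(~FLAG_BUSY, status);
}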