#ifndef _ASM_GENERIC_LOCAL_H
#define _ASM_GENERIC_LOCAL_H

#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/atomic.h>
#include <asm/types.h>

/*
 * A signed long type for operations which are atomic for a single CPU.
 * Usually used in combination with per-cpu variables.
 *
 * This is the default implementation, which uses atomic_long_t.  That is
 * rather pointless: the whole point behind local_t is that some processors
 * can perform atomic adds and subtracts in a manner which is atomic with
 * respect to IRQs running on the same CPU.  local_t allows exploitation of
 * such capabilities.
 */

/* Implement in terms of atomics. */

/* Don't typedef atomic_long_t directly: we don't want local_t mixed up
 * with atomic_t. */
typedef struct
{
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

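/*
 * Illustrative sketch, not part of this header: a local_t is declared
 * and statically initialised with LOCAL_INIT.  The variable name is a
 * hypothetical example.
 *
 *	static local_t packets_seen = LOCAL_INIT(0);
 */
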
#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l, i)	atomic_long_set((&(l)->a), (i))
#define local_inc(l)	atomic_long_inc(&(l)->a)
#define local_dec(l)	atomic_long_dec(&(l)->a)
#define local_add(i, l)	atomic_long_add((i), (&(l)->a))
#define local_sub(i, l)	atomic_long_sub((i), (&(l)->a))

#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
#define local_inc_return(l) atomic_long_inc_return(&(l)->a)

#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
#define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u))
#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)

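/*
 * Illustrative sketch, not part of this header: typical use of the
 * operations above on a single-CPU event counter.  All names below are
 * hypothetical examples.
 *
 *	static local_t events = LOCAL_INIT(0);
 *
 *	Safe from both process and interrupt context on the CPU that
 *	owns the counter:
 *
 *	void record_event(void)
 *	{
 *		local_inc(&events);
 *	}
 *
 *	Atomically drain the counter, returning what it held:
 *
 *	long drain_events(void)
 *	{
 *		return local_xchg(&events, 0);
 *	}
 *
 * Remember that local_t is only atomic with respect to its own CPU;
 * all updates must be made from the CPU that owns the variable.
 */
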
/* Non-atomic variants, i.e. preemption disabled and the value never
 * touched from interrupts, etc.  Some archs can optimize this case well. */
#define __local_inc(l)		local_set((l), local_read(l) + 1)
#define __local_dec(l)		local_set((l), local_read(l) - 1)
#define __local_add(i, l)	local_set((l), local_read(l) + (i))
#define __local_sub(i, l)	local_set((l), local_read(l) - (i))

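/*
 * Illustrative sketch, not part of this header: the __local_* forms are
 * only safe when all concurrent access is excluded, e.g. preemption is
 * disabled and the variable is never touched from interrupt context.
 * The names below are hypothetical.
 *
 *	static local_t dropped = LOCAL_INIT(0);
 *
 *	void count_drop(void)
 *	{
 *		preempt_disable();
 *		__local_inc(&dropped);
 *		preempt_enable();
 *	}
 */
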
/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable (e.g. mystruct.foo), not an address.
 */
#define cpu_local_read(l)	local_read(&__get_cpu_var(l))
#define cpu_local_set(l, i)	local_set(&__get_cpu_var(l), (i))
#define cpu_local_inc(l)	local_inc(&__get_cpu_var(l))
#define cpu_local_dec(l)	local_dec(&__get_cpu_var(l))
#define cpu_local_add(i, l)	local_add((i), &__get_cpu_var(l))
#define cpu_local_sub(i, l)	local_sub((i), &__get_cpu_var(l))

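/*
 * Illustrative sketch, not part of this header: a per-cpu local_t is
 * declared with the standard DEFINE_PER_CPU macro and named directly in
 * the cpu_local_* helpers.  With this generic fallback the caller must
 * still keep the task on one CPU (e.g. with preemption disabled) across
 * the operation.  The variable name is a hypothetical example.
 *
 *	static DEFINE_PER_CPU(local_t, hits);
 *
 *	void count_hit(void)
 *	{
 *		preempt_disable();
 *		cpu_local_inc(hits);
 *		preempt_enable();
 *	}
 */
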
/* Non-atomic increments, i.e. preemption disabled and the value never
 * touched from interrupts, etc.  Some archs can optimize this case well.
 */
#define __cpu_local_inc(l)	__local_inc(&__get_cpu_var(l))
#define __cpu_local_dec(l)	__local_dec(&__get_cpu_var(l))
#define __cpu_local_add(i, l)	__local_add((i), &__get_cpu_var(l))
#define __cpu_local_sub(i, l)	__local_sub((i), &__get_cpu_var(l))

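/*
 * Illustrative sketch, not part of this header: the __cpu_local_* forms
 * combine both relaxations above, so the caller must stay on the current
 * CPU and exclude any interrupt users.  Names below are hypothetical.
 *
 *	static DEFINE_PER_CPU(local_t, bytes_queued);
 *
 *	Called with preemption disabled and no interrupt users:
 *
 *	void account_bytes(long len)
 *	{
 *		__cpu_local_add(len, bytes_queued);
 *	}
 */
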
#endif /* _ASM_GENERIC_LOCAL_H */