xref: /openbmc/linux/include/asm-generic/local64.h (revision 1996bda2a42480c275656233e631ee0966574be4)
1*1996bda2SPeter Zijlstra #ifndef _ASM_GENERIC_LOCAL64_H
2*1996bda2SPeter Zijlstra #define _ASM_GENERIC_LOCAL64_H
3*1996bda2SPeter Zijlstra 
4*1996bda2SPeter Zijlstra #include <linux/percpu.h>
5*1996bda2SPeter Zijlstra #include <asm/types.h>
6*1996bda2SPeter Zijlstra 
7*1996bda2SPeter Zijlstra /*
8*1996bda2SPeter Zijlstra  * A signed long type for operations which are atomic for a single CPU.
9*1996bda2SPeter Zijlstra  * Usually used in combination with per-cpu variables.
10*1996bda2SPeter Zijlstra  *
11*1996bda2SPeter Zijlstra  * This is the default implementation, which uses atomic64_t.  Which is
12*1996bda2SPeter Zijlstra  * rather pointless.  The whole point behind local64_t is that some processors
13*1996bda2SPeter Zijlstra  * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
14*1996bda2SPeter Zijlstra  * running on this CPU.  local64_t allows exploitation of such capabilities.
15*1996bda2SPeter Zijlstra  */
16*1996bda2SPeter Zijlstra 
17*1996bda2SPeter Zijlstra /* Implement in terms of atomics. */
18*1996bda2SPeter Zijlstra 
#if BITS_PER_LONG == 64

#include <asm/local.h>

/*
 * On a 64-bit arch, `long` is already 64 bits wide, so local_t (a
 * per-CPU-atomic long) gives us 64-bit operations for free.  Wrap it in
 * a distinct struct so local64_t values can't be mixed with plain
 * local_t's by accident.
 */
typedef struct {
	local_t a;
} local64_t;

#define LOCAL64_INIT(i)	{ LOCAL_INIT(i) }

/* Straight one-to-one forwarding of every local_t operation. */
#define local64_read(l)		local_read(&(l)->a)
#define local64_set(l,i)	local_set((&(l)->a),(i))
#define local64_inc(l)		local_inc(&(l)->a)
#define local64_dec(l)		local_dec(&(l)->a)
#define local64_add(i,l)	local_add((i),(&(l)->a))
#define local64_sub(i,l)	local_sub((i),(&(l)->a))

/* Arithmetic + test variants: return true on the indicated condition. */
#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
#define local64_inc_return(l)	local_inc_return(&(l)->a)

#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
#define local64_xchg(l, n)	local_xchg((&(l)->a), (n))
#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
#define local64_inc_not_zero(l)	local_inc_not_zero(&(l)->a)

/* Non-atomic variants, ie. preemption disabled and won't be touched
 * in interrupt, etc.  Some archs can optimize this case well.
 * NOTE: these are read-modify-write without any atomicity; the caller
 * must guarantee exclusive access (see the comment above). */
#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))

56*1996bda2SPeter Zijlstra #else /* BITS_PER_LONG != 64 */
57*1996bda2SPeter Zijlstra 
58*1996bda2SPeter Zijlstra #include <asm/atomic.h>
59*1996bda2SPeter Zijlstra 
60*1996bda2SPeter Zijlstra /* Don't use typedef: don't want them to be mixed with atomic_t's. */
61*1996bda2SPeter Zijlstra typedef struct {
62*1996bda2SPeter Zijlstra 	atomic64_t a;
63*1996bda2SPeter Zijlstra } local64_t;
64*1996bda2SPeter Zijlstra 
65*1996bda2SPeter Zijlstra #define LOCAL64_INIT(i)	{ ATOMIC_LONG_INIT(i) }
66*1996bda2SPeter Zijlstra 
67*1996bda2SPeter Zijlstra #define local64_read(l)		atomic64_read(&(l)->a)
68*1996bda2SPeter Zijlstra #define local64_set(l,i)	atomic64_set((&(l)->a),(i))
69*1996bda2SPeter Zijlstra #define local64_inc(l)		atomic64_inc(&(l)->a)
70*1996bda2SPeter Zijlstra #define local64_dec(l)		atomic64_dec(&(l)->a)
71*1996bda2SPeter Zijlstra #define local64_add(i,l)	atomic64_add((i),(&(l)->a))
72*1996bda2SPeter Zijlstra #define local64_sub(i,l)	atomic64_sub((i),(&(l)->a))
73*1996bda2SPeter Zijlstra 
74*1996bda2SPeter Zijlstra #define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
75*1996bda2SPeter Zijlstra #define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
76*1996bda2SPeter Zijlstra #define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
77*1996bda2SPeter Zijlstra #define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
78*1996bda2SPeter Zijlstra #define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
79*1996bda2SPeter Zijlstra #define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
80*1996bda2SPeter Zijlstra #define local64_inc_return(l)	atomic64_inc_return(&(l)->a)
81*1996bda2SPeter Zijlstra 
82*1996bda2SPeter Zijlstra #define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
83*1996bda2SPeter Zijlstra #define local64_xchg(l, n)	atomic64_xchg((&(l)->a), (n))
84*1996bda2SPeter Zijlstra #define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
85*1996bda2SPeter Zijlstra #define local64_inc_not_zero(l)	atomic64_inc_not_zero(&(l)->a)
86*1996bda2SPeter Zijlstra 
87*1996bda2SPeter Zijlstra /* Non-atomic variants, ie. preemption disabled and won't be touched
88*1996bda2SPeter Zijlstra  * in interrupt, etc.  Some archs can optimize this case well. */
89*1996bda2SPeter Zijlstra #define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
90*1996bda2SPeter Zijlstra #define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
91*1996bda2SPeter Zijlstra #define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
92*1996bda2SPeter Zijlstra #define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))
93*1996bda2SPeter Zijlstra 
94*1996bda2SPeter Zijlstra #endif /* BITS_PER_LONG != 64 */
95*1996bda2SPeter Zijlstra 
96*1996bda2SPeter Zijlstra #endif /* _ASM_GENERIC_LOCAL64_H */
97