#ifndef _ASM_X86_LOCAL_H
#define _ASM_X86_LOCAL_H

#include <linux/percpu.h>

#include <linux/atomic.h>
#include <asm/asm.h>

typedef struct {
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l, i)	atomic_long_set(&(l)->a, (i))

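/*
 * Usage sketch (illustrative; the per-CPU variable name is hypothetical).
 * local_t is intended for per-CPU data that only its owning CPU modifies:
 *
 *	static DEFINE_PER_CPU(local_t, nr_events) = LOCAL_INIT(0);
 *
 *	long n = local_read(this_cpu_ptr(&nr_events));
 *
 * Reads from other CPUs are safe; writes from other CPUs are not.
 */
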
/**
 * local_inc - increment local variable
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1.
 */
static inline void local_inc(local_t *l)
{
	asm volatile(_ASM_INC "%0"
		     : "+m" (l->a.counter));
}

/**
 * local_dec - decrement local variable
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1.
 */
static inline void local_dec(local_t *l)
{
	asm volatile(_ASM_DEC "%0"
		     : "+m" (l->a.counter));
}

/**
 * local_add - add long to local variable
 * @i: long value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l.
 */
static inline void local_add(long i, local_t *l)
{
	asm volatile(_ASM_ADD "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

/**
 * local_sub - subtract long from local variable
 * @i: long value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l.
 */
static inline void local_sub(long i, local_t *l)
{
	asm volatile(_ASM_SUB "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

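/*
 * Sketch of the intended update pattern, as described in the kernel's
 * local_ops documentation (the counter name is hypothetical). The writer
 * must stay on the owning CPU for the duration of the update:
 *
 *	static DEFINE_PER_CPU(local_t, counters) = LOCAL_INIT(0);
 *
 *	local_inc(&get_cpu_var(counters));	(disables preemption)
 *	put_cpu_var(counters);			(re-enables preemption)
 *
 * No lock prefix is emitted, so these are cheap but only atomic with
 * respect to the local CPU and its interrupts.
 */
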
/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool local_sub_and_test(long i, local_t *l)
{
	GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", e);
}

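/*
 * Sketch (hypothetical counter and slow path): flush a per-CPU work
 * batch once every outstanding item has been retired:
 *
 *	if (local_sub_and_test(done, &get_cpu_var(pending)))
 *		flush_batch();
 *	put_cpu_var(pending);
 */
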
/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool local_dec_and_test(local_t *l)
{
	GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", e);
}

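/*
 * Sketch (hypothetical depth counter): paired with local_inc(), this
 * detects leaving the outermost of a set of nested sections on this CPU:
 *
 *	local_inc(this_cpu_ptr(&nest));
 *	...
 *	if (local_dec_and_test(this_cpu_ptr(&nest)))
 *		leave_outermost();	(hypothetical)
 */
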
/**
 * local_inc_and_test - increment and test
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool local_inc_and_test(local_t *l)
{
	GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", e);
}

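/*
 * Sketch (hypothetical BATCH constant): counting up from -BATCH makes
 * the test fire once every BATCH events on this CPU:
 *
 *	local_set(this_cpu_ptr(&budget), -BATCH);
 *	...
 *	if (local_inc_and_test(this_cpu_ptr(&budget))) {
 *		flush();				(hypothetical)
 *		local_set(this_cpu_ptr(&budget), -BATCH);
 *	}
 */
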
/**
 * local_add_negative - add and test if negative
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
static inline bool local_add_negative(long i, local_t *l)
{
	GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", s);
}

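/*
 * Sketch (hypothetical credit pool): charge a cost and detect overdraw
 * in a single instruction; note the value passed is negative:
 *
 *	if (local_add_negative(-cost, &get_cpu_var(credit)))
 *		refill_credit();	(hypothetical slow path)
 *	put_cpu_var(credit);
 */
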
/**
 * local_add_return - add and return
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns @i + @l.
 */
static inline long local_add_return(long i, local_t *l)
{
	long __i = i;
	asm volatile(_ASM_XADD "%0, %1;"
		     : "+r" (i), "+m" (l->a.counter)
		     : : "memory");
	/* xadd leaves the old value in "i"; old + __i is the new value */
	return i + __i;
}

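/*
 * Sketch (hypothetical sequence counter): the single xadd makes the
 * returned value unique among users on this CPU, including code running
 * in interrupt context:
 *
 *	long seq = local_add_return(1, this_cpu_ptr(&seqno));
 */
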
static inline long local_sub_return(long i, local_t *l)
{
	return local_add_return(-i, l);
}

#define local_inc_return(l)  (local_add_return(1, l))
#define local_dec_return(l)  (local_sub_return(1, l))

#define local_cmpxchg(l, o, n) \
	(cmpxchg_local(&((l)->a.counter), (o), (n)))
/* The xchg instruction always implies a lock prefix */
#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))

/**
 * local_add_unless - add unless the number is a given value
 * @l: pointer of type local_t
 * @a: the amount to add to l...
 * @u: ...unless l is equal to u.
 *
 * Atomically adds @a to @l, so long as it was not @u.
 * Returns non-zero if @l was not @u, and zero otherwise.
 */
#define local_add_unless(l, a, u)				\
({								\
	long c, old;						\
	c = local_read((l));					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = local_cmpxchg((l), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)

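/*
 * Sketch (hypothetical object): mirrors atomic_inc_not_zero() for a
 * reference count that is only ever modified from its owning CPU:
 *
 *	if (!local_inc_not_zero(&obj->ref))
 *		return -ENOENT;		(object already going away)
 */
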
/*
 * On x86-32, these are no better than the atomic variants.
 * On x86-64 these are better than the atomic variants on SMP kernels
 * because they don't use a lock prefix.
 */
#define __local_inc(l)		local_inc(l)
#define __local_dec(l)		local_dec(l)
#define __local_add(i, l)	local_add((i), (l))
#define __local_sub(i, l)	local_sub((i), (l))

#endif /* _ASM_X86_LOCAL_H */