/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
 *
 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
 */

#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

#include <linux/types.h>

#ifdef __KERNEL__

#define ATOMIC_INIT(i)  { (i) }

extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);
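
/* atomic_cmpxchg() and atomic_add_unless() are implemented out of line
 * (on this port the spinlock-based versions live in
 * arch/sparc/lib/atomic32.c).  As a rough, illustrative sketch only,
 * not the actual sparc code, add_unless is conventionally built as a
 * cmpxchg loop:
 *
 *	static int atomic_add_unless_sketch(atomic_t *v, int a, int u)
 *	{
 *		int c, old;
 *
 *		c = atomic_read(v);
 *		while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
 *			c = old;
 *		return c != u;
 *	}
 *
 * i.e. add @a to @v unless @v was already @u, and report whether the
 * add happened.
 */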

#define atomic_read(v)          ((v)->counter)

#define atomic_add(i, v)	((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v)	((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v)		((void)__atomic_add_return(        1, (v)))
#define atomic_dec(v)		((void)__atomic_add_return(       -1, (v)))

#define atomic_add_return(i, v)	(__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v)	(__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v)	(__atomic_add_return(        1, (v)))
#define atomic_dec_return(v)	(__atomic_add_return(       -1, (v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
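
/* A minimal usage sketch (illustrative only; struct myobj, myobj_get()
 * and myobj_put() are hypothetical): the classic reference-count
 * pattern these primitives support.
 *
 *	struct myobj {
 *		atomic_t refcnt;
 *	};
 *
 *	static void myobj_get(struct myobj *p)
 *	{
 *		atomic_inc(&p->refcnt);
 *	}
 *
 *	static void myobj_put(struct myobj *p)
 *	{
 *		if (atomic_dec_and_test(&p->refcnt))
 *			kfree(p);
 *	}
 *
 * atomic_inc_not_zero() is the lookup-side companion: it takes a
 * reference only if the count has not already dropped to zero.
 */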

/* This is the old 24-bit implementation.  It's still used internally
 * by some sparc-specific code, notably the semaphore implementation.
 */
typedef struct { volatile int counter; } atomic24_t;

#ifndef CONFIG_SMP

#define ATOMIC24_INIT(i)  { (i) }
#define atomic24_read(v)          ((v)->counter)
#define atomic24_set(v, i)        (((v)->counter) = i)

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler; see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is to embed the spin lock byte within the
 * word, using the low byte so that signedness is easily retained
 * via a quick arithmetic shift.  It looks like this:
 *
 *	----------------------------------------
 *	| signed 24-bit counter value |  lock  |  atomic24_t
 *	----------------------------------------
 *	 31                          8 7      0
 */
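
/* A worked example of the encoding, for a counter value of -5:
 *
 *	stored word: (-5) << 8        == 0xfffffb00  (lock byte clear)
 *	read back:   0xfffffb00 >> 8  == 0xfffffffb  == -5
 *
 * The arithmetic (sign-extending) right shift is what preserves the
 * sign of the 24-bit counter; a logical shift would read -5 back as
 * 0x00fffffb.
 */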

#define ATOMIC24_INIT(i)	{ ((i) << 8) }

static inline int atomic24_read(const atomic24_t *v)
{
	int ret = v->counter;

	/* Spin until the lock byte is clear, then extract the signed
	 * 24-bit value with an arithmetic shift.
	 */
	while (ret & 0xff)
		ret = v->counter;

	return ret >> 8;
}

#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))
#endif /* CONFIG_SMP */

static inline int __atomic24_add(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

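	/* Fixed-register calling convention for the out-of-line code:
	 * %g1 carries the pointer, %g2 the increment and the result,
	 * with %g3, %g4 and %g7 as scratch.  %o7 is saved in %g4 and
	 * bumped past the call sequence in the delay slot, presumably
	 * so ___atomic24_add can return with a plain jmpl and restore
	 * the original return address from %g4 (see atomic.S for the
	 * authoritative details).
	 */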
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_add\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}

/* Same fixed-register calling convention as __atomic24_add() above,
 * but calls the out-of-line subtract routine.
 */
static inline int __atomic24_sub(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_sub\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}

#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))

#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))

#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)

#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))

#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
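
/* A rough sketch of the classic counting-semaphore pattern that the
 * old sparc semaphore code builds on these (illustrative only, not the
 * actual implementation):
 *
 *	atomic24_t count = ATOMIC24_INIT(1);
 *
 *	down: if the count goes negative, the caller must sleep
 *		if (atomic24_dec_return(&count) < 0)
 *			...sleep until woken...
 *
 *	up: if the count is still not positive, a sleeper needs waking
 *		if (atomic24_inc_return(&count) <= 0)
 *			...wake one sleeper...
 */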

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __KERNEL__ */

#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */