#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#include <asm/types.h>
#include <asm/system.h>

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
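
/*
 * Worked example (illustrative only, assuming L1_CACHE_BYTES == 32):
 * operations on any bit of the word at address 0x1000 hash to
 * __atomic_hash[(0x1000 / 32) & 3] == __atomic_hash[0], so accesses to the
 * same cacheline always contend on the same spinlock, while addresses in
 * different cachelines can usually proceed in parallel.
 */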

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave().  So you can get NMI events occurring while a
 * *_bit function is holding a spin lock.  If the NMI handler also wants
 * to do bit manipulation (and some do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 *
 * by Keith Owens
 */
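
/*
 * Hypothetical sequence showing the deadlock described above (SMP case,
 * names are illustrative):
 *
 *   set_bit(0, &flags);         takes __raw_spin_lock(ATOMIC_HASH(&flags))
 *     <NMI arrives on the same CPU>
 *       nmi_handler():
 *         set_bit(1, &flags);   hashes to the same lock -> spins forever
 */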

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
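
/*
 * Example (illustrative only): mark device 5 as present in a bitmap held
 * in a single unsigned long.
 *
 *   static unsigned long present_map;
 *
 *   set_bit(5, &present_map);
 */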

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
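
/*
 * Example (illustrative only): release a "busy" flag taken with
 * test_and_set_bit().  When used as an unlock, a barrier such as
 * smp_mb__before_clear_bit() must be issued first, where the surrounding
 * bitops headers provide one.  BUSY_BIT and dev_flags are hypothetical.
 *
 *   smp_mb__before_clear_bit();
 *   clear_bit(BUSY_BIT, &dev_flags);
 */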

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered. It may be
 * reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}
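
/*
 * Example (illustrative only): atomically flip a heartbeat/status bit.
 *
 *   static unsigned long status;
 *
 *   change_bit(0, &status);
 */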

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It may be reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
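
/*
 * Example (illustrative only): a simple lock bit built from these helpers.
 * Bit 0 of lock_word acts as the lock; the loop spins until it observes
 * the bit as previously clear.
 *
 *   static unsigned long lock_word;
 *
 *   while (test_and_set_bit(0, &lock_word))
 *           ;
 *   ... critical section ...
 *   clear_bit(0, &lock_word);
 */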

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It can be reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
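
/*
 * Example (illustrative only): consume a pending-work flag exactly once,
 * even if producer and consumer race.  PENDING_BIT, pending_flags and
 * handle_pending() are hypothetical names.
 *
 *   if (test_and_clear_bit(PENDING_BIT, &pending_flags))
 *           handle_pending();
 */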

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}
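
/*
 * Example (illustrative only): toggle a state bit and branch on what it was.
 * led_state, led_on() and led_off() are hypothetical names.
 *
 *   static unsigned long led_state;
 *
 *   if (test_and_change_bit(0, &led_state))
 *           led_off();      (bit was previously set)
 *   else
 *           led_on();       (bit was previously clear)
 */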

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */