xref: /openbmc/linux/arch/powerpc/include/asm/bitops.h (revision b6dcefde)
/*
 * PowerPC atomic bit operations.
 *
 * Merged version by David Gibson <david@gibson.dropbear.id.au>.
 * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
 * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard.  They
 * originally took it from the ppc32 code.
 *
 * Within a word, bits are numbered LSB first.  Lots of places make
 * this assumption by directly testing bits with (val & (1<<nr)).
 * This can cause confusion for large (> 1 word) bitmaps on a
 * big-endian system because, unlike little endian, the number of each
 * bit depends on the word size.
 *
 * The bitop functions are defined to work on unsigned longs, so for a
 * ppc64 system the bits end up numbered:
 *   |63..............0|127............64|191...........128|255...........192|
 * and on ppc32:
 *   |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
 *
 * There are a few little-endian macros used mostly for filesystem
 * bitmaps; these work on similar bit array layouts, but are
 * byte-oriented:
 *   |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
 *
 * The main difference is that bits 3-5 (64b) or 3-4 (32b) of the bit
 * number need to be flipped compared to the big-endian bit fields.
 * This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
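 *
 * For example, little-endian bit 0 is bit 0 of the lowest-addressed
 * byte; in a 64-bit big-endian word that byte holds bits 56..63, so
 * 0 ^ 0x38 = 56 selects the same bit.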
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ASM_POWERPC_BITOPS_H
#define _ASM_POWERPC_BITOPS_H

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/asm-compat.h>
#include <asm/synch.h>

/*
 * clear_bit doesn't imply a memory barrier
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
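
/*
 * Illustrative sketch only (FLAG_PENDING, FLAG_QUEUED and 'flags' are
 * hypothetical): when clearing a bit must be ordered before a later
 * load, pair clear_bit() with the barrier:
 *
 *	clear_bit(FLAG_PENDING, &flags);
 *	smp_mb__after_clear_bit();
 *	if (test_bit(FLAG_QUEUED, &flags))
 *		... handle the queued work ...
 */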

#define BITOP_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
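
/*
 * Worked example with 64-bit longs: for nr = 70, BITOP_WORD(70) = 1 and
 * BITOP_MASK(70) = 1UL << 6, i.e. bit 70 of the bitmap is bit 6 of the
 * second word.
 */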

/* Macro for generating the ***_bits() functions */
#define DEFINE_BITOP(fn, op, prefix, postfix)	\
static __inline__ void fn(unsigned long mask,	\
		volatile unsigned long *_p)	\
{						\
	unsigned long old;			\
	unsigned long *p = (unsigned long *)_p;	\
	__asm__ __volatile__ (			\
	prefix					\
"1:"	PPC_LLARX "%0,0,%3\n"			\
	stringify_in_c(op) "%0,%0,%2\n"		\
	PPC405_ERR77(0,%3)			\
	PPC_STLCX "%0,0,%3\n"			\
	"bne- 1b\n"				\
	postfix					\
	: "=&r" (old), "+m" (*p)		\
	: "r" (mask), "r" (p)			\
	: "cc", "memory");			\
}

DEFINE_BITOP(set_bits, or, "", "")
DEFINE_BITOP(clear_bits, andc, "", "")
DEFINE_BITOP(clear_bits_unlock, andc, LWSYNC_ON_SMP, "")
DEFINE_BITOP(change_bits, xor, "", "")

static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
	set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
}

static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
	clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
}

static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
{
	clear_bits_unlock(BITOP_MASK(nr), addr + BITOP_WORD(nr));
}

static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
	change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr));
}
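
/*
 * Usage sketch (illustrative only; 'pending' is a hypothetical bitmap).
 * set_bit(3, ...) performs an atomic read-modify-write on word 0, and
 * change_bit(70, ...) toggles bit 6 of word 1; none of these imply a
 * memory barrier:
 *
 *	static DECLARE_BITMAP(pending, 128);
 *
 *	set_bit(3, pending);
 *	change_bit(70, pending);
 *	clear_bit(3, pending);
 */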

/* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
 * operands. */
#define DEFINE_TESTOP(fn, op, prefix, postfix)	\
static __inline__ unsigned long fn(		\
		unsigned long mask,		\
		volatile unsigned long *_p)	\
{						\
	unsigned long old, t;			\
	unsigned long *p = (unsigned long *)_p;	\
	__asm__ __volatile__ (			\
	prefix					\
"1:"	PPC_LLARX "%0,0,%3\n"			\
	stringify_in_c(op) "%1,%0,%2\n"		\
	PPC405_ERR77(0,%3)			\
	PPC_STLCX "%1,0,%3\n"			\
	"bne- 1b\n"				\
	postfix					\
	: "=&r" (old), "=&r" (t)		\
	: "r" (mask), "r" (p)			\
	: "cc", "memory");			\
	return (old & mask);			\
}

DEFINE_TESTOP(test_and_set_bits, or, LWSYNC_ON_SMP, ISYNC_ON_SMP)
DEFINE_TESTOP(test_and_set_bits_lock, or, "", ISYNC_ON_SMP)
DEFINE_TESTOP(test_and_clear_bits, andc, LWSYNC_ON_SMP, ISYNC_ON_SMP)
DEFINE_TESTOP(test_and_change_bits, xor, LWSYNC_ON_SMP, ISYNC_ON_SMP)

static __inline__ int test_and_set_bit(unsigned long nr,
				       volatile unsigned long *addr)
{
	return test_and_set_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
}

static __inline__ int test_and_set_bit_lock(unsigned long nr,
				       volatile unsigned long *addr)
{
	return test_and_set_bits_lock(BITOP_MASK(nr),
				addr + BITOP_WORD(nr)) != 0;
}

static __inline__ int test_and_clear_bit(unsigned long nr,
					 volatile unsigned long *addr)
{
	return test_and_clear_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
}

static __inline__ int test_and_change_bit(unsigned long nr,
					  volatile unsigned long *addr)
{
	return test_and_change_bits(BITOP_MASK(nr), addr + BITOP_WORD(nr)) != 0;
}
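
/*
 * Lock-style usage sketch (illustrative; 'guard' is a hypothetical
 * word used as a bit lock).  test_and_set_bit_lock() has acquire
 * semantics (isync after a successful store-conditional) and
 * clear_bit_unlock() has release semantics (lwsync before the store):
 *
 *	while (test_and_set_bit_lock(0, &guard))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(0, &guard);
 */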

#include <asm-generic/bitops/non-atomic.h>

static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(LWSYNC_ON_SMP "" ::: "memory");
	__clear_bit(nr, addr);
}

/*
 * Return the zero-based bit position (LE, not IBM bit numbering) of
 * the most significant 1-bit in an unsigned long.
 */
static __inline__ __attribute__((const))
int __ilog2(unsigned long x)
{
	int lz;

	asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
	return BITS_PER_LONG - 1 - lz;
}
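
/* For example, __ilog2(0x4000) = 14: bit 14 is the highest 1-bit set. */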

static inline __attribute__((const))
int __ilog2_u32(u32 n)
{
	int bit;
	asm ("cntlzw %0,%1" : "=r" (bit) : "r" (n));
	return 31 - bit;
}

#ifdef __powerpc64__
static inline __attribute__((const))
int __ilog2_u64(u64 n)
{
	int bit;
	asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n));
	return 63 - bit;
}
#endif

/*
 * Determine the bit position of the least significant 0 bit in the
 * given unsigned long.  The returned bit position is zero-based,
 * counting from the right (63/31 down to 0).
 */
static __inline__ unsigned long ffz(unsigned long x)
{
	/* no zero exists anywhere in the word */
	if ((x = ~x) == 0)
		return BITS_PER_LONG;

	/*
	 * Calculate the bit position of the least significant '1' bit in x
	 * (since x has been inverted, this is the least significant '0' bit
	 * of the original x).  Note: (x & -x) gives us a mask that is the
	 * least significant (right-most) 1-bit of the value in x.
	 */
	return __ilog2(x & -x);
}
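
/*
 * Example: for x = 0xb7 (binary 10110111) the lowest 0 bit is bit 3;
 * ~x has bit 3 as its lowest set bit, so ~x & -~x == 0x8 and
 * __ilog2(0x8) == 3.
 */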

static __inline__ int __ffs(unsigned long x)
{
	return __ilog2(x & -x);
}

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	unsigned long i = (unsigned long)x;
	return __ilog2(i & -i) + 1;
}
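
/* For example, ffs(0) = 0 and ffs(0x18) = 4 (bit 3 is the lowest set bit). */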

/*
 * fls: find last (most-significant) bit set.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static __inline__ int fls(unsigned int x)
{
	int lz;

	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
	return 32 - lz;
}

static __inline__ unsigned long __fls(unsigned long x)
{
	return __ilog2(x);
}

/*
 * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
 * instruction; for 32-bit we use the generic version, which does two
 * 32-bit fls calls.
 */
#ifdef __powerpc64__
static __inline__ int fls64(__u64 x)
{
	int lz;

	asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
	return 64 - lz;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif /* __powerpc64__ */

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/find.h>

/* Little-endian versions */

static __inline__ int test_le_bit(unsigned long nr,
				  __const__ unsigned long *addr)
{
	__const__ unsigned char	*tmp = (__const__ unsigned char *) addr;
	return (tmp[nr >> 3] >> (nr & 7)) & 1;
}
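
/*
 * For example, test_le_bit(12, addr) tests bit 4 of byte 1 of the
 * bitmap, regardless of the host endianness.
 */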

#define __set_le_bit(nr, addr) \
	__set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define __clear_le_bit(nr, addr) \
	__clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))

#define test_and_set_le_bit(nr, addr) \
	test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define test_and_clear_le_bit(nr, addr) \
	test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))

#define __test_and_set_le_bit(nr, addr) \
	__test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
#define __test_and_clear_le_bit(nr, addr) \
	__test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))

#define find_first_zero_le_bit(addr, size) generic_find_next_zero_le_bit((addr), (size), 0)
unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
				    unsigned long size, unsigned long offset);

unsigned long generic_find_next_le_bit(const unsigned long *addr,
				    unsigned long size, unsigned long offset);

/* Bitmap functions for the ext2 filesystem */

#define ext2_set_bit(nr,addr) \
	__test_and_set_le_bit((nr), (unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_le_bit((nr), (unsigned long*)addr)

#define ext2_set_bit_atomic(lock, nr, addr) \
	test_and_set_le_bit((nr), (unsigned long*)addr)
#define ext2_clear_bit_atomic(lock, nr, addr) \
	test_and_clear_le_bit((nr), (unsigned long*)addr)

#define ext2_test_bit(nr, addr)      test_le_bit((nr),(unsigned long*)addr)

#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_le_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
	generic_find_next_zero_le_bit((unsigned long*)addr, size, off)

#define ext2_find_next_bit(addr, size, off) \
	generic_find_next_le_bit((unsigned long *)addr, size, off)
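
/*
 * Illustrative sketch only ('lock', 'bit' and 'bh' are hypothetical; in
 * ext2 the address would be a buffer_head's b_data pointing at an
 * on-disk block bitmap):
 *
 *	if (!ext2_set_bit_atomic(lock, bit, bh->b_data))
 *		... the bit was previously clear, the block is now ours ...
 *
 * The on-disk bitmap is little-endian and byte-oriented, so the layout
 * is the same on 32-bit and 64-bit kernels.
 */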

/* Bitmap functions for the minix filesystem.  */

#define minix_test_and_set_bit(nr,addr) \
	__test_and_set_le_bit(nr, (unsigned long *)addr)
#define minix_set_bit(nr,addr) \
	__set_le_bit(nr, (unsigned long *)addr)
#define minix_test_and_clear_bit(nr,addr) \
	__test_and_clear_le_bit(nr, (unsigned long *)addr)
#define minix_test_bit(nr,addr) \
	test_le_bit(nr, (unsigned long *)addr)

#define minix_find_first_zero_bit(addr,size) \
	find_first_zero_le_bit((unsigned long *)addr, size)

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_BITOPS_H */
339