xref: /openbmc/linux/arch/powerpc/include/asm/bitops.h (revision a36954f5)
/*
 * PowerPC atomic bit operations.
 *
 * Merged version by David Gibson <david@gibson.dropbear.id.au>.
 * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
 * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard.  They
 * originally took it from the ppc32 code.
 *
 * Within a word, bits are numbered LSB first.  Lots of places make
 * this assumption by directly testing bits with (val & (1<<nr)).
 * This can cause confusion for large (> 1 word) bitmaps on a
 * big-endian system because, unlike little endian, the number of each
 * bit depends on the word size.
 *
 * The bitop functions are defined to work on unsigned longs, so on a
 * ppc64 system the bits end up numbered:
 *   |63..............0|127............64|191...........128|255...........192|
 * and on ppc32:
 *   |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
 *
 * There are a few little-endian macros used mostly for filesystem
 * bitmaps; these work on similar bit array layouts, but
 * byte-oriented:
 *   |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
 *
 * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
 * number field need to be reversed compared to the big-endian bit
 * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
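 *
 * Worked example (an added illustration): byte-oriented bit 8 is bit 0
 * of byte 1; on a 64-bit big-endian machine byte 1 holds word bits
 * 55-48, so the little-endian helpers reach it at word bit
 * 8 ^ 0x38 == 48.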
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ASM_POWERPC_BITOPS_H
#define _ASM_POWERPC_BITOPS_H

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/asm-compat.h>
#include <asm/synch.h>

/* PPC bit number conversion */
#define PPC_BITLSHIFT(be)	(BITS_PER_LONG - 1 - (be))
#define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
#define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

/* Put a PPC bit into a "normal" bit position */
#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit)			\
	((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))

#define PPC_BITLSHIFT32(be)	(32 - 1 - (be))
#define PPC_BIT32(bit)		(1UL << PPC_BITLSHIFT32(bit))
#define PPC_BITMASK32(bs, be)	((PPC_BIT32(bs) - PPC_BIT32(be))|PPC_BIT32(bs))

#define PPC_BITLSHIFT8(be)	(8 - 1 - (be))
#define PPC_BIT8(bit)		(1UL << PPC_BITLSHIFT8(bit))
#define PPC_BITMASK8(bs, be)	((PPC_BIT8(bs) - PPC_BIT8(be))|PPC_BIT8(bs))
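
/*
 * Worked examples (added illustration, assuming a 64-bit kernel):
 *
 *	PPC_BIT(0)        == 0x8000000000000000UL  (IBM bit 0 is the MSB)
 *	PPC_BITMASK(0, 3) == 0xf000000000000000UL  (IBM bits 0..3)
 *	PPC_BITEXTRACT(0x8000000000000000UL, 0, 5) == 0x20
 */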

#include <asm/barrier.h>

/* Macro for generating the ***_bits() functions */
#define DEFINE_BITOP(fn, op, prefix)		\
static __inline__ void fn(unsigned long mask,	\
		volatile unsigned long *_p)	\
{						\
	unsigned long old;			\
	unsigned long *p = (unsigned long *)_p;	\
	__asm__ __volatile__ (			\
	prefix					\
"1:"	PPC_LLARX(%0,0,%3,0) "\n"		\
	stringify_in_c(op) "%0,%0,%2\n"		\
	PPC405_ERR77(0,%3)			\
	PPC_STLCX "%0,0,%3\n"			\
	"bne- 1b\n"				\
	: "=&r" (old), "+m" (*p)		\
	: "r" (mask), "r" (p)			\
	: "cc", "memory");			\
}

DEFINE_BITOP(set_bits, or, "")
DEFINE_BITOP(clear_bits, andc, "")
DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER)
DEFINE_BITOP(change_bits, xor, "")
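
/*
 * Conceptually, each generated function retries an atomic
 * read-modify-write until the store-conditional succeeds; a
 * non-atomic sketch of set_bits() (added illustration only):
 *
 *	void set_bits(unsigned long mask, volatile unsigned long *p)
 *	{
 *		*p |= mask;	// really larx; or; stcx.; retry on failure
 *	}
 */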

static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
	set_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}

static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
	clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}

static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
{
	clear_bits_unlock(BIT_MASK(nr), addr + BIT_WORD(nr));
}

static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
	change_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}
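
/*
 * For example (added illustration): on a 64-bit kernel,
 * set_bit(72, bitmap) becomes set_bits(1UL << 8, bitmap + 1), since
 * BIT_WORD(72) == 72 / 64 selects word 1 and BIT_MASK(72) ==
 * 1UL << (72 % 64) selects bit 8 within it.
 */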

/* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
 * operands. */
#define DEFINE_TESTOP(fn, op, prefix, postfix, eh)	\
static __inline__ unsigned long fn(			\
		unsigned long mask,			\
		volatile unsigned long *_p)		\
{							\
	unsigned long old, t;				\
	unsigned long *p = (unsigned long *)_p;		\
	__asm__ __volatile__ (				\
	prefix						\
"1:"	PPC_LLARX(%0,0,%3,eh) "\n"			\
	stringify_in_c(op) "%1,%0,%2\n"			\
	PPC405_ERR77(0,%3)				\
	PPC_STLCX "%1,0,%3\n"				\
	"bne- 1b\n"					\
	postfix						\
	: "=&r" (old), "=&r" (t)			\
	: "r" (mask), "r" (p)				\
	: "cc", "memory");				\
	return (old & mask);				\
}

DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
	      PPC_ATOMIC_EXIT_BARRIER, 0)
DEFINE_TESTOP(test_and_set_bits_lock, or, "",
	      PPC_ACQUIRE_BARRIER, 1)
DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
	      PPC_ATOMIC_EXIT_BARRIER, 0)
DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
	      PPC_ATOMIC_EXIT_BARRIER, 0)

static __inline__ int test_and_set_bit(unsigned long nr,
				       volatile unsigned long *addr)
{
	return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}

static __inline__ int test_and_set_bit_lock(unsigned long nr,
				       volatile unsigned long *addr)
{
	return test_and_set_bits_lock(BIT_MASK(nr),
				addr + BIT_WORD(nr)) != 0;
}

static __inline__ int test_and_clear_bit(unsigned long nr,
					 volatile unsigned long *addr)
{
	return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}

static __inline__ int test_and_change_bit(unsigned long nr,
					  volatile unsigned long *addr)
{
	return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}
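
/*
 * A minimal usage sketch (hypothetical lock word, added illustration):
 * test_and_set_bit_lock() and clear_bit_unlock() pair up with acquire
 * and release semantics, so a single bit can serve as a spinlock:
 *
 *	while (test_and_set_bit_lock(0, &lock_word))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(0, &lock_word);
 */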

#ifdef CONFIG_PPC64
static __inline__ unsigned long clear_bit_unlock_return_word(int nr,
						volatile unsigned long *addr)
{
	unsigned long old, t;
	unsigned long *p = (unsigned long *)addr + BIT_WORD(nr);
	unsigned long mask = BIT_MASK(nr);

	__asm__ __volatile__ (
	PPC_RELEASE_BARRIER
"1:"	PPC_LLARX(%0,0,%3,0) "\n"
	"andc %1,%0,%2\n"
	PPC405_ERR77(0,%3)
	PPC_STLCX "%1,0,%3\n"
	"bne- 1b\n"
	: "=&r" (old), "=&r" (t)
	: "r" (mask), "r" (p)
	: "cc", "memory");

	return old;
}

/* This is a special function for mm/filemap.c */
#define clear_bit_unlock_is_negative_byte(nr, addr)			\
	(clear_bit_unlock_return_word(nr, addr) & BIT_MASK(PG_waiters))

#endif /* CONFIG_PPC64 */
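
/*
 * A sketch of the mm/filemap.c pattern (added illustration): the page
 * unlock path clears PG_locked with release semantics and, from the
 * same atomic operation, learns whether PG_waiters was set:
 *
 *	if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
 *		wake_the_waiters(page);		// hypothetical helper
 */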

#include <asm-generic/bitops/non-atomic.h>

static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" ::: "memory");
	__clear_bit(nr, addr);
}

/*
 * Return the zero-based bit position (LE, not IBM bit numbering) of
 * the most significant 1-bit in a double word.
 */
static __inline__ __attribute__((const))
int __ilog2(unsigned long x)
{
	int lz;

	asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
	return BITS_PER_LONG - 1 - lz;
}
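
/*
 * Worked example (added illustration): on ppc64, __ilog2(0x50) counts
 * 57 leading zeroes with cntlzd, so it returns 63 - 57 == 6, the
 * position of the most significant 1-bit of 0b1010000.
 */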

static inline __attribute__((const))
int __ilog2_u32(u32 n)
{
	int bit;
	asm ("cntlzw %0,%1" : "=r" (bit) : "r" (n));
	return 31 - bit;
}

#ifdef __powerpc64__
static inline __attribute__((const))
int __ilog2_u64(u64 n)
{
	int bit;
	asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n));
	return 63 - bit;
}
#endif

/*
 * Determines the bit position of the least significant 0 bit in the
 * specified double word. The returned bit position will be
 * zero-based, starting from the right side (63/31 - 0).
 */
static __inline__ unsigned long ffz(unsigned long x)
{
	/* no zero exists anywhere in the word */
	if ((x = ~x) == 0)
		return BITS_PER_LONG;

	/*
	 * Calculate the bit position of the least significant '1' bit in x
	 * (since x has been changed this will actually be the least
	 * significant '0' bit in the original x).  Note: (x & -x) gives us
	 * a mask that is the least significant (RIGHT-most) 1-bit of the
	 * value in x.
	 */
	return __ilog2(x & -x);
}
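
/*
 * Worked example (added illustration): ffz(0xff) inverts x to
 * ...ffffff00, isolates the lowest set bit with (x & -x) == 0x100, and
 * returns __ilog2(0x100) == 8, the lowest 0 bit of the original value.
 */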

static __inline__ unsigned long __ffs(unsigned long x)
{
	return __ilog2(x & -x);
}

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	unsigned long i = (unsigned long)x;
	return __ilog2(i & -i) + 1;
}
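
/*
 * For example (added illustration): __ffs(0x10) == 4 (zero-based),
 * while ffs(0x10) == 5 (one-based, the libc convention).  ffs(0)
 * returns 0 because (0 & -0) == 0 and __ilog2(0) == -1 here.
 */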

/*
 * fls: find last (most-significant) bit set.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static __inline__ int fls(unsigned int x)
{
	int lz;

	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
	return 32 - lz;
}

static __inline__ unsigned long __fls(unsigned long x)
{
	return __ilog2(x);
}
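
/*
 * Note (added): fls(0) == 0 falls out naturally because cntlzw of zero
 * is 32; __fls(0), like the generic kernel __fls(), is undefined.
 */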

/*
 * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
 * instruction; for 32-bit we use the generic version, which does two
 * 32-bit fls calls.
 */
#ifdef __powerpc64__
static __inline__ int fls64(__u64 x)
{
	int lz;

	asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
	return 64 - lz;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif /* __powerpc64__ */

#ifdef CONFIG_PPC64
unsigned int __arch_hweight8(unsigned int w);
unsigned int __arch_hweight16(unsigned int w);
unsigned int __arch_hweight32(unsigned int w);
unsigned long __arch_hweight64(__u64 w);
#include <asm-generic/bitops/const_hweight.h>
#else
#include <asm-generic/bitops/hweight.h>
#endif
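
/*
 * hweight ("Hamming weight") counts set bits; for example (added
 * illustration), hweight32(0xf0f0f0f0) == 16.  On PPC64 the
 * out-of-line __arch_hweight*() implementations can use the popcnt
 * family of instructions where the CPU provides them.
 */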

#include <asm-generic/bitops/find.h>

/* Little-endian versions */
#include <asm-generic/bitops/le.h>

/* Bitmap functions for the ext2 filesystem */

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_BITOPS_H */