/*
 * bitops.h: Bit string operations on the ppc
 */

#ifndef _PPC_BITOPS_H
#define _PPC_BITOPS_H

#include <asm/byteorder.h>

extern void set_bit(int nr, volatile void *addr);
extern void clear_bit(int nr, volatile void *addr);
extern void change_bit(int nr, volatile void *addr);
extern int test_and_set_bit(int nr, volatile void *addr);
extern int test_and_clear_bit(int nr, volatile void *addr);
extern int test_and_change_bit(int nr, volatile void *addr);

/*
 * Arguably these bit operations don't imply any memory barrier or
 * SMP ordering, but in fact a lot of drivers expect them to imply
 * both, since they do on x86 cpus.
 */
#ifdef CONFIG_SMP
#define SMP_WMB		"eieio\n"
#define SMP_MB		"\nsync"
#else
#define SMP_WMB
#define SMP_MB
#endif /* CONFIG_SMP */
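
/*
 * A note on the barriers (illustrative, per PowerPC semantics):
 * "eieio" orders the stores issued before the atomic sequence, and
 * "sync" is a full barrier that orders the update against everything
 * that follows.  On SMP each atomic op below therefore expands to
 * roughly:
 *
 *	eieio
 * 1:	lwarx	old,0,p		# load word, set reservation
 *	<op>	old,old,mask	# or / andc / xor
 *	stwcx.	old,0,p		# store only if reservation still held
 *	bne	1b		# lost the reservation - retry
 *	sync
 */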

#define __INLINE_BITOPS	1

#if __INLINE_BITOPS
/*
 * These used to be if'd out here because using : "cc" as a constraint
 * resulted in errors from egcs.  Things may be OK with gcc-2.95.
 */
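
/*
 * All of the functions below use the same load-reserve/store-conditional
 * pattern: lwarx loads the word and sets a reservation on it, stwcx.
 * stores the updated word only if the reservation is still intact, and
 * the bne loops back to retry if another processor (or an interrupt)
 * touched the word in between.
 */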
extern __inline__ void set_bit(int nr, volatile void *addr)
{
	unsigned long old;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%3\n\
	or	%0,%0,%2\n\
	stwcx.	%0,0,%3\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}

extern __inline__ void clear_bit(int nr, volatile void *addr)
{
	unsigned long old;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%3\n\
	andc	%0,%0,%2\n\
	stwcx.	%0,0,%3\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}

extern __inline__ void change_bit(int nr, volatile void *addr)
{
	unsigned long old;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%3\n\
	xor	%0,%0,%2\n\
	stwcx.	%0,0,%3\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}

extern __inline__ int test_and_set_bit(int nr, volatile void *addr)
{
	unsigned int old, t;
	unsigned int mask = 1 << (nr & 0x1f);
	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%4\n\
	or	%1,%0,%3\n\
	stwcx.	%1,0,%4\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");

	return (old & mask) != 0;
}

extern __inline__ int test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned int old, t;
	unsigned int mask = 1 << (nr & 0x1f);
	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%4\n\
	andc	%1,%0,%3\n\
	stwcx.	%1,0,%4\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");

	return (old & mask) != 0;
}

extern __inline__ int test_and_change_bit(int nr, volatile void *addr)
{
	unsigned int old, t;
	unsigned int mask = 1 << (nr & 0x1f);
	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%4\n\
	xor	%1,%0,%3\n\
	stwcx.	%1,0,%4\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");

	return (old & mask) != 0;
}
#endif /* __INLINE_BITOPS */
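
/*
 * Illustrative usage (the flags word here is hypothetical, not part
 * of this API):
 *
 *	static unsigned long pending;
 *
 *	set_bit(3, &pending);
 *	if (test_and_clear_bit(3, &pending))
 *		... bit 3 was set and is now atomically cleared ...
 *
 * Bit numbers are not limited to 0..31: bits 5 and up of nr select
 * the 32-bit word, the low five bits select the bit within it.
 */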

extern __inline__ int test_bit(int nr, __const__ volatile void *addr)
{
	__const__ volatile unsigned int *p = (__const__ volatile unsigned int *) addr;

	return ((p[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
}

/* Return the bit position of the most significant 1 bit in a word */
/* - the result is undefined when x == 0 */
extern __inline__ int __ilog2(unsigned int x)
{
	int lz;

	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
	return 31 - lz;
}
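
/*
 * Worked example: for x = 0x00400000 the cntlzw instruction counts
 * 9 leading zero bits, so __ilog2 returns 31 - 9 = 22, the index of
 * the most significant set bit.
 */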

extern __inline__ int ffz(unsigned int x)
{
	if ((x = ~x) == 0)
		return 32;
	return __ilog2(x & -x);
}
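
/*
 * ffz finds the first (least significant) zero bit: ~x turns zeros
 * into ones, x & -x isolates the lowest set bit, and __ilog2 gives
 * its index.  Example: ffz(0xffff00ff) == 8.  When x is all ones
 * there is no zero bit and 32 is returned.
 */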

/*
 * fls: find last (most-significant) bit set.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 *
 * On powerpc, __ilog2(0) returns -1, but this is not safe in general
 */
static __inline__ int fls(unsigned int x)
{
	return __ilog2(x) + 1;
}
#define PLATFORM_FLS

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#if BITS_PER_LONG == 32
static inline int fls64(__u64 x)
{
	__u32 h = x >> 32;
	if (h)
		return fls(h) + 32;
	return fls(x);
}
#elif BITS_PER_LONG == 64
static inline int fls64(__u64 x)
{
	if (x == 0)
		return 0;
	return __ilog2(x) + 1;
}
#else
#error BITS_PER_LONG not 32 or 64
#endif
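
/*
 * Examples: fls64(0) == 0, fls64(1) == 1, and fls64(1ULL << 32) == 33,
 * since on a 32-bit build the high word is scanned first.
 */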

static inline int __ilog2_u64(u64 n)
{
	return fls64(n) - 1;
}

static inline int ffs64(u64 x)
{
	return __ilog2_u64(x & -x) + 1;
}
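
/*
 * Note that ffs64(0) == 0 by construction: x & -x is 0, fls64(0) is 0,
 * so __ilog2_u64 yields -1 and the +1 brings the result back to 0.
 */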

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
extern __inline__ int ffs(int x)
{
	return __ilog2(x & -x) + 1;
}
#define PLATFORM_FFS

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
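
/*
 * Example: hweight8(0xf0) == 4, hweight16(0x0101) == 2.  The
 * generic_hweightN() helpers are provided by the common bitops code.
 */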

#endif /* __KERNEL__ */

/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
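
/*
 * The scan works in three steps: mask off (set to one) the bits below
 * `offset' in the first partial word, then walk whole words looking
 * for one that is not all ones, and finally mask off the bits above
 * `size' in the last partial word.  ffz() converts the matching word
 * into a bit index.
 */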
extern __inline__ unsigned long find_next_zero_bit(void *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (tmp != ~0U)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != ~0U)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

#define _EXT2_HAVE_ASM_BITOPS_

#ifdef __KERNEL__
/*
 * test_and_{set,clear}_bit guarantee atomicity without
 * disabling interrupts.
 *
 * ext2 bitmaps are little-endian on disk.  XORing the bit number
 * with 0x18 flips the byte index within each 32-bit word
 * (byte 0 <-> 3, byte 1 <-> 2), which maps the little-endian bit
 * numbering onto this big-endian CPU's word layout.
 */
#define ext2_set_bit(nr, addr)		test_and_set_bit((nr) ^ 0x18, (addr))
#define ext2_clear_bit(nr, addr)	test_and_clear_bit((nr) ^ 0x18, (addr))

#else
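/*
 * Outside the kernel these need not be atomic, and byte-wise
 * addressing matches the little-endian on-disk layout directly, so
 * no bit-number swizzling is needed.
 */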
extern __inline__ int ext2_set_bit(int nr, void *addr)
{
	int		mask;
	unsigned char	*ADDR = (unsigned char *) addr;
	int oldbit;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	oldbit = (*ADDR & mask) ? 1 : 0;
	*ADDR |= mask;
	return oldbit;
}

extern __inline__ int ext2_clear_bit(int nr, void *addr)
{
	int		mask;
	unsigned char	*ADDR = (unsigned char *) addr;
	int oldbit;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	oldbit = (*ADDR & mask) ? 1 : 0;
	*ADDR &= ~mask;
	return oldbit;
}
#endif	/* __KERNEL__ */

extern __inline__ int ext2_test_bit(int nr, __const__ void *addr)
{
	__const__ unsigned char	*ADDR = (__const__ unsigned char *) addr;

	return (ADDR[nr >> 3] >> (nr & 7)) & 1;
}

/*
 * This implementation of ext2_find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
 */
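
/*
 * cpu_to_le32p() byte-swaps each word on this big-endian machine, so
 * the little-endian on-disk bitmap can be scanned with the same
 * word-at-a-time logic as find_next_zero_bit() above.
 */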

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = cpu_to_le32p(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (tmp != ~0U)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = cpu_to_le32p(p++)) != ~0U)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = cpu_to_le32p(p);
found_first:
	tmp |= ~0U << size;
found_middle:
	return result + ffz(tmp);
}

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr, addr) ext2_set_bit((nr), (addr))
#define minix_set_bit(nr, addr) ((void)ext2_set_bit((nr), (addr)))
#define minix_test_and_clear_bit(nr, addr) ext2_clear_bit((nr), (addr))
#define minix_test_bit(nr, addr) ext2_test_bit((nr), (addr))
#define minix_find_first_zero_bit(addr, size) ext2_find_first_zero_bit((addr), (size))

#endif /* _PPC_BITOPS_H */