xref: /openbmc/u-boot/arch/mips/include/asm/bitops.h (revision 274bced8)
/*
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000  Silicon Graphics, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/__ffs.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/*
 * Only disable interrupts for kernel mode code, so that user mode code
 * that dares to use kernel include files keeps working.
 */
#define __bi_flags unsigned long flags
#define __bi_cli() __cli()
#define __bi_save_flags(x) __save_flags(x)
#define __bi_save_and_cli(x) __save_and_cli(x)
#define __bi_restore_flags(x) __restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions for MIPS ISA > 1 are interrupt safe, SMP safe and
 * interrupt friendly.
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
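
/*
 * Illustrative usage sketch, not part of the original interface: how a
 * caller might use set_bit()/clear_bit() on a bitmap made of 32-bit
 * words.  Names are hypothetical and the block is kept inside #if 0 so
 * it is never compiled.
 */
#if 0
static unsigned long example_irq_pending[2];	/* bits 0..63 */

static void example_mark_irq_pending(int irq)
{
	/* Atomically set bit 'irq'; nr may index beyond the first word. */
	set_bit(irq, example_irq_pending);
}

static void example_ack_irq(int irq)
{
	/* Atomic clear; clear_bit() is declared further down this file. */
	clear_bit(irq, example_irq_pending);
}
#endif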

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}
#define PLATFORM__SET_BIT

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b\n\t"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
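
/*
 * Illustrative sketch of test_and_set_bit() as a simple claim/release
 * flag: the returned old value tells the caller whether it won the race.
 * Hypothetical names, kept inside #if 0 so it is never compiled.
 */
#if 0
static unsigned long example_claim_word;

static int example_try_claim(void)
{
	/* Returns nonzero if the bit was clear and we set it. */
	return test_and_set_bit(0, &example_claim_word) == 0;
}

static void example_release(void)
{
	/*
	 * clear_bit() itself has no barrier; pair it with
	 * smp_mb__before_clear_bit() when the flag protects other data.
	 */
	smp_mb__before_clear_bit();
	clear_bit(0, &example_claim_word);
}
#endif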

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a |= mask;
	__bi_restore_flags(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a &= ~mask;
	__bi_restore_flags(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a ^= mask;
	__bi_restore_flags(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_restore_flags

#endif /* MIPS I */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
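
/*
 * Illustrative sketch: scanning a bitmap with test_bit().  Hypothetical
 * name, kept inside #if 0 so it is never compiled.
 */
#if 0
static int example_count_set_bits(const unsigned long *bitmap, int nbits)
{
	int nr, count = 0;

	for (nr = 0; nr < nbits; nr++)
		if (test_bit(nr, bitmap))
			count++;
	return count;
}
#endif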

#ifndef __MIPSEB__

/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
	unsigned long dummy;
	int res;

	if (!size)
		return 0;

	__asm__ (".set\tnoreorder\n\t"
		".set\tnoat\n"
		"1:\tsubu\t$1,%6,%0\n\t"
		"blez\t$1,2f\n\t"
		"lw\t$1,(%5)\n\t"
		"addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
		"beql\t%1,$1,1b\n\t"
		"addiu\t%0,32\n\t"
#else
		"addiu\t%0,32\n\t"
		"beq\t%1,$1,1b\n\t"
		"nop\n\t"
		"subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
		"li\t%1,1\n"
		"1:\tand\t%2,$1,%1\n\t"
		"beqz\t%2,2f\n\t"
		"sll\t%1,%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"add\t%0,%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=r" (res), "=r" (dummy), "=r" (addr)
		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
		  "2" (addr), "r" (size)
		: "$1");

	return res;
}

/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bitnumber to start searching at
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;
	unsigned long dummy;

	if (bit) {
		/*
		 * Look for zero in the first word
		 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
		__asm__(".set\tnoreorder\n\t"
			".set\tnoat\n"
			"1:\tand\t$1,%4,%1\n\t"
			"beqz\t$1,1f\n\t"
			"sll\t%1,%1,1\n\t"
			"bnez\t%1,1b\n\t"
			"addiu\t%0,1\n\t"
			".set\tat\n\t"
			".set\treorder\n"
			"1:"
			: "=r" (set), "=r" (dummy)
			: "0" (0), "1" (1 << bit), "r" (*p)
			: "$1");
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full words for a zero
	 */
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));
	return offset + set + res;
}

#endif /* !(__MIPSEB__) */


/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned int	__res;
	unsigned int	mask = 1;

	__asm__ (
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"move\t%0,$0\n"
		"1:\tand\t$1,%2,%1\n\t"
		"beqz\t$1,2f\n\t"
		"sll\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:\n\t"
		: "=&r" (__res), "=r" (mask)
		: "r" (word), "1" (mask)
		: "$1");

	return __res;
}
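
/*
 * Illustrative sketch: ffz() is undefined when the word has no zero bit,
 * so callers should compare against ~0UL first.  Hypothetical name, kept
 * inside #if 0 so it is never compiled.
 */
#if 0
static int example_first_free_slot(unsigned long mask)
{
	if (mask == ~0UL)
		return -1;		/* every slot taken */
	return ffz(mask);		/* e.g. ffz(0x0000000b) == 2 */
}
#endif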

#ifdef __KERNEL__

/*
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#endif /* __KERNEL__ */
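
/*
 * Illustrative note: the hweightN() macros above map to the generic
 * population-count helpers, e.g. hweight32(0xf0f0f0f0) == 16 and
 * hweight8(0x01) == 1.
 */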

#ifdef __MIPSEB__
/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bitnumber to start searching at
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on the Sparc as it does for the ALPHA.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */
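
/*
 * Illustrative sketch: allocating a free slot from a bitmap with
 * find_first_zero_bit() and set_bit().  No locking is shown; a real
 * caller would need a lock or a retry loop around the find/set pair.
 * Hypothetical names, kept inside #if 0 so it is never compiled.
 */
#if 0
#define EXAMPLE_NR_SLOTS	128

static unsigned long example_slot_map[EXAMPLE_NR_SLOTS / 32];

static int example_alloc_slot(void)
{
	int slot = find_first_zero_bit(example_slot_map, EXAMPLE_NR_SLOTS);

	if (slot >= EXAMPLE_NR_SLOTS)
		return -1;		/* bitmap full */
	set_bit(slot, example_slot_map);
	return slot;
}
#endif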

/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__
static __inline__ int ext2_set_bit(int nr, void * addr)
{
	int		mask, retval, flags;
	unsigned char	*ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_clear_bit(int nr, void * addr)
{
	int		mask, retval, flags;
	unsigned char	*ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_test_bit(int nr, const void * addr)
{
	int			mask;
	const unsigned char	*ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if(offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if(size < 32)
			goto found_first;
		if(~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while(size & ~31UL) {
		if(~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if(!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
		find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */
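
/*
 * Illustrative sketch: the ext2_* helpers operate on little-endian bit
 * order regardless of CPU byte order, which is what the ext2 on-disk
 * block and inode bitmaps require.  Hypothetical names, kept inside
 * #if 0 so it is never compiled.
 */
#if 0
static long example_claim_inode(void *inode_bitmap, unsigned long nr_inodes)
{
	unsigned long ino = ext2_find_first_zero_bit(inode_bitmap, nr_inodes);

	if (ino >= nr_inodes)
		return -1;			/* no free inode */
	ext2_set_bit(ino, inode_bitmap);	/* returns the old bit, 0 here */
	return ino;
}
#endif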

/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */