xref: /openbmc/u-boot/arch/mips/include/asm/bitops.h (revision 5be93569)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/__ffs.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/*
 * Only disable interrupts for kernel-mode code; user-mode code that
 * dares to include kernel headers gets empty stubs so it stays alive.
 */
#define __bi_flags unsigned long flags
#define __bi_cli() __cli()
#define __bi_save_flags(x) __save_flags(x)
#define __bi_save_and_cli(x) __save_and_cli(x)
#define __bi_restore_flags(x) __restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions, for MIPS ISA > 1 (MIPS II and later), are interrupt-
 * and SMP-safe while remaining interrupt friendly: they use ll/sc
 * instead of disabling interrupts.
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
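
/*
 * Usage sketch (hypothetical names, not part of this header): atomically
 * record that a device slot is in use.  Because set_bit() is atomic,
 * concurrent callers need no extra locking around the update itself.
 */
#if 0
static unsigned long slot_in_use[2];	/* bitmap for 64 slots */

static void claim_slot(int slot)
{
	set_bit(slot, slot_in_use);	/* ll/sc retries until the store wins */
}
#endif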

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}
#define PLATFORM__SET_BIT

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}
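
/*
 * Sketch of the barrier usage described above (hypothetical lock word,
 * illustrative only): when clear_bit() releases a lock, the caller must
 * order the critical-section stores before the release explicitly.
 */
#if 0
static unsigned long example_lock;

static void example_unlock(void)
{
	smp_mb__before_clear_bit();	/* clear_bit() has no barrier itself */
	clear_bit(0, &example_lock);
}
#endif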

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
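
/*
 * Usage sketch (hypothetical names): a minimal busy-wait lock.  The
 * caller has acquired the lock iff the returned old bit value was 0.
 */
#if 0
static unsigned long busy_lock_word;

static void busy_lock(void)
{
	while (test_and_set_bit(0, &busy_lock_word))
		;			/* spin until we observed the bit clear */
}
#endif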

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
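
/*
 * Usage sketch (hypothetical names): consume a pending-event flag.
 * If several consumers race, exactly one of them sees the old value
 * as 1 and handles the event.
 */
#if 0
static unsigned long pending_events;

static int take_event(int ev)
{
	return test_and_clear_bit(ev, &pending_events);
}
#endif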

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a |= mask;
	__bi_restore_flags(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a &= ~mask;
	__bi_restore_flags(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a ^= mask;
	__bi_restore_flags(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_save_and_cli
#undef __bi_restore_flags

#endif /* MIPS I */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
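
/*
 * Usage sketch (hypothetical names): poll a ready flag.  test_bit()
 * only reads memory, so it is safe to call concurrently with the
 * atomic modifiers above.
 */
#if 0
static unsigned long dev_status;

static int dev_ready(void)
{
	return test_bit(7, &dev_status);	/* nonzero iff bit 7 is set */
}
#endif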

#ifndef __MIPSEB__

/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
	unsigned long dummy;
	int res;

	if (!size)
		return 0;

	__asm__ (".set\tnoreorder\n\t"
		".set\tnoat\n"
		"1:\tsubu\t$1,%6,%0\n\t"
		"blez\t$1,2f\n\t"
		"lw\t$1,(%5)\n\t"
		"addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
		"beql\t%1,$1,1b\n\t"
		"addiu\t%0,32\n\t"
#else
		"addiu\t%0,32\n\t"
		"beq\t%1,$1,1b\n\t"
		"nop\n\t"
		"subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
		"li\t%1,1\n"
		"1:\tand\t%2,$1,%1\n\t"
		"beqz\t%2,2f\n\t"
		"sll\t%1,%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"add\t%0,%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=r" (res), "=r" (dummy), "=r" (addr)
		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
		  "2" (addr), "r" (size)
		: "$1");

	return res;
}
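
/*
 * Usage sketch (hypothetical names): a tiny bitmap allocator built on
 * find_first_zero_bit() and set_bit().  Returns -1 once the map is full.
 */
#if 0
#define EXAMPLE_NSLOTS	128

static unsigned long example_map[EXAMPLE_NSLOTS / 32];

static int example_alloc(void)
{
	int slot = find_first_zero_bit(example_map, EXAMPLE_NSLOTS);

	if (slot >= EXAMPLE_NSLOTS)
		return -1;		/* no zero bit found */
	set_bit(slot, example_map);
	return slot;
}
#endif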

/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;
	unsigned long dummy;

	if (bit) {
		/*
		 * Look for zero in the first word
		 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
		__asm__(".set\tnoreorder\n\t"
			".set\tnoat\n"
			"1:\tand\t$1,%4,%1\n\t"
			"beqz\t$1,1f\n\t"
			"sll\t%1,%1,1\n\t"
			"bnez\t%1,1b\n\t"
			"addiu\t%0,1\n\t"
			".set\tat\n\t"
			".set\treorder\n"
			"1:"
			: "=r" (set), "=r" (dummy)
			: "0" (0), "1" (1 << bit), "r" (*p)
			: "$1");
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search the remaining full words for a zero
	 */
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));
	return offset + set + res;
}
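
/*
 * Usage sketch (hypothetical names): resume a bitmap scan past a
 * previously found bit.  Note the argument order in the code is
 * (addr, size, offset), which differs from the kernel-doc listing above.
 */
#if 0
static unsigned long scan_map[4];	/* 128 bits */

static int next_free_after(int prev)
{
	return find_next_zero_bit(scan_map, 128, prev + 1);
}
#endif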

#endif /* !(__MIPSEB__) */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned int	__res;
	unsigned int	mask = 1;

	__asm__ (
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"move\t%0,$0\n"
		"1:\tand\t$1,%2,%1\n\t"
		"beqz\t$1,2f\n\t"
		"sll\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:\n\t"
		: "=&r" (__res), "=r" (mask)
		: "r" (word), "1" (mask)
		: "$1");

	return __res;
}
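
/*
 * Worked example: ffz(0x0000000f) == 4, since bits 0-3 are set and
 * bit 4 is the lowest zero.  Per the comment above, callers must make
 * sure the word is not ~0UL before calling.
 */
#if 0
static void ffz_example(void)
{
	unsigned long first_zero = ffz(0x0000000fUL);	/* == 4 */
	(void) first_zero;
}
#endif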

#ifdef __KERNEL__

/*
 * hweightN - returns the Hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#endif /* __KERNEL__ */
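
/*
 * Worked example: hweight32(0xf0f0f0f0) == 16 and hweight8(0x0f) == 4.
 * The generic_hweight*() helpers these expand to are defined elsewhere,
 * not in this header.
 */
#if 0
static void hweight_example(void)
{
	int bits = hweight32(0xf0f0f0f0);	/* == 16 */
	(void) bits;
}
#endif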

#ifdef __MIPSEB__
/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

/* Linus says that gcc can optimize the following correctly; we'll see
 * if this holds on the Sparc as it does for the Alpha.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */

/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__
static __inline__ int ext2_set_bit(int nr, void * addr)
{
	int		mask, retval, flags;
	unsigned char	*ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_clear_bit(int nr, void * addr)
{
	int		mask, retval, flags;
	unsigned char	*ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_test_bit(int nr, const void * addr)
{
	int			mask;
	const unsigned char	*ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if(offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if(size < 32)
			goto found_first;
		if(~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while(size & ~31UL) {
		if(~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if(!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
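
/*
 * Usage sketch (hypothetical buffer): ext2 stores its bitmaps
 * little-endian on disk, which is why these big-endian wrappers
 * address bits within each byte in little-endian order.
 */
#if 0
static void ext2_bitmap_example(void *disk_bitmap)
{
	if (!ext2_test_bit(10, disk_bitmap))	/* bit 10 clear? */
		ext2_set_bit(10, disk_bitmap);	/* mark it set */
}
#endif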
#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
		find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */

/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This severely limits the Minix filesystem's usefulness for data exchange.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */