/* xref: /openbmc/linux/arch/mips/include/asm/bitops.h (revision 50df3be7) */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);

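/*
 * Editorial overview of the inline operations below; each picks one of
 * several implementations at compile time:
 *
 *  - R10000_LLSC_WAR:	an ll/sc sequence retried with the branch-likely
 *			instruction (beqzl), as the R10000 ll/sc errata
 *			workaround requires.
 *  - CONFIG_CPU_MIPSR2/R6 with a constant bit number (where provided):
 *			an ll/sc loop that uses a single ins/ext instead of
 *			building a mask.
 *  - kernel_uses_llsc:	a plain ll/sc retry loop at MIPS_ISA_ARCH_LEVEL.
 *  - otherwise:	the out-of-line __mips_*() helpers declared above,
 *			which fall back to disabling interrupts.
 *
 * The loongson_llsc_mb() calls ahead of the ll/sc loops are a barrier
 * required as a Loongson-3 ll/sc workaround.
 */
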
/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)
		: __LLSC_CLOBBER);
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
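
/*
 * Illustrative usage (not part of the original header): callers keep the
 * bits in an unsigned long array, typically declared with DECLARE_BITMAP():
 *
 *	DECLARE_BITMAP(pending, 64);
 *
 *	set_bit(37, pending);
 *	if (test_and_clear_bit(37, pending))
 *		handle_pending();
 *
 * handle_pending() and "pending" are made-up names for this example; the
 * bitops themselves are the standard kernel API documented in this file.
 */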

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit))
		: __LLSC_CLOBBER);
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (~(1UL << bit))
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	arch=r4000			\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
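
/*
 * Note that test_and_set_bit() is bracketed by smp_mb__before_llsc() and
 * smp_llsc_mb() on every path, so, as documented, it behaves as a full
 * memory barrier, unlike the plain set_bit()/clear_bit()/change_bit()
 * operations above.
 */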

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit_lock	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit_lock	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
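
/*
 * test_and_set_bit_lock() omits the leading smp_mb__before_llsc(); only the
 * trailing smp_llsc_mb() is issued, which provides the acquire ordering a
 * lock acquisition needs.
 */
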
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS "%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}
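
/*
 * In the MIPSR2/R6 constant-nr variant above, EXT pulls the old bit value
 * straight into "res" and INS clears the bit in place, so no mask needs to
 * be built; the retry loop therefore tests the SC result in "temp" rather
 * than "res".
 */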

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: __LLSC_CLOBBER);
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: __LLSC_CLOBBER);
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}
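
/*
 * The release ordering here comes from the smp_mb__before_llsc() ahead of
 * the plain __clear_bit() store; nudge_writes() then encourages that store
 * to drain out towards memory promptly so a waiter spinning on the bit sees
 * the unlock without undue delay.
 */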

/*
 * Return the bit position (0..BITS_PER_LONG-1) of the most significant 1 bit
 * in a word. Undefined if no 1 bit exists, so callers should check for a
 * non-zero word first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
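
/*
 * Worked example for the software fallback (illustrative): __fls(0x90) on a
 * 32-bit kernel starts at num = 31; the top 16 bits are clear, so num drops
 * to 15 and the word shifts left by 16, then the (new) top 8 bits are clear,
 * so num drops to 7. The remaining tests fail, and 7 is returned: bit 7
 * (0x80) is indeed the highest bit set in 0x90.
 */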

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
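
/*
 * __ffs() relies on the two's-complement identity that word & -word
 * isolates the lowest set bit; __fls() of that single-bit value is then the
 * bit's position. E.g. 0x90 & -0x90 == 0x10, so __ffs(0x90) == 4.
 */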

/*
 * fls - find last bit set.
 * @x: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
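
/*
 * When @x is not a compile-time constant and the CPU has CLZ, the whole
 * function reduces to fls(x) = 32 - clz(x), which also yields the documented
 * fls(0) == 0 since clz of zero is 32.
 */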

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from ffz() (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
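
/*
 * ffs() is thus the 1-based variant: for non-zero input it equals
 * __ffs(word) + 1, e.g. ffs(0x90) == 5, while ffs(0) is defined as 0.
 */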

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */