xref: /openbmc/linux/arch/arc/include/asm/bitops.h (revision 80f420842ff42ad61f84584716d74ef635f13892)
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>
#ifndef CONFIG_ARC_HAS_LLSC
#include <asm/smp.h>
#endif

#if defined(CONFIG_ARC_HAS_LLSC)

/*
 * Hardware assisted Atomic-R-M-W
 */

#define BIT_OP(op, c_op, asm_op)					\
static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned int temp;						\
									\
	m += nr >> 5;							\
									\
	/*								\
	 * ARC ISA micro-optimization:					\
	 *								\
	 * Instructions dealing with bitpos only consider lower 5 bits	\
	 * e.g. (x << 33) is handled like (x << 1) by the ASL instruction\
	 * (mem pointer still needs adjustment to point to next word)	\
	 *								\
	 * Hence the masking to clamp the @nr arg can be elided in general.\
	 *								\
	 * However, if @nr is a constant (the above assumes a register)\
	 * and is greater than 31, gcc can optimize (x << 33) away to 0\
	 * as an overflow, given the 32-bit ISA. Thus masking needs to	\
	 * be done for a const @nr, but no code is generated thanks to	\
	 * gcc constant propagation.					\
	 */								\
	nr &= 0x1f;							\
									\
	__asm__ __volatile__(						\
	"1:	llock       %0, [%1]		\n"			\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond       %0, [%1]		\n"			\
	"	bnz         1b			\n"			\
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
	: "r"(m),	/* Not "m": llock only supports reg direct addr mode */	\
	  "ir"(nr)							\
	: "cc");							\
}
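
/*
 * Illustrative sketch (hypothetical, for exposition only): instantiating
 * BIT_OP(set, |, bset) below yields a function roughly equivalent to the
 * one sketched here, i.e. an atomic "*m |= (1 << nr)" built from an
 * LLOCK/SCOND retry loop. The name "example_set_bit" is made up.
 */
#if 0	/* example only, never compiled */
static inline void example_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;			/* word containing the bit */
	nr &= 0x1f;			/* bit position within that word */

	__asm__ __volatile__(
	"1:	llock	%0, [%1]	\n"	/* load-locked the word */
	"	bset	%0, %0, %2	\n"	/* set bit @nr in it */
	"	scond	%0, [%1]	\n"	/* store-conditional it back */
	"	bnz	1b		\n"	/* retry if reservation was lost */
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}
#endif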

/*
 * Semantically:
 *    Test the bit
 *    if clear
 *        set it and return 0 (old value)
 *    else
 *        return 1 (old value).
 *
 * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
 * and the old value of the bit is returned
 */
#define TEST_N_BIT_OP(op, c_op, asm_op)					\
static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old, temp;					\
									\
	m += nr >> 5;							\
									\
	nr &= 0x1f;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock       %0, [%2]	\n"				\
	"	" #asm_op " %1, %0, %3	\n"				\
	"	scond       %1, [%2]	\n"				\
	"	bnz         1b		\n"				\
	: "=&r"(old), "=&r"(temp)					\
	: "r"(m), "ir"(nr)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return (old & (1 << nr)) != 0;					\
}
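
/*
 * Illustrative sketch (hypothetical, for exposition only): the semantics
 * described above for test_and_set_bit() match the non-atomic sequence
 * below; the LLOCK/SCOND loop simply makes the read-modify-write
 * indivisible. The name "example_test_and_set_bit" is made up.
 */
#if 0	/* example only, never compiled */
static inline int example_test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long old;

	m += nr >> 5;

	old = *m;			/* read */
	*m = old | mask;		/* modify + write: set unconditionally */

	return (old & mask) != 0;	/* report the bit's previous value */
}
#endif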

#else	/* !CONFIG_ARC_HAS_LLSC */

/*
 * Non-hardware-assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 *
 * There's "significant" micro-optimization in writing our own variants of
 * bitops (over the generic variants):
 *
 * (1) The generic APIs take a "signed" @nr while ours is "unsigned".
 *     This avoids extra code being generated for the pointer arithmetic,
 *     since the compiler otherwise can't be sure the index is not negative.
 * (2) ARCompact bit-fiddling instructions (BSET/BCLR/ASL etc.) only consider
 *     the bottom 5 bits of @nr, so there is NO need to mask them off.
 *     (GCC quirk: for a constant @nr, however, we still need to do the
 *      masking at compile time)
 */

#define BIT_OP(op, c_op, asm_op)					\
static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long temp, flags;					\
	m += nr >> 5;							\
									\
	/*								\
	 * spin lock/unlock provide the needed smp_mb() before/after	\
	 */								\
	bitops_lock(flags);						\
									\
	temp = *m;							\
	*m = temp c_op (1UL << (nr & 0x1f));				\
									\
	bitops_unlock(flags);						\
}

#define TEST_N_BIT_OP(op, c_op, asm_op)					\
static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old, flags;					\
	m += nr >> 5;							\
									\
	bitops_lock(flags);						\
									\
	old = *m;							\
	*m = old c_op (1UL << (nr & 0x1f));				\
									\
	bitops_unlock(flags);						\
									\
	return (old & (1UL << (nr & 0x1f))) != 0;			\
}

#endif /* CONFIG_ARC_HAS_LLSC */

/***************************************
 * Non atomic variants
 **************************************/

#define __BIT_OP(op, c_op, asm_op)					\
static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)	\
{									\
	unsigned long temp;						\
	m += nr >> 5;							\
									\
	temp = *m;							\
	*m = temp c_op (1UL << (nr & 0x1f));				\
}

#define __TEST_N_BIT_OP(op, c_op, asm_op)				\
static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old;						\
	m += nr >> 5;							\
									\
	old = *m;							\
	*m = old c_op (1UL << (nr & 0x1f));				\
									\
	return (old & (1UL << (nr & 0x1f))) != 0;			\
}

#define BIT_OPS(op, c_op, asm_op)					\
									\
	/* set_bit(), clear_bit(), change_bit() */			\
	BIT_OP(op, c_op, asm_op)					\
									\
	/* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
	TEST_N_BIT_OP(op, c_op, asm_op)					\
									\
	/* __set_bit(), __clear_bit(), __change_bit() */		\
	__BIT_OP(op, c_op, asm_op)					\
									\
	/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
	__TEST_N_BIT_OP(op, c_op, asm_op)

BIT_OPS(set, |, bset)
BIT_OPS(clear, & ~, bclr)
BIT_OPS(change, ^, bxor)

/*
 * This routine doesn't need to be atomic.
 */
static inline int
test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	unsigned long mask;

	addr += nr >> 5;

	mask = 1UL << (nr & 0x1f);

	return ((mask & *addr) != 0);
}
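
/*
 * Illustrative usage sketch (hypothetical, for exposition only): how the
 * bitops generated above are typically used on a word-aligned bitmap.
 * The bitmap, indices and function name below are made up.
 */
#if 0	/* example only, never compiled */
static void example_bitmap_usage(void)
{
	unsigned long example_bitmap[2] = { 0, 0 };	/* 64 flag bits */
	int was_set;

	set_bit(5, example_bitmap);		/* atomic: set bit 5 of word 0 */
	__set_bit(40, example_bitmap);		/* non-atomic: bit 8 of word 1 */

	was_set = test_bit(5, example_bitmap);		/* 1: plain read */
	was_set = test_and_clear_bit(5, example_bitmap);/* 1: and clears it */
	was_set = test_bit(5, example_bitmap);		/* now 0 */

	(void)was_set;
}
#endif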

#ifdef CONFIG_ISA_ARCOMPACT

/*
 * Count the number of zeros, starting from the MSB
 * Helper for fls( ) friends
 * This is a pure count, so the (1-32) / (0-31) conventions don't apply
 * The result ranges from 0 to 32, based on the number of leading 0's
 * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF) = 0, clz(0) = 32, clz(1) = 31
 */
static inline __attribute__ ((const)) int clz(unsigned int x)
{
	unsigned int res;

	__asm__ __volatile__(
	"	norm.f  %0, %1		\n"
	"	mov.n   %0, 0		\n"
	"	add.p   %0, %0, 1	\n"
	: "=r"(res)
	: "r"(x)
	: "cc");

	return res;
}
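
/*
 * Illustrative sketch (hypothetical, for exposition only): a portable C
 * equivalent of the clz() helper above, handy for checking the edge values
 * quoted in the comment. Note __builtin_clz(0) is undefined, hence the
 * explicit zero check. The name "example_clz" is made up.
 */
#if 0	/* example only, never compiled */
static inline int example_clz(unsigned int x)
{
	if (!x)
		return 32;		/* matches clz(0) = 32 above */

	return __builtin_clz(x);	/* clz(1) = 31, clz(0x80000000) = 0 */
}
#endif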

static inline int constant_fls(int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
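
/*
 * Worked example (added for illustration): constant_fls(0x00000500)
 *   start:                x = 0x00000500, r = 32
 *   no bit in 0xffff0000: x <<= 16 -> 0x05000000, r = 16
 *   bit in    0xff000000: skip
 *   no bit in 0xf0000000: x <<= 4  -> 0x50000000, r = 12
 *   bit in    0xc0000000: skip
 *   no bit in 0x80000000: x <<= 1  -> 0xa0000000, r = 11
 *   result 11: the highest set bit of 0x500 is bit 10, i.e. fls() = 11
 */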

/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned long x)
{
	if (__builtin_constant_p(x))
		return constant_fls(x);

	return 32 - clz(x);
}

/*
 * __fls: Similar to fls, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	if (!x)
		return 0;
	else
		return fls(x) - 1;
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
#define ffs(x)	({ unsigned long __t = (x); fls(__t & -__t); })

/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __ffs(unsigned long word)
{
	if (!word)
		return word;

	return ffs(word) - 1;
}
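
/*
 * Illustrative note (added): the ffs() macro above isolates the lowest set
 * bit with the usual two's-complement identity (x & -x) and then reuses
 * fls() on the single-bit result. A worked example:
 *
 *   x         = 0x00000050  (bits 4 and 6 set)
 *   -x        = 0xffffffb0
 *   x & -x    = 0x00000010  (only bit 4 survives)
 *   fls(0x10) = 5, so ffs(0x50) = 5 and __ffs(0x50) = 4
 */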

#else	/* CONFIG_ISA_ARCV2 */

/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned long x)
{
	int n;

	asm volatile(
	"	fls.f	%0, %1		\n"  /* 0:31; 0(Z) if src 0 */
	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
	: "=r"(n)	/* Early clobber not needed */
	: "r"(x)
	: "cc");

	return n;
}

/*
 * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	/* FLS insn has exactly the same semantics as the API */
	return __builtin_arc_fls(x);
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
static inline __attribute__ ((const)) int ffs(unsigned long x)
{
	int n;

	asm volatile(
	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
	"	mov.z	%0, 0		\n"  /* 31(Z)-> 0 */
	: "=r"(n)	/* Early clobber not needed */
	: "r"(x)
	: "cc");

	return n;
}

/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __ffs(unsigned long x)
{
	int n;

	asm volatile(
	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
	"	mov.z	%0, 0		\n"  /* 31(Z)-> 0 */
	: "=r"(n)
	: "r"(x)
	: "cc");

	return n;
}
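
/*
 * Illustrative note (added): expected values for the ARCv2 FFS/FLS based
 * helpers above, matching the generic bitops API:
 *
 *   ffs(0)    = 0     __ffs(0)    = 0
 *   ffs(1)    = 1     __ffs(1)    = 0
 *   ffs(0x10) = 5     __ffs(0x10) = 4
 *   fls(0)    = 0     __fls(0)    = 0
 *   fls(0x10) = 5     __fls(0x10) = 4
 */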

#endif	/* CONFIG_ISA_ARCOMPACT */

/*
 * ffz = Find First Zero in word.
 * @return: [0-31]; result is not meaningful if the word is all 1's
 *          (there is no zero bit to find)
 */
#define ffz(x)	__ffs(~(x))
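
/*
 * Illustrative note (added): ffz() simply inverts the word and looks for
 * the first set bit, e.g. ffz(0x000000ff): ~x = 0xffffff00, __ffs() of
 * that is 8, i.e. bit 8 is the lowest clear bit of 0xff.
 */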

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* !__ASSEMBLY__ */

#endif