/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>
#ifndef CONFIG_ARC_HAS_LLSC
#include <asm/smp.h>
#endif

#ifdef CONFIG_ARC_HAS_LLSC

/*
 * Hardware assisted Atomic-R-M-W
 */

#define BIT_OP(op, c_op, asm_op)					\
static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned int temp;						\
									\
	m += nr >> 5;							\
									\
	nr &= 0x1f;							\
									\
	__asm__ __volatile__(						\
	"1:	llock       %0, [%1]		\n"			\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond       %0, [%1]		\n"			\
	"	bnz         1b			\n"			\
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
	: "r"(m),	/* Not "m": llock only supports reg direct addr mode */	\
	  "ir"(nr)							\
	: "cc");							\
}
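
/*
 * Example: for nr = 40, "m += nr >> 5" advances @m by one word (40 >> 5 == 1)
 * and "nr &= 0x1f" leaves bit position 8 within that word. The llock/scond
 * pair then retries the read-modify-write until the conditional store
 * succeeds, i.e. until no other observer wrote the word in between.
 */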

/*
 * Semantically:
 *    Test the bit
 *    if clear
 *        set it and return 0 (old value)
 *    else
 *        return 1 (old value).
 *
 * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
 * and the old value of the bit is returned (see the usage sketch after the
 * macro below)
 */
#define TEST_N_BIT_OP(op, c_op, asm_op)					\
static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old, temp;					\
									\
	m += nr >> 5;							\
									\
	nr &= 0x1f;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock       %0, [%2]	\n"				\
	"	" #asm_op " %1, %0, %3	\n"				\
	"	scond       %1, [%2]	\n"				\
	"	bnz         1b		\n"				\
	: "=&r"(old), "=&r"(temp)					\
	: "r"(m), "ir"(nr)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return (old & (1UL << nr)) != 0;				\
}
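
/*
 * Typical usage sketch (illustrative only, the flag and field names below
 * are made up): atomically claiming a flag, where the return value tells
 * the caller whether it won the race:
 *
 *	if (!test_and_set_bit(IN_USE_BIT, &dev->flags))
 *		// bit was clear before, we now own it
 *	else
 *		// somebody else had already set it
 */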

#elif !defined(CONFIG_ARC_PLAT_EZNPS)

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 *
 * There's "significant" micro-optimization in writing our own variants of
 * bitops (over generic variants)
 *
 * (1) The generic APIs have "signed" @nr while we have it "unsigned"
 *     This avoids extra code being generated for pointer arithmetic, since
 *     the compiler is otherwise "not sure" that the index is NOT -ve
 * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL etc.)
 *     only consider the bottom 5 bits of @nr, so there is NO need to mask
 *     them off.
 *     (GCC Quirk: however for a constant @nr we still need to do the masking
 *             at compile time)
 */

#define BIT_OP(op, c_op, asm_op)					\
static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long temp, flags;					\
	m += nr >> 5;							\
									\
	/*								\
	 * spin lock/unlock provide the needed smp_mb() before/after	\
	 */								\
	bitops_lock(flags);						\
									\
	temp = *m;							\
	*m = temp c_op (1UL << (nr & 0x1f));				\
									\
	bitops_unlock(flags);						\
}

#define TEST_N_BIT_OP(op, c_op, asm_op)					\
static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old, flags;					\
	m += nr >> 5;							\
									\
	bitops_lock(flags);						\
									\
	old = *m;							\
	*m = old c_op (1UL << (nr & 0x1f));				\
									\
	bitops_unlock(flags);						\
									\
	return (old & (1UL << (nr & 0x1f))) != 0;			\
}

#else /* CONFIG_ARC_PLAT_EZNPS */

#define BIT_OP(op, c_op, asm_op)					\
static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	m += nr >> 5;							\
									\
	nr = (1UL << (nr & 0x1f));					\
	if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3)			\
		nr = ~nr;						\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(nr), "r"(m), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
}

#define TEST_N_BIT_OP(op, c_op, asm_op)					\
static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old;						\
									\
	m += nr >> 5;							\
									\
	nr = old = (1UL << (nr & 0x1f));				\
	if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3)			\
		old = ~old;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(old)							\
	: "r"(m), "i"(asm_op)						\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return (old & nr) != 0;						\
}

#endif /* CONFIG_ARC_PLAT_EZNPS */

/***************************************
 * Non atomic variants
 **************************************/
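
/*
 * The __xxx() versions below skip the atomics/locking above; they are only
 * safe when the caller already guarantees exclusion, e.g. a bitmap that is
 * set up before being published, or one protected by the caller's own lock
 * (lock and bitmap names below are purely illustrative):
 *
 *	spin_lock(&map_lock);
 *	__set_bit(idx, bitmap);
 *	spin_unlock(&map_lock);
 */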

#define __BIT_OP(op, c_op, asm_op)					\
static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)	\
{									\
	unsigned long temp;						\
	m += nr >> 5;							\
									\
	temp = *m;							\
	*m = temp c_op (1UL << (nr & 0x1f));				\
}

#define __TEST_N_BIT_OP(op, c_op, asm_op)				\
static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old;						\
	m += nr >> 5;							\
									\
	old = *m;							\
	*m = old c_op (1UL << (nr & 0x1f));				\
									\
	return (old & (1UL << (nr & 0x1f))) != 0;			\
}

#define BIT_OPS(op, c_op, asm_op)					\
									\
	/* set_bit(), clear_bit(), change_bit() */			\
	BIT_OP(op, c_op, asm_op)					\
									\
	/* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
	TEST_N_BIT_OP(op, c_op, asm_op)					\
									\
	/* __set_bit(), __clear_bit(), __change_bit() */		\
	__BIT_OP(op, c_op, asm_op)					\
									\
	/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
	__TEST_N_BIT_OP(op, c_op, asm_op)

#ifndef CONFIG_ARC_PLAT_EZNPS
BIT_OPS(set, |, bset)
BIT_OPS(clear, & ~, bclr)
BIT_OPS(change, ^, bxor)
#else
BIT_OPS(set, |, CTOP_INST_AOR_DI_R2_R2_R3)
BIT_OPS(clear, & ~, CTOP_INST_AAND_DI_R2_R2_R3)
BIT_OPS(change, ^, CTOP_INST_AXOR_DI_R2_R2_R3)
#endif

/*
 * This routine doesn't need to be atomic.
 */
static inline int
test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	unsigned long mask;

	addr += nr >> 5;

	mask = 1UL << (nr & 0x1f);

	return ((mask & *addr) != 0);
}

#ifdef CONFIG_ISA_ARCOMPACT

/*
 * Count the number of zeros, starting from the MSB
 * Helper for fls() and friends
 * This is a pure count, so the (1-32) / (0-31) ranges of fls/__fls don't apply:
 * the result can be 0 to 32, based on the number of leading 0's
 * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF) = 0, clz(0) = 32, clz(1) = 31
 */
static inline __attribute__ ((const)) int clz(unsigned int x)
{
	unsigned int res;

	__asm__ __volatile__(
	"	norm.f  %0, %1		\n"
	"	mov.n   %0, 0		\n"
	"	add.p   %0, %0, 1	\n"
	: "=r"(res)
	: "r"(x)
	: "cc");

	return res;
}

static inline int constant_fls(int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
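
/*
 * Worked example: constant_fls(0x370): the upper 16 bits are clear, so
 * x <<= 16 and r drops to 16; 0x0370_0000 has bits in its top byte, so the
 * 8-step is skipped; the top nibble is clear, so x <<= 4, r = 12; the top
 * two bits are clear, so x <<= 2, r = 10; bit 31 is now set, so the result
 * is 10, i.e. the highest set bit of 0x370 (bit 9, zero based) reported
 * 1-based.
 */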

/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned long x)
{
	if (__builtin_constant_p(x))
		return constant_fls(x);

	return 32 - clz(x);
}
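
/*
 * e.g. fls(0x00F0) = 32 - clz(0x00F0) = 32 - 24 = 8, the 1-based position
 * of the highest set bit (bit 7)
 */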

/*
 * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	if (!x)
		return 0;
	else
		return fls(x) - 1;
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
#define ffs(x)	({ unsigned long __t = (x); fls(__t & -__t); })
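
/*
 * __t & -__t isolates the lowest set bit: e.g. for x = 0x58 (0b0101_1000),
 * -x = 0xFFFF_FFA8, so x & -x = 0x8 and fls(0x8) = 4, i.e. ffs(0x58) = 4
 */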

/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __ffs(unsigned long word)
{
	if (!word)
		return word;

	return ffs(word) - 1;
}

#else	/* CONFIG_ISA_ARCV2 */

/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned long x)
{
	int n;

	asm volatile(
	"	fls.f	%0, %1		\n"  /* 0:31; 0(Z) if src 0 */
	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
	: "=r"(n)	/* Early clobber not needed */
	: "r"(x)
	: "cc");

	return n;
}

/*
 * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	/* FLS insn has exactly same semantics as the API */
	return __builtin_arc_fls(x);
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
static inline __attribute__ ((const)) int ffs(unsigned long x)
{
	int n;

	asm volatile(
	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
	"	mov.z	%0, 0		\n"  /* 31(Z)-> 0 */
	: "=r"(n)	/* Early clobber not needed */
	: "r"(x)
	: "cc");

	return n;
}

/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __ffs(unsigned long x)
{
	int n;

	asm volatile(
	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
	"	mov.z	%0, 0		\n"  /* 31(Z)-> 0 */
	: "=r"(n)
	: "r"(x)
	: "cc");

	return n;
}

#endif	/* CONFIG_ISA_ARCOMPACT */

/*
 * ffz = Find First Zero in word.
 * @return:[0-31], 32 if all 1's
 */
#define ffz(x)	__ffs(~(x))
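
/*
 * e.g. ffz(0x0000_00FF) = __ffs(0xFFFF_FF00) = 8, the first clear bit
 */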

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* !__ASSEMBLY__ */

#endif