xref: /openbmc/linux/arch/arc/include/asm/bitops.h (revision d2912cb15bdda8ba4a5dd73396ad62641af2f520)
1*d2912cb1SThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
214e968baSVineet Gupta /*
314e968baSVineet Gupta  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
414e968baSVineet Gupta  */
514e968baSVineet Gupta 
614e968baSVineet Gupta #ifndef _ASM_BITOPS_H
714e968baSVineet Gupta #define _ASM_BITOPS_H
814e968baSVineet Gupta 
914e968baSVineet Gupta #ifndef _LINUX_BITOPS_H
1014e968baSVineet Gupta #error only <linux/bitops.h> can be included directly
1114e968baSVineet Gupta #endif
1214e968baSVineet Gupta 
1314e968baSVineet Gupta #ifndef __ASSEMBLY__
1414e968baSVineet Gupta 
1514e968baSVineet Gupta #include <linux/types.h>
1614e968baSVineet Gupta #include <linux/compiler.h>
17d594ffa9SPeter Zijlstra #include <asm/barrier.h>
1804e2eee4SVineet Gupta #ifndef CONFIG_ARC_HAS_LLSC
1904e2eee4SVineet Gupta #include <asm/smp.h>
2004e2eee4SVineet Gupta #endif
2114e968baSVineet Gupta 
22a5a10d99SNoam Camus #ifdef CONFIG_ARC_HAS_LLSC
2314e968baSVineet Gupta 
24de60c1a1SVineet Gupta /*
2504e2eee4SVineet Gupta  * Hardware assisted Atomic-R-M-W
26de60c1a1SVineet Gupta  */
2714e968baSVineet Gupta 
2804e2eee4SVineet Gupta #define BIT_OP(op, c_op, asm_op)					\
2904e2eee4SVineet Gupta static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
3004e2eee4SVineet Gupta {									\
3104e2eee4SVineet Gupta 	unsigned int temp;						\
3204e2eee4SVineet Gupta 									\
3304e2eee4SVineet Gupta 	m += nr >> 5;							\
3404e2eee4SVineet Gupta 									\
3504e2eee4SVineet Gupta 	nr &= 0x1f;							\
3604e2eee4SVineet Gupta 									\
3704e2eee4SVineet Gupta 	__asm__ __volatile__(						\
3804e2eee4SVineet Gupta 	"1:	llock       %0, [%1]		\n"			\
3904e2eee4SVineet Gupta 	"	" #asm_op " %0, %0, %2	\n"				\
4004e2eee4SVineet Gupta 	"	scond       %0, [%1]		\n"			\
4104e2eee4SVineet Gupta 	"	bnz         1b			\n"			\
4204e2eee4SVineet Gupta 	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
4304e2eee4SVineet Gupta 	: "r"(m),	/* Not "m": llock only supports reg direct addr mode */	\
4404e2eee4SVineet Gupta 	  "ir"(nr)							\
4504e2eee4SVineet Gupta 	: "cc");							\
4614e968baSVineet Gupta }
4714e968baSVineet Gupta 
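/*
 * Illustration (a sketch, not extra generated code): instantiating
 * BIT_OP(set, |, bset) further below pastes "set" into the function name and
 * "bset" into the asm template, yielding an atomic set_bit() whose
 * LLOCK/SCOND loop retries until the store-conditional succeeds, roughly:
 *
 *	static inline void set_bit(unsigned long nr, volatile unsigned long *m)
 *	{
 *		unsigned int temp;
 *		m += nr >> 5;			// select the 32-bit word
 *		nr &= 0x1f;			// bit position within that word
 *		__asm__ __volatile__(
 *		"1:	llock   %0, [%1]	\n"
 *		"	bset    %0, %0, %2	\n"
 *		"	scond   %0, [%1]	\n"
 *		"	bnz     1b		\n"
 *		: "=&r"(temp) : "r"(m), "ir"(nr) : "cc");
 *	}
 */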
4814e968baSVineet Gupta /*
4914e968baSVineet Gupta  * Semantically:
5014e968baSVineet Gupta  *    Test the bit
5114e968baSVineet Gupta  *    if clear
5214e968baSVineet Gupta  *        set it and return 0 (old value)
5314e968baSVineet Gupta  *    else
5414e968baSVineet Gupta  *        return 1 (old value).
5514e968baSVineet Gupta  *
5614e968baSVineet Gupta  * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
5714e968baSVineet Gupta  * and the old value of the bit is returned
5814e968baSVineet Gupta  */
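/*
 * Typical (illustrative) use of that test-and-set semantic, e.g. claiming a
 * software flag; FLAG_BUSY and busy_map are made-up names for the example:
 *
 *	if (!test_and_set_bit(FLAG_BUSY, &busy_map)) {
 *		// bit was 0: we atomically set it and now own the resource
 *	} else {
 *		// bit was already 1: someone else owns it
 *	}
 */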
5904e2eee4SVineet Gupta #define TEST_N_BIT_OP(op, c_op, asm_op)					\
6004e2eee4SVineet Gupta static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
6104e2eee4SVineet Gupta {									\
6204e2eee4SVineet Gupta 	unsigned long old, temp;					\
6304e2eee4SVineet Gupta 									\
6404e2eee4SVineet Gupta 	m += nr >> 5;							\
6504e2eee4SVineet Gupta 									\
6604e2eee4SVineet Gupta 	nr &= 0x1f;							\
6704e2eee4SVineet Gupta 									\
6804e2eee4SVineet Gupta 	/*								\
6904e2eee4SVineet Gupta 	 * Explicit full memory barrier needed before/after as		\
7004e2eee4SVineet Gupta 	 * LLOCK/SCOND themselves don't provide any such semantics	\
7104e2eee4SVineet Gupta 	 */								\
7204e2eee4SVineet Gupta 	smp_mb();							\
7304e2eee4SVineet Gupta 									\
7404e2eee4SVineet Gupta 	__asm__ __volatile__(						\
7504e2eee4SVineet Gupta 	"1:	llock       %0, [%2]	\n"				\
7604e2eee4SVineet Gupta 	"	" #asm_op " %1, %0, %3	\n"				\
7704e2eee4SVineet Gupta 	"	scond       %1, [%2]	\n"				\
7804e2eee4SVineet Gupta 	"	bnz         1b		\n"				\
7904e2eee4SVineet Gupta 	: "=&r"(old), "=&r"(temp)					\
8004e2eee4SVineet Gupta 	: "r"(m), "ir"(nr)						\
8104e2eee4SVineet Gupta 	: "cc");							\
8204e2eee4SVineet Gupta 									\
8304e2eee4SVineet Gupta 	smp_mb();							\
8404e2eee4SVineet Gupta 									\
8504e2eee4SVineet Gupta 	return (old & (1 << nr)) != 0;					\
8614e968baSVineet Gupta }
8714e968baSVineet Gupta 
88a5a10d99SNoam Camus #elif !defined(CONFIG_ARC_PLAT_EZNPS)
8914e968baSVineet Gupta 
9014e968baSVineet Gupta /*
9114e968baSVineet Gupta  * Non hardware-assisted Atomic-R-M-W
9214e968baSVineet Gupta  * Locking falls back to irq-disabling only (UP) or spinlocks (SMP)
9314e968baSVineet Gupta  *
9414e968baSVineet Gupta  * There's "significant" micro-optimization in writing our own variants of
9514e968baSVineet Gupta  * bitops (over generic variants)
9614e968baSVineet Gupta  *
9714e968baSVineet Gupta  * (1) The generic APIs take a "signed" @nr while ours is "unsigned".
9814e968baSVineet Gupta  *     This avoids extra code being generated for the pointer arithmetic, since
9914e968baSVineet Gupta  *     the compiler is "not sure" that the index is not negative.
10014e968baSVineet Gupta  * (2) Utilize the fact that the ARCompact bit fiddling insns (BSET/BCLR/ASL etc.)
10114e968baSVineet Gupta  *     only consider the bottom 5 bits of @nr, so there is NO need to mask them off.
10214e968baSVineet Gupta  *     (GCC Quirk: however for constant @nr we still need to do the masking
10314e968baSVineet Gupta  *             at compile time)
10414e968baSVineet Gupta  */
10514e968baSVineet Gupta 
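/*
 * Worked example of the index math used throughout (illustrative): for
 * nr = 37, "m += nr >> 5" advances m by one word (37 >> 5 == 1), and
 * "nr & 0x1f" == 5 selects bit 5 of that word, i.e. mask 1UL << 5 == 0x20.
 */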
10604e2eee4SVineet Gupta #define BIT_OP(op, c_op, asm_op)					\
10704e2eee4SVineet Gupta static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
10804e2eee4SVineet Gupta {									\
10904e2eee4SVineet Gupta 	unsigned long temp, flags;					\
11004e2eee4SVineet Gupta 	m += nr >> 5;							\
11104e2eee4SVineet Gupta 									\
11204e2eee4SVineet Gupta 	/*								\
11304e2eee4SVineet Gupta 	 * spin lock/unlock provide the needed smp_mb() before/after	\
11404e2eee4SVineet Gupta 	 */								\
11504e2eee4SVineet Gupta 	bitops_lock(flags);						\
11604e2eee4SVineet Gupta 									\
11704e2eee4SVineet Gupta 	temp = *m;							\
11880f42084SVineet Gupta 	*m = temp c_op (1UL << (nr & 0x1f));					\
11904e2eee4SVineet Gupta 									\
12004e2eee4SVineet Gupta 	bitops_unlock(flags);						\
12114e968baSVineet Gupta }
12214e968baSVineet Gupta 
12304e2eee4SVineet Gupta #define TEST_N_BIT_OP(op, c_op, asm_op)					\
12404e2eee4SVineet Gupta static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
12504e2eee4SVineet Gupta {									\
12604e2eee4SVineet Gupta 	unsigned long old, flags;					\
12704e2eee4SVineet Gupta 	m += nr >> 5;							\
12804e2eee4SVineet Gupta 									\
12904e2eee4SVineet Gupta 	bitops_lock(flags);						\
13004e2eee4SVineet Gupta 									\
13104e2eee4SVineet Gupta 	old = *m;							\
13280f42084SVineet Gupta 	*m = old c_op (1UL << (nr & 0x1f));				\
13304e2eee4SVineet Gupta 									\
13404e2eee4SVineet Gupta 	bitops_unlock(flags);						\
13504e2eee4SVineet Gupta 									\
13680f42084SVineet Gupta 	return (old & (1UL << (nr & 0x1f))) != 0;			\
13714e968baSVineet Gupta }
13814e968baSVineet Gupta 
139a5a10d99SNoam Camus #else /* CONFIG_ARC_PLAT_EZNPS */
140a5a10d99SNoam Camus 
141a5a10d99SNoam Camus #define BIT_OP(op, c_op, asm_op)					\
142a5a10d99SNoam Camus static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
143a5a10d99SNoam Camus {									\
144a5a10d99SNoam Camus 	m += nr >> 5;							\
145a5a10d99SNoam Camus 									\
146a5a10d99SNoam Camus 	nr = (1UL << (nr & 0x1f));					\
147a5a10d99SNoam Camus 	if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3)			\
148a5a10d99SNoam Camus 		nr = ~nr;						\
149a5a10d99SNoam Camus 									\
150a5a10d99SNoam Camus 	__asm__ __volatile__(						\
151a5a10d99SNoam Camus 	"	mov r2, %0\n"						\
152a5a10d99SNoam Camus 	"	mov r3, %1\n"						\
153a5a10d99SNoam Camus 	"	.word %2\n"						\
154a5a10d99SNoam Camus 	:								\
155a5a10d99SNoam Camus 	: "r"(nr), "r"(m), "i"(asm_op)					\
156a5a10d99SNoam Camus 	: "r2", "r3", "memory");					\
157a5a10d99SNoam Camus }
158a5a10d99SNoam Camus 
159a5a10d99SNoam Camus #define TEST_N_BIT_OP(op, c_op, asm_op)					\
160a5a10d99SNoam Camus static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
161a5a10d99SNoam Camus {									\
162a5a10d99SNoam Camus 	unsigned long old;						\
163a5a10d99SNoam Camus 									\
164a5a10d99SNoam Camus 	m += nr >> 5;							\
165a5a10d99SNoam Camus 									\
166a5a10d99SNoam Camus 	nr = old = (1UL << (nr & 0x1f));				\
167a5a10d99SNoam Camus 	if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3)			\
168a5a10d99SNoam Camus 		old = ~old;						\
169a5a10d99SNoam Camus 									\
170a5a10d99SNoam Camus 	/* Explicit full memory barrier needed before/after */		\
171a5a10d99SNoam Camus 	smp_mb();							\
172a5a10d99SNoam Camus 									\
173a5a10d99SNoam Camus 	__asm__ __volatile__(						\
174a5a10d99SNoam Camus 	"	mov r2, %0\n"						\
175a5a10d99SNoam Camus 	"	mov r3, %1\n"						\
176a5a10d99SNoam Camus 	"	.word %2\n"						\
177a5a10d99SNoam Camus 	"	mov %0, r2"						\
178a5a10d99SNoam Camus 	: "+r"(old)							\
179a5a10d99SNoam Camus 	: "r"(m), "i"(asm_op)						\
180a5a10d99SNoam Camus 	: "r2", "r3", "memory");					\
181a5a10d99SNoam Camus 									\
182a5a10d99SNoam Camus 	smp_mb();							\
183a5a10d99SNoam Camus 									\
184a5a10d99SNoam Camus 	return (old & nr) != 0;					\
185a5a10d99SNoam Camus }
186a5a10d99SNoam Camus 
187a5a10d99SNoam Camus #endif /* CONFIG_ARC_PLAT_EZNPS */
18814e968baSVineet Gupta 
18914e968baSVineet Gupta /***************************************
19014e968baSVineet Gupta  * Non atomic variants
19114e968baSVineet Gupta  **************************************/
19214e968baSVineet Gupta 
19304e2eee4SVineet Gupta #define __BIT_OP(op, c_op, asm_op)					\
19404e2eee4SVineet Gupta static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)	\
19504e2eee4SVineet Gupta {									\
19604e2eee4SVineet Gupta 	unsigned long temp;						\
19704e2eee4SVineet Gupta 	m += nr >> 5;							\
19804e2eee4SVineet Gupta 									\
19904e2eee4SVineet Gupta 	temp = *m;							\
20080f42084SVineet Gupta 	*m = temp c_op (1UL << (nr & 0x1f));				\
20114e968baSVineet Gupta }
20214e968baSVineet Gupta 
20304e2eee4SVineet Gupta #define __TEST_N_BIT_OP(op, c_op, asm_op)				\
20404e2eee4SVineet Gupta static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
20504e2eee4SVineet Gupta {									\
20604e2eee4SVineet Gupta 	unsigned long old;						\
20704e2eee4SVineet Gupta 	m += nr >> 5;							\
20804e2eee4SVineet Gupta 									\
20904e2eee4SVineet Gupta 	old = *m;							\
21080f42084SVineet Gupta 	*m = old c_op (1UL << (nr & 0x1f));				\
21104e2eee4SVineet Gupta 									\
21280f42084SVineet Gupta 	return (old & (1UL << (nr & 0x1f))) != 0;			\
21314e968baSVineet Gupta }
21414e968baSVineet Gupta 
21504e2eee4SVineet Gupta #define BIT_OPS(op, c_op, asm_op)					\
21604e2eee4SVineet Gupta 									\
21704e2eee4SVineet Gupta 	/* set_bit(), clear_bit(), change_bit() */			\
21804e2eee4SVineet Gupta 	BIT_OP(op, c_op, asm_op)					\
21904e2eee4SVineet Gupta 									\
22004e2eee4SVineet Gupta 	/* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
22104e2eee4SVineet Gupta 	TEST_N_BIT_OP(op, c_op, asm_op)					\
22204e2eee4SVineet Gupta 									\
22304e2eee4SVineet Gupta 	/* __set_bit(), __clear_bit(), __change_bit() */		\
22404e2eee4SVineet Gupta 	__BIT_OP(op, c_op, asm_op)					\
22504e2eee4SVineet Gupta 									\
22604e2eee4SVineet Gupta 	/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
22704e2eee4SVineet Gupta 	__TEST_N_BIT_OP(op, c_op, asm_op)
22814e968baSVineet Gupta 
229a5a10d99SNoam Camus #ifndef CONFIG_ARC_PLAT_EZNPS
23004e2eee4SVineet Gupta BIT_OPS(set, |, bset)
23104e2eee4SVineet Gupta BIT_OPS(clear, & ~, bclr)
23204e2eee4SVineet Gupta BIT_OPS(change, ^, bxor)
233a5a10d99SNoam Camus #else
234a5a10d99SNoam Camus BIT_OPS(set, |, CTOP_INST_AOR_DI_R2_R2_R3)
235a5a10d99SNoam Camus BIT_OPS(clear, & ~, CTOP_INST_AAND_DI_R2_R2_R3)
236a5a10d99SNoam Camus BIT_OPS(change, ^, CTOP_INST_AXOR_DI_R2_R2_R3)
237a5a10d99SNoam Camus #endif
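/*
 * The three BIT_OPS() lines above thus instantiate the full API:
 * set_bit/clear_bit/change_bit, test_and_set_bit/test_and_clear_bit/
 * test_and_change_bit, plus the non-atomic __set_bit() etc. counterparts.
 *
 * Illustrative use against a bitmap (my_flags is a made-up name):
 *
 *	static DECLARE_BITMAP(my_flags, 64);
 *
 *	set_bit(37, my_flags);			// atomic: word 1, bit 5
 *	__clear_bit(2, my_flags);		// non-atomic, caller serializes
 *	if (test_and_clear_bit(37, my_flags))
 *		;				// it was set, now cleared
 */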
23814e968baSVineet Gupta 
23914e968baSVineet Gupta /*
24014e968baSVineet Gupta  * This routine doesn't need to be atomic.
24114e968baSVineet Gupta  */
24214e968baSVineet Gupta static inline int
243de60c1a1SVineet Gupta test_bit(unsigned int nr, const volatile unsigned long *addr)
24414e968baSVineet Gupta {
24514e968baSVineet Gupta 	unsigned long mask;
24614e968baSVineet Gupta 
24714e968baSVineet Gupta 	addr += nr >> 5;
24814e968baSVineet Gupta 
24980f42084SVineet Gupta 	mask = 1UL << (nr & 0x1f);
25014e968baSVineet Gupta 
25114e968baSVineet Gupta 	return ((mask & *addr) != 0);
25214e968baSVineet Gupta }
25314e968baSVineet Gupta 
2541f6ccfffSVineet Gupta #ifdef CONFIG_ISA_ARCOMPACT
2551f6ccfffSVineet Gupta 
25614e968baSVineet Gupta /*
25714e968baSVineet Gupta  * Count the number of leading zeros, starting from the MSB
25814e968baSVineet Gupta  * Helper for fls() and friends
25914e968baSVineet Gupta  * This is a pure count, so the (1-32) or (0-31) ranges don't apply
26014e968baSVineet Gupta  * It can be 0 to 32, depending on the number of leading 0's
26114e968baSVineet Gupta  * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF)=0, clz(0) = 32, clz(1) = 31
26214e968baSVineet Gupta  */
26314e968baSVineet Gupta static inline __attribute__ ((const)) int clz(unsigned int x)
26414e968baSVineet Gupta {
26514e968baSVineet Gupta 	unsigned int res;
26614e968baSVineet Gupta 
26714e968baSVineet Gupta 	__asm__ __volatile__(
26814e968baSVineet Gupta 	"	norm.f  %0, %1		\n"
26914e968baSVineet Gupta 	"	mov.n   %0, 0		\n"
27014e968baSVineet Gupta 	"	add.p   %0, %0, 1	\n"
27114e968baSVineet Gupta 	: "=r"(res)
27214e968baSVineet Gupta 	: "r"(x)
27314e968baSVineet Gupta 	: "cc");
27414e968baSVineet Gupta 
27514e968baSVineet Gupta 	return res;
27614e968baSVineet Gupta }
27714e968baSVineet Gupta 
2783fc2579eSMatthew Wilcox static inline int constant_fls(unsigned int x)
27914e968baSVineet Gupta {
28014e968baSVineet Gupta 	int r = 32;
28114e968baSVineet Gupta 
28214e968baSVineet Gupta 	if (!x)
28314e968baSVineet Gupta 		return 0;
28414e968baSVineet Gupta 	if (!(x & 0xffff0000u)) {
28514e968baSVineet Gupta 		x <<= 16;
28614e968baSVineet Gupta 		r -= 16;
28714e968baSVineet Gupta 	}
28814e968baSVineet Gupta 	if (!(x & 0xff000000u)) {
28914e968baSVineet Gupta 		x <<= 8;
29014e968baSVineet Gupta 		r -= 8;
29114e968baSVineet Gupta 	}
29214e968baSVineet Gupta 	if (!(x & 0xf0000000u)) {
29314e968baSVineet Gupta 		x <<= 4;
29414e968baSVineet Gupta 		r -= 4;
29514e968baSVineet Gupta 	}
29614e968baSVineet Gupta 	if (!(x & 0xc0000000u)) {
29714e968baSVineet Gupta 		x <<= 2;
29814e968baSVineet Gupta 		r -= 2;
29914e968baSVineet Gupta 	}
30014e968baSVineet Gupta 	if (!(x & 0x80000000u)) {
30114e968baSVineet Gupta 		x <<= 1;
30214e968baSVineet Gupta 		r -= 1;
30314e968baSVineet Gupta 	}
30414e968baSVineet Gupta 	return r;
30514e968baSVineet Gupta }
30614e968baSVineet Gupta 
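/*
 * Worked trace of the binary search above (illustrative), for x = 0x1000:
 *   no bit in 0xffff0000 -> x <<= 16 (x = 0x10000000), r = 16
 *   bits present in 0xff000000 and 0xf0000000 -> no change
 *   no bit in 0xc0000000 -> x <<= 2  (x = 0x40000000), r = 14
 *   no bit in 0x80000000 -> x <<= 1  (x = 0x80000000), r = 13
 * so constant_fls(0x1000) == 13, i.e. bit 12 is the highest bit set.
 */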
30714e968baSVineet Gupta /*
30814e968baSVineet Gupta  * fls = Find Last Set in word
30914e968baSVineet Gupta  * @result: [1-32]
31014e968baSVineet Gupta  * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
31114e968baSVineet Gupta  */
3123fc2579eSMatthew Wilcox static inline __attribute__ ((const)) int fls(unsigned int x)
31314e968baSVineet Gupta {
31414e968baSVineet Gupta 	if (__builtin_constant_p(x))
31514e968baSVineet Gupta 	       return constant_fls(x);
31614e968baSVineet Gupta 
31714e968baSVineet Gupta 	return 32 - clz(x);
31814e968baSVineet Gupta }
31914e968baSVineet Gupta 
32014e968baSVineet Gupta /*
32114e968baSVineet Gupta  * __fls: Similar to fls, but zero based (0-31); returns 0 if no bit is set
32214e968baSVineet Gupta  */
32314e968baSVineet Gupta static inline __attribute__ ((const)) int __fls(unsigned long x)
32414e968baSVineet Gupta {
32514e968baSVineet Gupta 	if (!x)
32614e968baSVineet Gupta 		return 0;
32714e968baSVineet Gupta 	else
32814e968baSVineet Gupta 		return fls(x) - 1;
32914e968baSVineet Gupta }
33014e968baSVineet Gupta 
33114e968baSVineet Gupta /*
33214e968baSVineet Gupta  * ffs = Find First Set in word (LSB to MSB)
33314e968baSVineet Gupta  * @result: [1-32], 0 if all 0's
33414e968baSVineet Gupta  */
33514e968baSVineet Gupta #define ffs(x)	({ unsigned long __t = (x); fls(__t & -__t); })
33614e968baSVineet Gupta 
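/*
 * The (__t & -__t) trick above isolates the lowest set bit: in two's
 * complement, -__t flips all bits above the lowest 1 while keeping that 1.
 * Illustrative example: for x = 40 (0b101000), x & -x = 8 (0b1000) and
 * fls(8) = 4, so ffs(40) = 4 (lowest set bit is bit 3, i.e. 1-based 4).
 */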
33714e968baSVineet Gupta /*
33814e968baSVineet Gupta  * __ffs: Similar to ffs, but zero based (0-31)
33914e968baSVineet Gupta  */
3404e868f84SEugeniy Paltsev static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
34114e968baSVineet Gupta {
34214e968baSVineet Gupta 	if (!word)
34314e968baSVineet Gupta 		return word;
34414e968baSVineet Gupta 
34514e968baSVineet Gupta 	return ffs(word) - 1;
34614e968baSVineet Gupta }
34714e968baSVineet Gupta 
3481f6ccfffSVineet Gupta #else	/* CONFIG_ISA_ARCV2 */
3491f6ccfffSVineet Gupta 
3501f6ccfffSVineet Gupta /*
3511f6ccfffSVineet Gupta  * fls = Find Last Set in word
3521f6ccfffSVineet Gupta  * @result: [1-32]
3531f6ccfffSVineet Gupta  * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
3541f6ccfffSVineet Gupta  */
3551f6ccfffSVineet Gupta static inline __attribute__ ((const)) int fls(unsigned long x)
3561f6ccfffSVineet Gupta {
3571f6ccfffSVineet Gupta 	int n;
3581f6ccfffSVineet Gupta 
3591f6ccfffSVineet Gupta 	asm volatile(
3601f6ccfffSVineet Gupta 	"	fls.f	%0, %1		\n"  /* 0:31; 0(Z) if src 0 */
3611f6ccfffSVineet Gupta 	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
3621f6ccfffSVineet Gupta 	: "=r"(n)	/* Early clobber not needed */
3631f6ccfffSVineet Gupta 	: "r"(x)
3641f6ccfffSVineet Gupta 	: "cc");
3651f6ccfffSVineet Gupta 
3661f6ccfffSVineet Gupta 	return n;
3671f6ccfffSVineet Gupta }
3681f6ccfffSVineet Gupta 
3691f6ccfffSVineet Gupta /*
3701f6ccfffSVineet Gupta  * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
3711f6ccfffSVineet Gupta  */
3721f6ccfffSVineet Gupta static inline __attribute__ ((const)) int __fls(unsigned long x)
3731f6ccfffSVineet Gupta {
3741f6ccfffSVineet Gupta 	/* FLS insn has exactly same semantics as the API */
3751f6ccfffSVineet Gupta 	return	__builtin_arc_fls(x);
3761f6ccfffSVineet Gupta }
3771f6ccfffSVineet Gupta 
3781f6ccfffSVineet Gupta /*
3791f6ccfffSVineet Gupta  * ffs = Find First Set in word (LSB to MSB)
3801f6ccfffSVineet Gupta  * @result: [1-32], 0 if all 0's
3811f6ccfffSVineet Gupta  */
3821f6ccfffSVineet Gupta static inline __attribute__ ((const)) int ffs(unsigned long x)
3831f6ccfffSVineet Gupta {
3841f6ccfffSVineet Gupta 	int n;
3851f6ccfffSVineet Gupta 
3861f6ccfffSVineet Gupta 	asm volatile(
3871f6ccfffSVineet Gupta 	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
3881f6ccfffSVineet Gupta 	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
3891f6ccfffSVineet Gupta 	"	mov.z	%0, 0		\n"  /* 31(Z)-> 0 */
3901f6ccfffSVineet Gupta 	: "=r"(n)	/* Early clobber not needed */
3911f6ccfffSVineet Gupta 	: "r"(x)
3921f6ccfffSVineet Gupta 	: "cc");
3931f6ccfffSVineet Gupta 
3941f6ccfffSVineet Gupta 	return n;
3951f6ccfffSVineet Gupta }
3961f6ccfffSVineet Gupta 
3971f6ccfffSVineet Gupta /*
3981f6ccfffSVineet Gupta  * __ffs: Similar to ffs, but zero based (0-31)
3991f6ccfffSVineet Gupta  */
4004e868f84SEugeniy Paltsev static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
4011f6ccfffSVineet Gupta {
4024e868f84SEugeniy Paltsev 	unsigned long n;
4031f6ccfffSVineet Gupta 
4041f6ccfffSVineet Gupta 	asm volatile(
4051f6ccfffSVineet Gupta 	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
4061f6ccfffSVineet Gupta 	"	mov.z	%0, 0		\n"  /* 31(Z)-> 0 */
4071f6ccfffSVineet Gupta 	: "=r"(n)
4081f6ccfffSVineet Gupta 	: "r"(x)
4091f6ccfffSVineet Gupta 	: "cc");
4101f6ccfffSVineet Gupta 
4111f6ccfffSVineet Gupta 	return n;
4121f6ccfffSVineet Gupta 
4131f6ccfffSVineet Gupta }
4141f6ccfffSVineet Gupta 
4151f6ccfffSVineet Gupta #endif	/* CONFIG_ISA_ARCOMPACT */
4161f6ccfffSVineet Gupta 
41714e968baSVineet Gupta /*
41814e968baSVineet Gupta  * ffz = Find First Zero in word.
41914e968baSVineet Gupta  * @return:[0-31], 32 if all 1's
42014e968baSVineet Gupta  */
42114e968baSVineet Gupta #define ffz(x)	__ffs(~(x))
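/*
 * Illustrative example: ffz(0x0000000f) computes __ffs(~0x0000000f) =
 * __ffs(0xfffffff0) = 4, i.e. bit 4 is the first (lowest) zero bit.
 */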
42214e968baSVineet Gupta 
42314e968baSVineet Gupta #include <asm-generic/bitops/hweight.h>
42414e968baSVineet Gupta #include <asm-generic/bitops/fls64.h>
42514e968baSVineet Gupta #include <asm-generic/bitops/sched.h>
42614e968baSVineet Gupta #include <asm-generic/bitops/lock.h>
42714e968baSVineet Gupta 
42814e968baSVineet Gupta #include <asm-generic/bitops/find.h>
42914e968baSVineet Gupta #include <asm-generic/bitops/le.h>
43014e968baSVineet Gupta #include <asm-generic/bitops/ext2-atomic-setbit.h>
43114e968baSVineet Gupta 
43214e968baSVineet Gupta #endif /* !__ASSEMBLY__ */
43314e968baSVineet Gupta 
43414e968baSVineet Gupta #endif /* _ASM_BITOPS_H */
435