xref: /openbmc/linux/arch/arc/include/asm/bitops.h (revision 1f6ccfff6314672743ad7252160654709e997a2a)
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

/*
 * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
 * The Kconfig glue ensures that in SMP, this is only set if the container
 * SoC/platform has cross-core coherent LLOCK/SCOND
 */
#if defined(CONFIG_ARC_HAS_LLSC)
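
/*
 * Illustrative sketch (not compiled): every atomic op below follows the
 * same LLOCK/SCOND retry pattern, roughly equivalent to this C-like
 * pseudocode:
 *
 *	do {
 *		val = *m;			// llock: load-linked from m
 *		val = OP(val, nr);		// bset/bclr/bxor the bit
 *	} while (!store_conditional(m, val));	// scond fails if *m changed
 *
 * store_conditional() is just a stand-in name; the bnz in the asm retries
 * the whole sequence whenever scond reports failure.
 */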

static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;

	/*
	 * ARC ISA micro-optimization:
	 *
	 * Instructions dealing with bitpos only consider lower 5 bits (0-31)
	 * e.g. (x << 33) is handled like (x << 1) by the ASL instruction
	 *  (mem pointer still needs adjustment to point to next word)
	 *
	 * Hence the masking to clamp @nr arg can be elided in general.
	 *
	 * However if @nr is a constant (the above assumed it lived in a
	 * register), and greater than 31, gcc can optimize (x << 33) away
	 * to 0 as an overflow on this 32-bit ISA. Thus masking needs to be
	 * done for constant @nr, but no code is generated due to const prop.
	 */
	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bset    %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b	\n"
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}

static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bclr    %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b	\n"
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}

static inline void change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	bxor    %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}

/*
 * Semantically:
 *    Test the bit
 *    if clear
 *        set it and return 0 (old value)
 *    else
 *        return 1 (old value).
 *
 * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
 * and the old value of the bit is returned
 */
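
/*
 * Usage sketch (illustrative only): since the return value is the *old*
 * state of the bit, the op can serve as a simple try-lock:
 *
 *	if (!test_and_set_bit(MY_FLAG, &flags))
 *		;	// bit was clear, we set it - proceed
 *	else
 *		;	// bit was already set by someone else
 *
 * MY_FLAG and flags are placeholder names, not defined in this file.
 */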
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%2]	\n"
	"	bset    %1, %0, %3	\n"
	"	scond   %1, [%2]	\n"
	"	bnz     1b		\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int old, temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%2]	\n"
	"	bclr    %1, %0, %3	\n"
	"	scond   %1, [%2]	\n"
	"	bnz     1b		\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int old, temp;

	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%2]	\n"
	"	bxor    %1, %0, %3	\n"
	"	scond   %1, [%2]	\n"
	"	bnz     1b		\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");

	return (old & (1 << nr)) != 0;
}

#else	/* !CONFIG_ARC_HAS_LLSC */

#include <asm/smp.h>

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 *
 * There's "significant" micro-optimization in writing our own variants of
 * bitops (over the generic variants)
 *
 * (1) The generic APIs have "signed" @nr while we have it "unsigned"
 *     This avoids extra code being generated for pointer arithmetic, since
 *     the compiler is "not sure" that the index is NOT -ve
 * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL) etc
 *     only consider the bottom 5 bits of @nr, so NO need to mask them off.
 *     (GCC Quirk: however for constant @nr we still need to do the masking
 *             at compile time)
 */
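
/*
 * Note (assumption, based on the comment above): bitops_lock()/
 * bitops_unlock() come from <asm/smp.h> and are expected to expand to
 * local_irq_save()/restore() on UP and to a spinlock plus irq-save on SMP,
 * making each R-M-W below a short critical section:
 *
 *	bitops_lock(flags);	// irqs off (UP) / spinlock + irqs off (SMP)
 *	temp = *m;
 *	*m = temp | (1UL << nr);
 *	bitops_unlock(flags);
 */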

static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	temp = *m;
	*m = temp | (1UL << nr);

	bitops_unlock(flags);
}

static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	temp = *m;
	*m = temp & ~(1UL << nr);

	bitops_unlock(flags);
}

static inline void change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	temp = *m;
	*m = temp ^ (1UL << nr);

	bitops_unlock(flags);
}

static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	old = *m;
	*m = old | (1 << nr);

	bitops_unlock(flags);

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	old = *m;
	*m = old & ~(1 << nr);

	bitops_unlock(flags);

	return (old & (1 << nr)) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old, flags;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	bitops_lock(flags);

	old = *m;
	*m = old ^ (1 << nr);

	bitops_unlock(flags);

	return (old & (1 << nr)) != 0;
}

#endif /* CONFIG_ARC_HAS_LLSC */

/***************************************
 * Non atomic variants
 **************************************/
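
/*
 * Illustrative note: the __xxx_bit() variants below perform the same update
 * without any locking/atomicity, so they are only safe when the caller
 * already excludes concurrent writers, e.g.:
 *
 *	__set_bit(5, &local_mask);	// local_mask being a hypothetical
 *					// on-stack bitmap with no other writer
 */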

static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	temp = *m;
	*m = temp | (1UL << nr);
}

static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	temp = *m;
	*m = temp & ~(1UL << nr);
}

static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	temp = *m;
	*m = temp ^ (1UL << nr);
}

static inline int
__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	old = *m;
	*m = old | (1 << nr);

	return (old & (1 << nr)) != 0;
}

static inline int
__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	old = *m;
	*m = old & ~(1 << nr);

	return (old & (1 << nr)) != 0;
}

static inline int
__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long old;
	m += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	old = *m;
	*m = old ^ (1 << nr);

	return (old & (1 << nr)) != 0;
}

/*
 * This routine doesn't need to be atomic.
 */
static inline int
test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	unsigned long mask;

	addr += nr >> 5;

	if (__builtin_constant_p(nr))
		nr &= 0x1f;

	mask = 1 << nr;

	return ((mask & *addr) != 0);
}

#ifdef CONFIG_ISA_ARCOMPACT

/*
 * Count the number of zeros, starting from MSB
 * Helper for fls( ) friends
 * This is a pure count, so the (1-32) of fls or the (0-31) of __fls doesn't apply
 * The result can be 0 to 32, based on the number of leading 0's:
 * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF) = 0, clz(0) = 32, clz(1) = 31
 */
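/*
 * Reading of the asm below (an assumption about the ARC700 NORM insn: it
 * yields clz(x) - 1 for non-negative sources, and .f sets flags from the
 * source operand):
 *	norm.f	-> res = clz(x) - 1, flags reflect x
 *	mov.n	-> x was negative (MSB set)  => res = 0  (clz = 0)
 *	add.p	-> x was non-negative        => res = clz(x)
 */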
static inline __attribute__ ((const)) int clz(unsigned int x)
{
	unsigned int res;

	__asm__ __volatile__(
	"	norm.f  %0, %1		\n"
	"	mov.n   %0, 0		\n"
	"	add.p   %0, %0, 1	\n"
	: "=r"(res)
	: "r"(x)
	: "cc");

	return res;
}

static inline int constant_fls(int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned long x)
{
	if (__builtin_constant_p(x))
		return constant_fls(x);

	return 32 - clz(x);
}

/*
 * __fls: Similar to fls, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	if (!x)
		return 0;
	else
		return fls(x) - 1;
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
#define ffs(x)	({ unsigned long __t = (x); fls(__t & -__t); })

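/*
 * Worked example (illustration): (__t & -__t) isolates the lowest set bit
 * via two's complement, and fls() of that single bit is its 1-based index:
 *	x = 0b0110  ->  x & -x = 0b0010  ->  fls(0b0010) = 2  ->  ffs(x) = 2
 *	x = 0       ->  x & -x = 0       ->  fls(0) = 0       ->  ffs(x) = 0
 */
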
/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __ffs(unsigned long word)
{
	if (!word)
		return word;

	return ffs(word) - 1;
}

#else	/* CONFIG_ISA_ARCV2 */

/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned long x)
{
	int n;

	asm volatile(
	"	fls.f	%0, %1		\n"  /* 0:31; 0(Z) if src 0 */
	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
	: "=r"(n)	/* Early clobber not needed */
	: "r"(x)
	: "cc");

	return n;
}

/*
 * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	/* FLS insn has exactly the same semantics as the API */
	return	__builtin_arc_fls(x);
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
static inline __attribute__ ((const)) int ffs(unsigned long x)
{
	int n;

	asm volatile(
	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
	"	mov.z	%0, 0		\n"  /* 31(Z) -> 0 */
	: "=r"(n)	/* Early clobber not needed */
	: "r"(x)
	: "cc");

	return n;
}

/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __ffs(unsigned long x)
{
	int n;

	asm volatile(
	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
	"	mov.z	%0, 0		\n"  /* 31(Z) -> 0 */
	: "=r"(n)
	: "r"(x)
	: "cc");

	return n;
}

#endif	/* CONFIG_ISA_ARCOMPACT */

/*
 * ffz = Find First Zero in word.
 * @return: [0-31], 32 if all 1's
 */
#define ffz(x)	__ffs(~(x))
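
/*
 * Worked example (illustration): ffz(0x000000ff) inverts the word to
 * 0xffffff00, and __ffs() of that is 8, i.e. the lowest clear bit of the
 * original input.
 */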

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* !__ASSEMBLY__ */

#endif