/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>
#ifndef CONFIG_ARC_HAS_LLSC
#include <asm/smp.h>
#endif

#ifdef CONFIG_ARC_HAS_LLSC

/*
 * Hardware assisted Atomic-R-M-W
 */
#define BIT_OP(op, c_op, asm_op)					\
static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned int temp;						\
									\
	m += nr >> 5;							\
									\
	nr &= 0x1f;							\
									\
	__asm__ __volatile__(						\
	"1:	llock       %0, [%1]		\n"			\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond       %0, [%1]		\n"			\
	"	bnz         1b			\n"			\
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
	: "r"(m),	/* Not "m": llock only supports reg direct addr mode */	\
	  "ir"(nr)							\
	: "cc");							\
}
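
/*
 * Illustrative sketch only (llock()/scond() below are pseudo-helpers, not
 * real kernel APIs): BIT_OP(set, |, bset) above generates roughly
 *
 *	static inline void set_bit(unsigned long nr, volatile unsigned long *m)
 *	{
 *		m += nr >> 5;				// word holding the bit
 *		nr &= 0x1f;				// bit position in that word
 *		do {
 *			old = llock(m);			// load-locked
 *		} while (!scond(old | (1UL << nr), m));	// retry if reservation lost
 *	}
 *
 * i.e. an LLOCK/SCOND retry loop with no explicit barriers (plain
 * set_bit/clear_bit/change_bit are not ordering primitives).
 */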

/*
 * Semantically:
 *    Test the bit
 *    if clear
 *        set it and return 0 (old value)
 *    else
 *        return 1 (old value).
 *
 * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
 * and the old value of the bit is returned
 */
#define TEST_N_BIT_OP(op, c_op, asm_op)					\
static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old, temp;					\
									\
	m += nr >> 5;							\
									\
	nr &= 0x1f;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock       %0, [%2]	\n"				\
	"	" #asm_op " %1, %0, %3	\n"				\
	"	scond       %1, [%2]	\n"				\
	"	bnz         1b		\n"				\
	: "=&r"(old), "=&r"(temp)					\
	: "r"(m), "ir"(nr)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return (old & (1 << nr)) != 0;					\
}
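
/*
 * A plain-C sketch (illustration only) of what test_and_set_bit(nr, m)
 * generated above does; the real version performs the read-modify-write
 * atomically via the LLOCK/SCOND loop and is bracketed by smp_mb():
 *
 *	m += nr >> 5;
 *	nr &= 0x1f;
 *	old = *m;
 *	*m = old | (1UL << nr);
 *	return (old & (1UL << nr)) != 0;
 */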

#else /* !CONFIG_ARC_HAS_LLSC */

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 *
 * There's "significant" micro-optimization in writing our own variants of
 * bitops (over generic variants)
 *
 * (1) The generic APIs have "signed" @nr while we have it "unsigned"
 *     This avoids extra code being generated for pointer arithmetic, since
 *     the compiler is "not sure" that the index is NOT -ve
 * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL etc.)
 *     only consider the bottom 5 bits of @nr, so there is NO need to mask
 *     them off (see the example below).
 *     (GCC quirk: however for a constant @nr we still need to do the masking
 *             at compile time)
 */
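
/*
 * Illustration of point (2), assuming GCC emits BSET/BCLR for the C
 * expressions below: for a run-time @nr of 33 the core only looks at the
 * low 5 bits of the position operand, so bit 1 is operated on exactly as
 * for @nr = 1; the explicit "& 0x1f" in the C code mainly matters for the
 * compile-time constant case noted above.
 */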

#define BIT_OP(op, c_op, asm_op)					\
static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long temp, flags;					\
	m += nr >> 5;							\
									\
	/*								\
	 * spin lock/unlock provide the needed smp_mb() before/after	\
	 */								\
	bitops_lock(flags);						\
									\
	temp = *m;							\
	*m = temp c_op (1UL << (nr & 0x1f));				\
									\
	bitops_unlock(flags);						\
}

#define TEST_N_BIT_OP(op, c_op, asm_op)					\
static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old, flags;					\
	m += nr >> 5;							\
									\
	bitops_lock(flags);						\
									\
	old = *m;							\
	*m = old c_op (1UL << (nr & 0x1f));				\
									\
	bitops_unlock(flags);						\
									\
	return (old & (1UL << (nr & 0x1f))) != 0;			\
}
#endif /* CONFIG_ARC_HAS_LLSC */

/***************************************
 * Non atomic variants
 **************************************/

#define __BIT_OP(op, c_op, asm_op)					\
static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)	\
{									\
	unsigned long temp;						\
	m += nr >> 5;							\
									\
	temp = *m;							\
	*m = temp c_op (1UL << (nr & 0x1f));				\
}

#define __TEST_N_BIT_OP(op, c_op, asm_op)				\
static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old;						\
	m += nr >> 5;							\
									\
	old = *m;							\
	*m = old c_op (1UL << (nr & 0x1f));				\
									\
	return (old & (1UL << (nr & 0x1f))) != 0;			\
}

#define BIT_OPS(op, c_op, asm_op)					\
									\
	/* set_bit(), clear_bit(), change_bit() */			\
	BIT_OP(op, c_op, asm_op)					\
									\
	/* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
	TEST_N_BIT_OP(op, c_op, asm_op)					\
									\
	/* __set_bit(), __clear_bit(), __change_bit() */		\
	__BIT_OP(op, c_op, asm_op)					\
									\
	/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
	__TEST_N_BIT_OP(op, c_op, asm_op)

BIT_OPS(set, |, bset)
BIT_OPS(clear, & ~, bclr)
BIT_OPS(change, ^, bxor)
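
/*
 * Example usage (illustrative sketch; "map" is a hypothetical caller-side
 * bitmap): after the expansions above, callers get the standard kernel
 * bitops on an unsigned long array:
 *
 *	DECLARE_BITMAP(map, 64);
 *
 *	set_bit(3, map);			// atomic set
 *	if (test_and_clear_bit(3, map))		// atomic RMW, returns old bit
 *		__change_bit(5, map);		// non-atomic, caller serializes
 */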

/*
 * This routine doesn't need to be atomic.
 */
static inline int
test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	unsigned long mask;

	addr += nr >> 5;

	mask = 1UL << (nr & 0x1f);

	return ((mask & *addr) != 0);
}

#ifdef CONFIG_ISA_ARCOMPACT

/*
 * Count the number of leading zeros, starting from the MSB
 * Helper for fls( ) friends
 * This is a pure count, so (1-32) or (0-31) doesn't apply
 * It could be 0 to 32, based on the number of leading 0's in the word
 * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF) = 0, clz(0) = 32, clz(1) = 31
 */
static inline __attribute__ ((const)) int clz(unsigned int x)
{
	unsigned int res;

	__asm__ __volatile__(
	"	norm.f  %0, %1		\n"
	"	mov.n   %0, 0		\n"
	"	add.p   %0, %0, 1	\n"
	: "=r"(res)
	: "r"(x)
	: "cc");

	return res;
}

static inline int constant_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u))
		r -= 1;
	return r;
}
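
/*
 * Worked example (illustration only): constant_fls(0x500) - the highest
 * set bit is bit 10, so the binary search above shifts left by 16, then
 * by 4, then drops 1, giving r = 32 - 16 - 4 - 1 = 11, i.e. the 1-based
 * result expected from fls().
 */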

/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned int x)
{
	if (__builtin_constant_p(x))
		return constant_fls(x);

	return 32 - clz(x);
}

/*
 * __fls: Similar to fls, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	if (!x)
		return 0;
	else
		return fls(x) - 1;
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
#define ffs(x)	({ unsigned long __t = (x); fls(__t & -__t); })
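
/*
 * The (__t & -__t) idiom isolates the lowest set bit, e.g. for
 * __t = 0b101100, __t & -__t = 0b000100, and fls() of that single bit
 * yields the 1-based position (3 here); fls(0) = 0 keeps ffs(0) = 0.
 */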

/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
{
	if (!word)
		return word;

	return ffs(word) - 1;
}

#else	/* CONFIG_ISA_ARCV2 */

/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned long x)
{
	int n;

	asm volatile(
	"	fls.f	%0, %1		\n"  /* 0:31; 0(Z) if src 0 */
	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
	: "=r"(n)	/* Early clobber not needed */
	: "r"(x)
	: "cc");

	return n;
}
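
/*
 * e.g. fls(0x500) (illustration): the FLS instruction yields 10, the
 * position of the highest set bit, and the conditional add bumps it to
 * the 1-based result 11; for x == 0 the Z flag suppresses the add and
 * 0 is returned, matching fls(0) = 0.
 */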

/*
 * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	/* FLS insn has exactly same semantics as the API */
	return	__builtin_arc_fls(x);
}

/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
static inline __attribute__ ((const)) int ffs(unsigned long x)
{
	int n;

	asm volatile(
	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
	"	mov.z	%0, 0		\n"  /* 31(Z)-> 0 */
	: "=r"(n)	/* Early clobber not needed */
	: "r"(x)
	: "cc");

	return n;
}

/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
{
	unsigned long n;

	asm volatile(
	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
	"	mov.z	%0, 0		\n"  /* 31(Z)-> 0 */
	: "=r"(n)
	: "r"(x)
	: "cc");

	return n;
}

#endif	/* CONFIG_ISA_ARCOMPACT */

/*
 * ffz = Find First Zero in word.
 * @return: [0-31]; undefined if no zero bit exists (the __ffs() based
 * implementation above yields 0 for an all 1's word)
 */
#define ffz(x)	__ffs(~(x))
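
/*
 * e.g. ffz(0xF7) = __ffs(~0xF7) = __ffs(0xFFFFFF08) = 3,
 * since bit 3 is the first zero bit of 0xF7 (0b11110111).
 */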

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_BITOPS_H */