#ifndef _M68K_BITOPS_H
#define _M68K_BITOPS_H
/*
 * Copyright 1992, Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/barrier.h>

/*
 *	Bit access functions vary across the ColdFire and 68k families.
 *	So we will break them out here, and then macro in the ones we want.
 *
 *	ColdFire - supports standard bset/bclr/bchg with register operand only
 *	68000    - supports standard bset/bclr/bchg with memory operand
 *	>= 68020 - also supports the bfset/bfclr/bfchg instructions
 *
 *	Although it is possible to use only the bset/bclr/bchg with register
 *	operands on all platforms, you end up with larger generated code.
 *	So we use the best form possible on a given platform.
 */
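
/*
 * Note on the addressing arithmetic used throughout this file: m68k is
 * big-endian, while the kernel numbers bits from the least significant
 * end of each 32-bit word. "(nr ^ 31) / 8" therefore selects the byte
 * that actually holds bit nr, and "nr & 7" the bit within that byte.
 * Worked example (illustrative, not from the original source):
 *
 *	nr = 0  -> byte (0 ^ 31) / 8 = 3 (the least significant byte), bit 0
 *	nr = 31 -> byte (31 ^ 31) / 8 = 0 (the most significant byte), bit 7
 */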
static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfset %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define	set_bit(nr, vaddr)	bset_reg_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	set_bit(nr, vaddr)	bset_mem_set_bit(nr, vaddr)
#else
#define set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bset_mem_set_bit(nr, vaddr) : \
				bfset_mem_set_bit(nr, vaddr))
#endif
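
/*
 * The arch___*() helpers below are the kernel's non-atomic entry points.
 * On m68k they simply reuse the atomic forms above; a single bset/bclr/bchg
 * read-modify-write is presumably already as cheap as a dedicated
 * non-atomic sequence would be.
 */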

static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	set_bit(nr, addr);
}

static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfclr %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define	clear_bit(nr, vaddr)	bclr_reg_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	clear_bit(nr, vaddr)	bclr_mem_clear_bit(nr, vaddr)
#else
#define clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bclr_mem_clear_bit(nr, vaddr) : \
				bfclr_mem_clear_bit(nr, vaddr))
#endif

static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	clear_bit(nr, addr);
}

static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfchg %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define	change_bit(nr, vaddr)	bchg_reg_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	change_bit(nr, vaddr)	bchg_mem_change_bit(nr, vaddr)
#else
#define change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bchg_mem_change_bit(nr, vaddr) : \
				bfchg_mem_change_bit(nr, vaddr))
#endif

static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	change_bit(nr, addr);
}

static __always_inline bool
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return (addr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
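
/*
 * Plain C here: "nr >> 5" selects the 32-bit word and "nr & 31" the bit
 * within it. Illustrative example: nr = 40 tests bit 8 of addr[1].
 */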

static inline int bset_reg_test_and_set_bit(int nr,
					    volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bset_mem_test_and_set_bit(int nr,
					    volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfset_mem_test_and_set_bit(int nr,
					     volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define	test_and_set_bit(nr, vaddr)	bset_reg_test_and_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	test_and_set_bit(nr, vaddr)	bset_mem_test_and_set_bit(nr, vaddr)
#else
#define test_and_set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bset_mem_test_and_set_bit(nr, vaddr) : \
					bfset_mem_test_and_set_bit(nr, vaddr))
#endif

static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}
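
/*
 * Minimal usage sketch (hypothetical caller code, not part of this header):
 * the bset/sne pair returns "was the bit already set?", which is the usual
 * way to claim a slot in a bitmap atomically.
 *
 *	static unsigned long claimed[2];	// 64 slots, caller-owned
 *
 *	if (!test_and_set_bit(slot, claimed))
 *		use_slot(slot);		// bit was 0, now 1: slot is ours
 */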

static inline int bclr_reg_test_and_clear_bit(int nr,
					      volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bclr_mem_test_and_clear_bit(int nr,
					      volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfclr_mem_test_and_clear_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define	test_and_clear_bit(nr, vaddr)	bclr_reg_test_and_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	test_and_clear_bit(nr, vaddr)	bclr_mem_test_and_clear_bit(nr, vaddr)
#else
#define test_and_clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bclr_mem_test_and_clear_bit(nr, vaddr) : \
					bfclr_mem_test_and_clear_bit(nr, vaddr))
#endif

static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_clear_bit(nr, addr);
}

static inline int bchg_reg_test_and_change_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bchg_mem_test_and_change_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfchg_mem_test_and_change_bit(int nr,
						volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define	test_and_change_bit(nr, vaddr)	bchg_reg_test_and_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define	test_and_change_bit(nr, vaddr)	bchg_mem_test_and_change_bit(nr, vaddr)
#else
#define test_and_change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bchg_mem_test_and_change_bit(nr, vaddr) : \
					bfchg_mem_test_and_change_bit(nr, vaddr))
#endif

static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_change_bit(nr, addr);
}

/*
 *	The true 68020 and more advanced processors support the "bfffo"
 *	instruction for finding bits. ColdFire and simple 68000 parts
 *	(including CPU32) do not support this. They simply use the generic
 *	functions.
 */
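
/*
 * bfffo reports the offset of the first set bit counting from the most
 * significant end, while the kernel numbers bits from the least significant
 * end; the "res ^ 31" steps below convert between the two. Worked example
 * (illustrative): "num & -num" isolates the lowest set bit, so for
 * num = 0x6 it yields 0x2, bfffo returns offset 30, and 30 ^ 31 gives the
 * expected bit number 1.
 */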
#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#include <asm-generic/bitops/ffz.h>
#else

static inline int find_first_zero_bit(const unsigned long *vaddr,
				      unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = ~*p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_zero_bit find_first_zero_bit
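
/*
 * Shape of the search above (descriptive note, not new behaviour): each
 * word is inverted so a zero bit becomes a one, words that invert to zero
 * (all ones) are skipped, and bfffo locates the bit in the first
 * interesting word. The "(long)p - (long)vaddr - 4" term rebases the
 * result because p has already advanced one word past the match.
 */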

static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
				     int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = ~*p++ & (~0UL << bit);
		offset -= bit;

		/* Look for zero in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No zero yet, search remaining full longwords for a zero */
	return offset + find_first_zero_bit(p, size - offset);
}
#define find_next_zero_bit find_next_zero_bit
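
/*
 * Illustrative walk-through: find_next_zero_bit(map, 64, 35) starts at
 * map[1] with bit = 3, masks off bits 0-2 of that word, and only falls
 * back to find_first_zero_bit() for the following words when bits 3-31
 * of map[1] are all ones.
 */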

static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = *p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_bit find_first_bit

static inline int find_next_bit(const unsigned long *vaddr, int size,
				int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = *p++ & (~0UL << bit);
		offset -= bit;

		/* Look for one in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No one yet, search remaining full longwords for a one */
	return offset + find_first_bit(p, size - offset);
}
#define find_next_bit find_next_bit

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	int res;

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (~word & -~word));
	return res ^ 31;
}
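
/*
 * Worked example (illustrative): ffz(0xffffffef). ~word = 0x10, already a
 * single bit; bfffo sees bit 4 at offset 27 from the MSB, and 27 ^ 31
 * recovers the answer 4.
 */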

#endif

#ifdef __KERNEL__

#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)

/*
 *	The newer ColdFire family members support a "bitrev" instruction
 *	and we can use that to implement a fast ffs. Older ColdFire parts
 *	and normal 68000 parts don't have anything special, so we use the
 *	generic functions for those.
 */
#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
	!defined(CONFIG_M68000)
static inline unsigned long __ffs(unsigned long x)
{
	__asm__ __volatile__ ("bitrev %0; ff1 %0"
		: "=d" (x)
		: "0" (x));
	return x;
}

static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs(x) + 1;
}
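
/*
 * How the pair works (illustrative trace): ff1 finds the first set bit
 * counting from the most significant end, so reversing the register with
 * bitrev first makes it count from the least significant end instead.
 * For x = 0x8, bitrev moves bit 3 to offset 3 from the MSB, ff1 returns 3,
 * so __ffs(8) == 3 and ffs(8) == 4.
 */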

#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>

#else

/*
 *	ffs: find first bit set. This is defined the same way as
 *	the libc and compiler builtin ffs routines, therefore
 *	differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0:#0},%0"
		: "=d" (cnt)
		: "dm" (x & -x));
	return 32 - cnt;
}

static inline unsigned long __ffs(unsigned long x)
{
	return ffs(x) - 1;
}
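
/*
 * Worked example (illustrative): ffs(8). "x & -x" isolates bit 3, bfffo
 * reports offset 28 from the MSB, and 32 - 28 gives the 1-based result 4.
 * __ffs() shifts that to the kernel's 0-based convention and, like ffz()
 * above, is only meaningful when a set bit exists.
 */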

/*
 *	fls: find last bit set.
 */
static inline int fls(unsigned int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0,#0},%0"
		: "=d" (cnt)
		: "dm" (x));
	return 32 - cnt;
}

static inline unsigned long __fls(unsigned long x)
{
	return fls(x) - 1;
}
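
/*
 * No bit isolation is needed here: bfffo already finds the most
 * significant set bit. Illustrative: fls(0x90) sees bit 7 at offset 24
 * from the MSB and returns 32 - 24 = 8; fls(0) returns 0 because bfffo
 * yields 32 on an all-zero field.
 */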

#endif

/* Simple test-and-set bit locks */
#define test_and_set_bit_lock	test_and_set_bit
#define clear_bit_unlock	clear_bit
#define __clear_bit_unlock	clear_bit_unlock
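
/*
 * Minimal lock sketch built on these macros (hypothetical caller code,
 * assuming a caller-owned "flags" bitmap):
 *
 *	while (test_and_set_bit_lock(0, &flags))
 *		cpu_relax();		// spin until bit 0 is ours
 *	// ... critical section ...
 *	clear_bit_unlock(0, &flags);	// release the bit
 */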

#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/le.h>
#endif /* __KERNEL__ */

#endif /* _M68K_BITOPS_H */