xref: /openbmc/linux/arch/alpha/include/asm/bitops.h (revision a8fe58ce)
1 #ifndef _ALPHA_BITOPS_H
2 #define _ALPHA_BITOPS_H
3 
4 #ifndef _LINUX_BITOPS_H
5 #error only <linux/bitops.h> can be included directly
6 #endif
7 
8 #include <asm/compiler.h>
9 #include <asm/barrier.h>
10 
11 /*
12  * Copyright 1994, Linus Torvalds.
13  */
14 
15 /*
16  * These have to be done with inline assembly: that way the bit-setting
17  * is guaranteed to be atomic. All bit operations return 0 if the bit
18  * was cleared before the operation and != 0 if it was not.
19  *
20  * To get proper branch prediction for the main line, we must branch
21  * forward to code at the end of this object's .text section, then
22  * branch back to restart the operation.
23  *
24  * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
25  */
26 
/*
 * Atomically set bit @nr in the bitmap at @addr.
 *
 * Uses an Alpha load-locked/store-conditional (ldl_l/stl_c) loop on the
 * 32-bit word containing the bit.  On stl_c failure the code branches to
 * an out-of-line stub in .subsection 2 and retries (see the header
 * comment about branch prediction).
 */
static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	/* 32-bit word that holds bit nr (nr >> 5 == nr / 32) */
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"		/* load-locked the word */
	"	bis %0,%2,%0\n"		/* OR in the bit mask */
	"	stl_c %0,%1\n"		/* store-conditional; %0 = 0 on failure */
	"	beq %0,2f\n"		/* failed: go to out-of-line retry */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
44 
45 /*
46  * WARNING: non atomic version.
47  */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
	/* Plain (non-atomic) read-modify-write of the 32-bit word. */
	int *word = ((int *) addr) + (nr >> 5);
	int mask = 1 << (nr & 31);

	*word |= mask;
}
55 
/*
 * Atomically clear bit @nr in the bitmap at @addr.
 *
 * Same LL/SC retry structure as set_bit(), with BIC (bit-clear)
 * instead of BIS.
 */
static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	/* 32-bit word that holds bit nr */
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"		/* load-locked the word */
	"	bic %0,%2,%0\n"		/* clear the bit */
	"	stl_c %0,%1\n"		/* store-conditional; %0 = 0 on failure */
	"	beq %0,2f\n"		/* failed: out-of-line retry */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
73 
/*
 * Clear bit @nr with release semantics: the barrier orders all
 * preceding memory accesses before the atomic clear, so this can be
 * used to drop a bit-spinlock.
 */
static inline void
clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	clear_bit(nr, addr);
}
80 
81 /*
82  * WARNING: non atomic version.
83  */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
	/* Plain (non-atomic) clear of one bit in the 32-bit word. */
	int *word = ((int *) addr) + (nr >> 5);
	int mask = 1 << (nr & 31);

	*word &= ~mask;
}
91 
/*
 * Non-atomic unlock variant: barrier followed by a plain clear.
 * NOTE(review): presumably only safe when the caller is the sole
 * writer of this word (the usual __clear_bit_unlock contract) —
 * confirm against callers.
 */
static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}
98 
/*
 * Atomically toggle bit @nr in the bitmap at @addr.
 *
 * Same LL/SC retry structure as set_bit(), with XOR to flip the bit.
 */
static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	/* 32-bit word that holds bit nr */
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"		/* load-locked the word */
	"	xor %0,%2,%0\n"		/* toggle the bit */
	"	stl_c %0,%1\n"		/* store-conditional; %0 = 0 on failure */
	"	beq %0,2f\n"		/* failed: out-of-line retry */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
116 
117 /*
118  * WARNING: non atomic version.
119  */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
	/* Plain (non-atomic) toggle of one bit in the 32-bit word. */
	int *word = ((int *) addr) + (nr >> 5);
	int mask = 1 << (nr & 31);

	*word ^= mask;
}
127 
/*
 * Atomically set bit @nr and return its previous value (non-zero if
 * it was already set).  Full-barrier semantics on SMP: mb before and
 * after the read-modify-write.  If the bit is already set the store
 * is skipped entirely (branch to 2:).
 */
static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;	/* old value of the bit, still in place */
	unsigned long temp;
	/* 32-bit word that holds bit nr */
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"			/* order prior accesses before the RMW */
#endif
	"1:	ldl_l %0,%4\n"		/* load-locked the word */
	"	and %0,%3,%2\n"		/* extract old bit value */
	"	bne %2,2f\n"		/* already set: nothing to store */
	"	xor %0,%3,%0\n"		/* bit known clear, so xor sets it */
	"	stl_c %0,%1\n"		/* store-conditional; %0 = 0 on failure */
	"	beq %0,3f\n"		/* failed: out-of-line retry */
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"			/* order the RMW before later accesses */
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
157 
/*
 * Lock-acquiring variant of test_and_set_bit(): identical RMW loop but
 * with only the trailing mb, giving acquire ordering — later accesses
 * cannot move before the bit is taken.
 */
static inline int
test_and_set_bit_lock(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;	/* old value of the bit, still in place */
	unsigned long temp;
	/* 32-bit word that holds bit nr */
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"		/* load-locked the word */
	"	and %0,%3,%2\n"		/* extract old bit value */
	"	bne %2,2f\n"		/* already set: lock not acquired here */
	"	xor %0,%3,%0\n"		/* bit known clear, so xor sets it */
	"	stl_c %0,%1\n"		/* store-conditional; %0 = 0 on failure */
	"	beq %0,3f\n"		/* failed: out-of-line retry */
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"			/* acquire barrier */
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
184 
185 /*
186  * WARNING: non atomic version.
187  */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
	/* Non-atomic: read the word once, set the bit, report old state. */
	int *word = ((int *) addr) + (nr >> 5);
	int mask = 1 << (nr & 0x1f);
	int prev = *word;

	*word = prev | mask;
	return (prev & mask) != 0;
}
198 
/*
 * Atomically clear bit @nr and return its previous value (non-zero if
 * it was set).  Full-barrier semantics on SMP.  If the bit is already
 * clear the store is skipped (branch to 2:).
 */
static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;	/* old value of the bit, still in place */
	unsigned long temp;
	/* 32-bit word that holds bit nr */
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"			/* order prior accesses before the RMW */
#endif
	"1:	ldl_l %0,%4\n"		/* load-locked the word */
	"	and %0,%3,%2\n"		/* extract old bit value */
	"	beq %2,2f\n"		/* already clear: nothing to store */
	"	xor %0,%3,%0\n"		/* bit known set, so xor clears it */
	"	stl_c %0,%1\n"		/* store-conditional; %0 = 0 on failure */
	"	beq %0,3f\n"		/* failed: out-of-line retry */
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"			/* order the RMW before later accesses */
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
228 
229 /*
230  * WARNING: non atomic version.
231  */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	/* Non-atomic: read the word once, clear the bit, report old state. */
	int *word = ((int *) addr) + (nr >> 5);
	int mask = 1 << (nr & 0x1f);
	int prev = *word;

	*word = prev & ~mask;
	return (prev & mask) != 0;
}
242 
/*
 * Atomically toggle bit @nr and return its previous value (non-zero if
 * it was set).  Unlike the set/clear variants there is no early-out:
 * the xor always has to be stored.  Full-barrier semantics on SMP.
 */
static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;	/* old value of the bit, still in place */
	unsigned long temp;
	/* 32-bit word that holds bit nr */
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"			/* order prior accesses before the RMW */
#endif
	"1:	ldl_l %0,%4\n"		/* load-locked the word */
	"	and %0,%3,%2\n"		/* extract old bit value */
	"	xor %0,%3,%0\n"		/* toggle the bit */
	"	stl_c %0,%1\n"		/* store-conditional; %0 = 0 on failure */
	"	beq %0,3f\n"		/* failed: out-of-line retry */
#ifdef CONFIG_SMP
	"	mb\n"			/* order the RMW before later accesses */
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
270 
271 /*
272  * WARNING: non atomic version.
273  */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
	/* Non-atomic: read the word once, toggle the bit, report old state. */
	int *word = ((int *) addr) + (nr >> 5);
	int mask = 1 << (nr & 0x1f);
	int prev = *word;

	*word = prev ^ mask;
	return (prev & mask) != 0;
}
284 
static inline int
test_bit(int nr, const volatile void * addr)
{
	/* Non-atomic read of bit nr; returns 0 or 1. */
	const int *word = (const int *) addr;

	return (word[nr >> 5] >> (nr & 31)) & 1;
}
290 
291 /*
292  * ffz = Find First Zero in word. Undefined if no zero exists,
293  * so code should check against ~0UL first..
294  *
295  * Do a binary search on the bits.  Due to the nature of large
296  * constants on the alpha, it is worthwhile to split the search.
297  */
/*
 * Position of the first (lowest) zero bit within the low byte of x.
 * Returns 0 when no zero bit exists in bits 0..7 (isolated bit lands
 * outside the byte masks), matching the original behaviour.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	/* Isolate the lowest clear bit of x as a one-hot mask. */
	unsigned long bit = ~x & -~x;
	unsigned long pos = 0;

	/* Binary search on the one-hot mask within the byte. */
	if (bit & 0xF0)
		pos += 4;
	if (bit & 0xCC)
		pos += 2;
	if (bit & 0xAA)
		pos += 1;

	return pos;
}
312 
static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly: count trailing zeros
	   of the complement. */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	/*
	 * cmpbge(word, ~0UL) sets bit i iff byte i of word is 0xff, so
	 * the first zero bit of 'bits' indexes the first byte that
	 * contains a zero bit.  Extract that byte and repeat the byte
	 * search within it.
	 */
	bits = __kernel_cmpbge(word, ~0UL);
	qofs = ffz_b(bits);		/* index of first non-0xff byte */
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);		/* zero-bit offset inside that byte */

	return qofs*8 + bofs;
#endif
}
329 
330 /*
331  * __ffs = Find First set bit in word.  Undefined if no set bit exists.
332  */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly with count-trailing-zeros. */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	/*
	 * cmpbge(0, word) sets bit i iff byte i of word is zero, so the
	 * first zero bit of 'bits' indexes the first non-zero byte.
	 * Extract that byte, complement it, and find its first zero bit
	 * (== first set bit of the original byte).
	 */
	bits = __kernel_cmpbge(0, word);
	qofs = ffz_b(bits);		/* index of first non-zero byte */
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);		/* set-bit offset inside that byte */

	return qofs*8 + bofs;
#endif
}
349 
350 #ifdef __KERNEL__
351 
352 /*
353  * ffs: find first bit set. This is defined the same way as
354  * the libc and compiler builtin ffs routines, therefore
355  * differs in spirit from the above __ffs.
356  */
357 
static inline int ffs(int word)
{
	/*
	 * __ffs(0) is undefined, but the result is computed
	 * unconditionally and then discarded when word == 0 —
	 * presumably to let the compiler emit a conditional move
	 * instead of a branch.
	 */
	int result = __ffs(word) + 1;
	return word ? result : 0;
}
363 
364 /*
365  * fls: find last bit set.
366  */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* EV67 has count-leading-zeros: fls64(x) = 64 - clz(x). */
static inline int fls64(unsigned long word)
{
	return 64 - __kernel_ctlz(word);
}
#else
/* Lookup table defined elsewhere; presumably __flsm1_tab[t] == fls(t) - 1
   for byte values t (with __flsm1_tab[0] == 0) — confirm in the .c file. */
extern const unsigned char __flsm1_tab[256];

static inline int fls64(unsigned long x)
{
	unsigned long t, a, r;

	/*
	 * cmpbge(x, 0x0101..01) sets bit i iff byte i of x is non-zero;
	 * the table then yields the index of the highest non-zero byte.
	 * Extract that byte, look up the highest bit within it, and add
	 * 1 for non-zero x (so fls64(0) == 0).
	 */
	t = __kernel_cmpbge (x, 0x0101010101010101UL);
	a = __flsm1_tab[t];		/* index of highest non-zero byte */
	t = __kernel_extbl (x, a);
	r = a*8 + __flsm1_tab[t] + (x != 0);

	return r;
}
#endif
387 
static inline unsigned long __fls(unsigned long x)
{
	/* Index of the most significant set bit; undefined for x == 0
	   (fls64(0) == 0 would yield (unsigned long)-1 here). */
	return fls64(x) - 1;
}
392 
static inline int fls(int x)
{
	/* Zero-extend through unsigned int so bits 32..63 are clear
	   before the 64-bit search. */
	return fls64((unsigned int) x);
}
397 
398 /*
399  * hweightN: returns the hamming weight (i.e. the number
400  * of bits set) of a N-bit word
401  */
402 
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee.  EV67 can calculate it directly via the CTPOP (population
   count) instruction. */
static inline unsigned long __arch_hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

static inline unsigned int __arch_hweight32(unsigned int w)
{
	/* w is zero-extended, so the 64-bit popcount is exact. */
	return __arch_hweight64(w);
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
	/* Mask to the low 16 bits before counting. */
	return __arch_hweight64(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
	/* Mask to the low 8 bits before counting. */
	return __arch_hweight64(w & 0xff);
}
#else
/* No CTPOP: fall back to the generic software popcount. */
#include <asm-generic/bitops/arch_hweight.h>
#endif
427 
428 #include <asm-generic/bitops/const_hweight.h>
429 
430 #endif /* __KERNEL__ */
431 
432 #include <asm-generic/bitops/find.h>
433 
434 #ifdef __KERNEL__
435 
436 /*
437  * Every architecture must define this function. It's the fastest
438  * way of searching a 100-bit bitmap.  It's guaranteed that at least
439  * one of the 100 bits is cleared.
440  */
static inline unsigned long
sched_find_first_bit(const unsigned long b[2])
{
	unsigned long b0, b1, ofs, tmp;

	b0 = b[0];
	b1 = b[1];
	ofs = (b0 ? 0 : 64);	/* the second word starts at bit 64 */
	tmp = (b0 ? b0 : b1);	/* first word with any bit set */

	/* __ffs is undefined for tmp == 0; the caller guarantees at
	   least one set bit in the 100-bit map, so tmp != 0 here. */
	return __ffs(tmp) + ofs;
}
453 
454 #include <asm-generic/bitops/le.h>
455 
456 #include <asm-generic/bitops/ext2-atomic-setbit.h>
457 
458 #endif /* __KERNEL__ */
459 
460 #endif /* _ALPHA_BITOPS_H */
461