#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)			(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit BITS_PER_LONG is the LSB of (addr+1).
 */

#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
   versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif

#define ADDR				BITOP_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))
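
/*
 * Worked example (illustrative): for a constant nr == 13,
 * CONST_MASK_ADDR(13, addr) names the byte at (addr + 1) and
 * CONST_MASK(13) is 1 << 5 == 0x20, so the constant-nr path of
 * set_bit() below turns into a single byte-wide "lock orb" of 0x20.
 */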

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __always_inline void
set_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "orb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX "bts %1,%0"
			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
	}
}
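
/*
 * Example usage (illustrative sketch; "irq_pending" is a hypothetical
 * bitmap):
 *
 *	static DECLARE_BITMAP(irq_pending, 256);
 *
 *	set_bit(42, irq_pending);	// atomic, safe against other CPUs
 *	__set_bit(42, irq_pending);	// cheaper, caller must serialize
 */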

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __always_inline void
clear_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "andb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btr %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}
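
/*
 * Example (illustrative sketch; PENDING, QUEUED and "flags" are
 * hypothetical):
 *
 *	clear_bit(PENDING, &flags);
 *	smp_mb__after_clear_bit();	// barrier macro defined below
 *	if (test_bit(QUEUED, &flags))
 *		...
 */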

/**
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}

/**
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(long nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		asm volatile(LOCK_PREFIX "xorb %1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" ((u8)CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX "btc %1,%0"
			: BITOP_ADDR(addr)
			: "Ir" (nr));
	}
}
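
/*
 * Example (illustrative; LED_BIT and "led_state" are hypothetical):
 *
 *	change_bit(LED_BIT, &led_state);	// atomic toggle
 */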

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
}
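
/*
 * Example (illustrative; BUSY and "state" are hypothetical): claim a
 * resource exactly once across CPUs.
 *
 *	if (test_and_set_bit(BUSY, &state))
 *		return -EBUSY;		// somebody else already owns it
 */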

/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static __always_inline int
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}
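
/*
 * Example (illustrative; LOCK_BIT and "word" are hypothetical): a simple
 * bit spinlock built from the lock/unlock pair in this file.
 *
 *	while (test_and_set_bit_lock(LOCK_BIT, &word))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(LOCK_BIT, &word);
 */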

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	asm("bts %2,%1\n\t"
	    "sbb %0,%0"
	    : "=r" (oldbit), ADDR
	    : "Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
}
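
/*
 * Example (illustrative; PENDING, "flags" and handle_event() are
 * hypothetical): consume a pending event exactly once.
 *
 *	if (test_and_clear_bit(PENDING, &flags))
 *		handle_event();
 */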

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 *
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btr %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("btc %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit), ADDR
		     : "Ir" (nr) : "memory");

	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
}

static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}

static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
{
	int oldbit;

	asm volatile("bt %2,%1\n\t"
		     "sbb %0,%0"
		     : "=r" (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile unsigned long *addr);
#endif

#define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))
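
/*
 * Example (illustrative; "flags" and nr are hypothetical): with a constant
 * bit number the compiler picks the pure C constant_test_bit(); with a
 * runtime value it emits a BT instruction via variable_test_bit().
 *
 *	if (test_bit(3, &flags))	// constant nr: plain C expression
 *		...
 *	if (test_bit(nr, &flags))	// variable nr: "bt" + "sbb"
 *		...
 */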

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}
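
/*
 * Example (illustrative): __ffs(0x18) == 3, since bit 3 is the least
 * significant set bit of 0b11000.  __ffs(0) is undefined.
 */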

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}
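
/*
 * Example (illustrative): ffz(0x17) == 3, since bit 3 is the lowest clear
 * bit of 0b10111.  ffz(~0UL) is undefined.
 */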

/**
 * __fls - find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
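
/*
 * Example (illustrative): __fls(0x18) == 4, since bit 4 is the most
 * significant set bit of 0b11000.  __fls(0) is undefined.
 */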

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says the
	 * value written back is the same as before, except that the top
	 * 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
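
/*
 * Example (illustrative): ffs(0) == 0; ffs(0x18) == 4, since the lowest set
 * bit is bit 3 and ffs() uses 1-based positions, unlike __ffs() above.
 */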

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way to the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static inline int fls(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says the
	 * value written back is the same as before, except that the top
	 * 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
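
/*
 * Example (illustrative): fls(0) == 0; fls(0x18) == 5, since bit 4 is the
 * highest set bit (1-based position 5); fls(0x80000000) == 32.
 */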

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way to the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says the
	 * value written back is the same as before.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif
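
/*
 * Example (illustrative): fls64(0) == 0, fls64(1ULL << 40) == 41,
 * fls64(~0ULL) == 64.
 */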

#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#define ARCH_HAS_FAST_MULTIPLIER 1

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */