/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif
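/*
 * _BITOPS_LONG_SHIFT is log2(BITS_PER_LONG): shifting a bit number right
 * by it yields the word index. Illustrative example on a 64-bit build:
 * bit 70 lives in addr[70 >> 6] == addr[1], at position 70 & 63 == 6.
 */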

#define BIT_64(n)			(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define RLONG_ADDR(x)			 "m" (*(volatile long *) (x))
#define WBYTE_ADDR(x)			"+m" (*(volatile char *) (x))

#define ADDR				RLONG_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define CONST_MASK_ADDR(nr, addr)	WBYTE_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))
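/*
 * Sketch of the byte-mask trick above (illustrative, not generated code):
 * for a compile-time-constant bit, e.g. nr == 13,
 *
 *	CONST_MASK_ADDR(13, addr)  ->  the byte at (void *)addr + 1
 *	CONST_MASK(13)             ->  1 << 5  ==  0x20
 *
 * so arch_set_bit(13, addr) becomes "lock orb $0x20, 1(addr)",
 * presumably preferred as a shorter immediate-operand encoding than
 * bts when the old value isn't needed.
 */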

static __always_inline void
arch_set_bit(long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		asm volatile(LOCK_PREFIX "orb %b1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" (CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}
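/*
 * Callers normally reach this through set_bit() from <linux/bitops.h>,
 * which adds instrumentation on top. A minimal usage sketch (the bitmap
 * name here is hypothetical):
 *
 *	static unsigned long flags[BITS_TO_LONGS(128)];
 *	set_bit(42, flags);	// atomic RMW, no implied memory barrier
 */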

static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline void
arch_clear_bit(long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		asm volatile(LOCK_PREFIX "andb %b1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" (~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}

static __always_inline void
arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	arch_clear_bit(nr, addr);
}

static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline bool
arch_clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
	bool negative;
	asm volatile(LOCK_PREFIX "andb %2,%1"
		CC_SET(s)
		: CC_OUT(s) (negative), WBYTE_ADDR(addr)
		: "ir" ((char) ~(1 << nr)) : "memory");
	return negative;
}
#define arch_clear_bit_unlock_is_negative_byte                                 \
	arch_clear_bit_unlock_is_negative_byte
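/*
 * Rationale sketch for the helper above: it clears bit @nr and reports
 * the sign (bit 7) of the containing byte in a single locked RMW, so a
 * caller such as the page-unlock fast path can drop PG_locked and test
 * PG_waiters without a second atomic access. @nr must fall within the
 * first byte for the sign test to be meaningful.
 */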

static __always_inline void
arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	arch___clear_bit(nr, addr);
}

static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline void
arch_change_bit(long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		asm volatile(LOCK_PREFIX "xorb %b1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" (CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}

static __always_inline bool
arch_test_and_set_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
}

static __always_inline bool
arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return arch_test_and_set_bit(nr, addr);
}

static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm(__ASM_SIZE(bts) " %2,%1"
	    CC_SET(c)
	    : CC_OUT(c) (oldbit)
	    : ADDR, "Ir" (nr) : "memory");
	return oldbit;
}

static __always_inline bool
arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
}

/*
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btr) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : ADDR, "Ir" (nr) : "memory");
	return oldbit;
}

static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btc) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : ADDR, "Ir" (nr) : "memory");

	return oldbit;
}

static __always_inline bool
arch_test_and_change_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}

static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
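/*
 * Worked example (illustrative): on a 64-bit build, constant_test_bit(70, addr)
 * evaluates (1UL << (70 & 63)) & addr[70 >> 6], i.e. bit 6 of addr[1],
 * with the mask and word index folded at compile time.
 */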

static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile("testb %2,%1"
		     CC_SET(nz)
		     : CC_OUT(nz) (oldbit)
		     : "m" (((unsigned char *)addr)[nr >> 3]),
		       "i" (1 << (nr & 7))
		     :"memory");

	return oldbit;
}

static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(bt) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");

	return oldbit;
}

static __always_inline bool
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return __builtin_constant_p(nr) ? constant_test_bit(nr, addr) :
					  variable_test_bit(nr, addr);
}

static __always_inline bool
arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
{
	return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
					  variable_test_bit(nr, addr);
}

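/*
 * "rep; bsf" below is, encoding-wise, TZCNT: CPUs with BMI1 execute it
 * as TZCNT (which is well defined even for a zero source), while older
 * CPUs ignore the REP prefix and run plain BSF. Either way the result
 * for a nonzero @word is the index of its lowest set bit.
 */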
static __always_inline unsigned long variable__ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
#define __ffs(word)				\
	(__builtin_constant_p(word) ?		\
	 (unsigned long)__builtin_ctzl(word) :	\
	 variable__ffs(word))
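/*
 * Example values (illustrative): __ffs(0x1) == 0, __ffs(0x8) == 3,
 * __ffs(0xff00) == 8. The result is 0-based, unlike ffs() below.
 */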

static __always_inline unsigned long variable_ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
	    : "=r" (word)
	    : "r" (~word));
	return word;
}

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
#define ffz(word)				\
	(__builtin_constant_p(word) ?		\
	 (unsigned long)__builtin_ctzl(~word) :	\
	 variable_ffz(word))
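/*
 * Example values (illustrative): ffz(0x0) == 0, ffz(0x1) == 1,
 * ffz(0x7) == 3 -- i.e. ffz(word) behaves as __ffs(~word), 0-based.
 */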

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
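/*
 * Example values (illustrative): __fls(0x1) == 0, __fls(0x8) == 3, and
 * on a 64-bit build __fls(~0UL) == 63. Again 0-based, unlike fls() below.
 */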

#undef ADDR

#ifdef __KERNEL__
static __always_inline int variable_ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says
	 * the dest reg is undefined if x==0, but Intel's CPU architects have
	 * stated that it is rewritten with its previous value, except that
	 * the top 32 bits are cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}

/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines and therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x))
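/*
 * Example values (illustrative): ffs(0) == 0, ffs(0x1) == 1,
 * ffs(0x8) == 4 -- 1-based, so ffs(x) == __ffs(x) + 1 for nonzero x.
 */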

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way to the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(unsigned int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says
	 * the dest reg is undefined if x==0, but Intel's CPU architects have
	 * stated that it is rewritten with its previous value, except that
	 * the top 32 bits are cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
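/*
 * Example values (illustrative): fls(0) == 0, fls(1) == 1,
 * fls(0x80000000u) == 32.
 */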

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way to the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says
	 * the dest reg is undefined if x==0, but Intel's CPU architects have
	 * stated that it is rewritten with its previous value.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
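/*
 * Example values (illustrative): fls64(0) == 0, fls64(1) == 1,
 * fls64(1ULL << 63) == 64. Seeding bitpos with -1 makes the x==0 case
 * fall out of the "dest unchanged" behaviour described above.
 */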
#else
#include <asm-generic/bitops/fls64.h>
#endif

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-non-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */