Lines Matching +full:x +full:- +full:mask
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
13 * big-endian system because, unlike little endian, the number of each
22 * There are a few little-endian macros used mostly for filesystem
24 * byte-oriented:
27 * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit
28 * number field needs to be reversed compared to the big-endian bit
42 #include <asm/asm-compat.h>
46 #define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be))
48 #define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
54 #define PPC_BITLSHIFT32(be) (32 - 1 - (be))
56 #define PPC_BITMASK32(bs, be) ((PPC_BIT32(bs) - PPC_BIT32(be))|PPC_BIT32(bs))
58 #define PPC_BITLSHIFT8(be) (8 - 1 - (be))
60 #define PPC_BITMASK8(bs, be) ((PPC_BIT8(bs) - PPC_BIT8(be))|PPC_BIT8(bs))
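/*
 * Illustration (not part of the matched lines above): these macros use IBM
 * bit numbering, where bit 0 is the most significant bit.  A minimal
 * user-space sketch, assuming a 64-bit unsigned long and the usual kernel
 * definition PPC_BIT(bit) == 1UL << PPC_BITLSHIFT(bit), which is not shown
 * in this listing:
 */
#define BITS_PER_LONG		64
#define PPC_BITLSHIFT(be)	(BITS_PER_LONG - 1 - (be))
#define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))	/* assumed definition */
#define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

_Static_assert(PPC_BIT(0)  == 0x8000000000000000UL, "bit 0 is the MSB");
_Static_assert(PPC_BIT(63) == 0x1UL, "bit 63 is the LSB");
_Static_assert(PPC_BITMASK(0, 3)   == 0xF000000000000000UL, "bits 0..3");
_Static_assert(PPC_BITMASK(60, 63) == 0xFUL, "bits 60..63");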
66 static inline void fn(unsigned long mask, \
76 "bne- 1b\n" \
78 : "rK" (mask), "r" (p) \
85 static __always_inline bool is_rlwinm_mask_valid(unsigned long x)
87 if (!x)
89 if (x & 1)
90 x = ~x; // make the mask non-wrapping
91 x += x & -x; // adding the low set bit results in at most one bit set
93 return !(x & (x - 1));
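/*
 * Illustration (not from this listing): the check above accepts exactly the
 * masks a single rlwinm instruction can encode -- one contiguous run of set
 * bits, possibly wrapping past bit 0.  A user-space sketch of the same logic;
 * uint32_t stands in for the 32-bit unsigned long of the ppc32 configurations
 * that use this fast path, and the test values are illustrative:
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool rlwinm_mask_valid_sketch(uint32_t x)
{
	if (!x)
		return false;		/* empty mask: nothing to encode */
	if (x & 1)
		x = ~x;			/* make the mask non-wrapping */
	x += x & -x;			/* a contiguous run collapses to one bit (or 0) */
	return !(x & (x - 1));		/* power of two or zero => one contiguous run */
}

int main(void)
{
	assert(rlwinm_mask_valid_sketch(0x000000f0));	/* one run */
	assert(rlwinm_mask_valid_sketch(0xc0000003));	/* run wrapping past bit 0 */
	assert(!rlwinm_mask_valid_sketch(0x00000005));	/* two separate runs */
	assert(!rlwinm_mask_valid_sketch(0x00000000));	/* empty mask */
	return 0;
}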
97 static inline void fn(unsigned long mask, volatile unsigned long *_p) \
103 __builtin_constant_p(mask) && is_rlwinm_mask_valid(~mask)) {\
109 "bne- 1b\n" \
111 : "n" (~mask), "r" (p) \
119 "bne- 1b\n" \
121 : "r" (mask), "r" (p) \
153 unsigned long mask, \
163 "bne- 1b\n" \
166 : "rK" (mask), "r" (p), "n" (eh) \
168 return (old & mask); \
178 static inline unsigned long test_and_clear_bits(unsigned long mask, volatile unsigned long *_p)
184 __builtin_constant_p(mask) && is_rlwinm_mask_valid(~mask)) {
190 "bne- 1b\n"
193 : "n" (~mask), "r" (p)
201 "bne- 1b\n"
204 : "r" (mask), "r" (p)
208 return (old & mask);
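/*
 * Illustration (not from this listing): test_and_clear_bits() atomically
 * clears the bits in 'mask' and returns which of them were previously set.
 * The kernel does this inside a load-and-reserve/store-conditional retry
 * loop, and on ppc32 a single rlwinm handles the clear when the constant
 * ~mask passes is_rlwinm_mask_valid().  A non-atomic user-space sketch of
 * just the semantics:
 */
static unsigned long test_and_clear_bits_sketch(unsigned long mask,
						unsigned long *p)
{
	unsigned long old = *p;

	*p = old & ~mask;	/* clear the requested bits */
	return old & mask;	/* report which of them were set before */
}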
242 unsigned long mask = BIT_MASK(nr);
249 "bne- 1b\n"
251 : "r" (mask), "r" (p)
266 #include <asm-generic/bitops/non-atomic.h>
275 * Return the zero-based bit position (LE, not IBM bit numbering) of
276 * the most significant 1-bit in a double word.
278 #define __ilog2(x) ilog2(x)
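/*
 * Worked example (not from this listing): __ilog2(0x4000) is 14 and
 * __ilog2(1) is 0 -- positions are counted from the least significant bit,
 * so for non-zero x, __ilog2(x) == fls64(x) - 1.
 */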
280 #include <asm-generic/bitops/ffz.h>
282 #include <asm-generic/bitops/builtin-__ffs.h>
284 #include <asm-generic/bitops/builtin-ffs.h>
287 * fls: find last (most-significant) bit set.
290 static __always_inline int fls(unsigned int x)
294 if (__builtin_constant_p(x))
295 return x ? 32 - __builtin_clz(x) : 0;
296 asm("cntlzw %0,%1" : "=r" (lz) : "r" (x));
297 return 32 - lz;
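/*
 * Illustration (not from this listing): fls() returns the 1-based position
 * of the highest set bit, with fls(0) == 0.  A user-space mirror of the
 * constant-folded branch above, plus a few spot checks:
 */
#include <assert.h>

static inline int fls_sketch(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	assert(fls_sketch(0) == 0);		/* no bit set */
	assert(fls_sketch(1) == 1);		/* result is 1-based */
	assert(fls_sketch(0x40000000) == 31);
	assert(fls_sketch(0x80000000) == 32);	/* most significant bit */
	return 0;
}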
300 #include <asm-generic/bitops/builtin-__fls.h>
303 * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
304 * instruction; for 32-bit we use the generic version, which does two
305 * 32-bit fls calls.
308 static __always_inline int fls64(__u64 x)
312 if (__builtin_constant_p(x))
313 return x ? 64 - __builtin_clzll(x) : 0;
314 asm("cntlzd %0,%1" : "=r" (lz) : "r" (x));
315 return 64 - lz;
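/*
 * Worked example (not from this listing): by the same rule, fls64(0) == 0,
 * fls64(1ULL << 32) == 33 and fls64(~0ULL) == 64; 32-bit builds get the
 * equivalent result from the generic two-fls() fallback included below.
 */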
318 #include <asm-generic/bitops/fls64.h>
326 #include <asm-generic/bitops/const_hweight.h>
328 #include <asm-generic/bitops/hweight.h>
332 #include <asm-generic/bitops/instrumented-atomic.h>
333 #include <asm-generic/bitops/instrumented-lock.h>
335 /* Little-endian versions */
336 #include <asm-generic/bitops/le.h>
340 #include <asm-generic/bitops/ext2-atomic-setbit.h>
342 #include <asm-generic/bitops/sched.h>