xref: /openbmc/linux/arch/s390/include/asm/bitops.h (revision dd21bfa4)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    Copyright IBM Corp. 1999,2013
 *
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *
 * The description below was taken in large parts from the powerpc
 * bitops header file:
 * Within a word, bits are numbered LSB first.  Lots of places make
 * this assumption by directly testing bits with (val & (1<<nr)).
 * This can cause confusion for large (> 1 word) bitmaps on a
 * big-endian system because, unlike little endian, the number of each
 * bit depends on the word size.
 *
 * The bitop functions are defined to work on unsigned longs, so the bits
 * end up numbered:
 *   |63..............0|127............64|191...........128|255...........192|
 *
 * We also have special functions which work with an MSB0 encoding.
 * The bits are numbered:
 *   |0..............63|64............127|128...........191|192...........255|
 *
 * The main difference is that bit 0-63 in the bit number field needs to be
 * reversed compared to the LSB0 encoded bit fields. This can be achieved by
 * XOR with 0x3f.
 *
 */
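
/*
 * Illustrative worked example, not part of the original header: converting
 * an MSB0 bit number to its LSB0 equivalent only flips the bit index within
 * the containing 64-bit word, i.e. nr ^ 0x3f:
 *
 *	MSB0 nr =  0  ->  LSB0 nr =  0 ^ 0x3f =  63   (MSB of word 0)
 *	MSB0 nr =  7  ->  LSB0 nr =  7 ^ 0x3f =  56
 *	MSB0 nr = 63  ->  LSB0 nr = 63 ^ 0x3f =   0   (LSB of word 0)
 *	MSB0 nr = 64  ->  LSB0 nr = 64 ^ 0x3f = 127   (MSB of word 1)
 *
 * The *_inv helpers further down implement exactly this transformation.
 */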

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/typecheck.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>

#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static inline unsigned long *
__bitops_word(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;

	/*
	 * nr ^ (nr & (BITS_PER_LONG - 1)) clears the in-word bit index,
	 * i.e. rounds nr down to a multiple of 64; shifting right by 3
	 * converts that bit count into the byte offset of the word that
	 * contains bit nr.
	 */
	addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
	return (unsigned long *)addr;
}

static inline unsigned long __bitops_mask(unsigned long nr)
{
	/* Mask selecting bit nr within its 64-bit word (LSB0 numbering). */
	return 1UL << (nr & (BITS_PER_LONG - 1));
}
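
/*
 * Worked example (illustrative, not from the original source): for nr = 67
 * and BITS_PER_LONG = 64, __bitops_word() returns the address of the second
 * unsigned long of the bitmap (byte offset (67 ^ 3) >> 3 = 64 >> 3 = 8) and
 * __bitops_mask() returns 1UL << 3, so the bitops below operate on bit 3 of
 * that word.
 */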

static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	__atomic64_or(mask, (long *)addr);
}

static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	__atomic64_and(~mask, (long *)addr);
}

static __always_inline void arch_change_bit(unsigned long nr,
					    volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	__atomic64_xor(mask, (long *)addr);
}

static inline bool arch_test_and_set_bit(unsigned long nr,
					 volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = __atomic64_or_barrier(mask, (long *)addr);
	return old & mask;
}

static inline bool arch_test_and_clear_bit(unsigned long nr,
					   volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = __atomic64_and_barrier(~mask, (long *)addr);
	return old & mask;
}

static inline bool arch_test_and_change_bit(unsigned long nr,
					    volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = __atomic64_xor_barrier(mask, (long *)addr);
	return old & mask;
}

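/*
 * The arch___*() variants below are the non-atomic versions: they update the
 * bitmap with plain loads and stores, so callers must guarantee exclusive
 * access (e.g. hold a lock or own the bitmap) themselves.
 */
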
static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	*addr |= mask;
}

static inline void arch___clear_bit(unsigned long nr,
				    volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	*addr &= ~mask;
}

static inline void arch___change_bit(unsigned long nr,
				     volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	*addr ^= mask;
}

static inline bool arch___test_and_set_bit(unsigned long nr,
					   volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = *addr;
	*addr |= mask;
	return old & mask;
}

static inline bool arch___test_and_clear_bit(unsigned long nr,
					     volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = *addr;
	*addr &= ~mask;
	return old & mask;
}

static inline bool arch___test_and_change_bit(unsigned long nr,
					      volatile unsigned long *ptr)
{
	unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);
	unsigned long old;

	old = *addr;
	*addr ^= mask;
	return old & mask;
}

static inline bool arch_test_bit(unsigned long nr,
				 const volatile unsigned long *ptr)
{
	const volatile unsigned long *addr = __bitops_word(nr, ptr);
	unsigned long mask = __bitops_mask(nr);

	return *addr & mask;
}

static inline bool arch_test_and_set_bit_lock(unsigned long nr,
					      volatile unsigned long *ptr)
{
	/*
	 * Fast path: if the bit is already set the lock cannot be acquired
	 * anyway, so skip the interlocked update and its memory barrier.
	 */
	if (arch_test_bit(nr, ptr))
		return true;
	return arch_test_and_set_bit(nr, ptr);
}

static inline void arch_clear_bit_unlock(unsigned long nr,
					 volatile unsigned long *ptr)
{
	smp_mb__before_atomic();
	arch_clear_bit(nr, ptr);
}

static inline void arch___clear_bit_unlock(unsigned long nr,
					   volatile unsigned long *ptr)
{
	smp_mb();
	arch___clear_bit(nr, ptr);
}

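/*
 * The instrumented wrappers below provide the generic set_bit(),
 * test_and_set_bit_lock(), clear_bit_unlock() etc. in terms of the arch_*()
 * helpers above, adding KASAN/KCSAN instrumentation. A minimal bit-lock
 * usage sketch (illustrative only, the bit number and flags word are
 * hypothetical):
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &flags))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &flags);
 */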
#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-non-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>

/*
 * Functions which use MSB0 bit numbering.
 * The bits are numbered:
 *   |0..............63|64............127|128...........191|192...........255|
 */
unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
				unsigned long offset);

#define for_each_set_bit_inv(bit, addr, size)				\
	for ((bit) = find_first_bit_inv((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_bit_inv((addr), (size), (bit) + 1))

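/*
 * Illustrative usage sketch (not part of the original header): walk all set
 * bits of an MSB0-numbered bitmap. The array contents are made up for the
 * example.
 *
 *	unsigned long map[2] = { 0x8000000000000001UL, 0 };
 *	unsigned long bit;
 *
 *	for_each_set_bit_inv(bit, map, 128)
 *		pr_info("MSB0 bit %lu is set\n", bit);
 *
 * This reports bits 0 and 63: the MSB and LSB of the first word in MSB0
 * numbering.
 */
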
static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline bool test_and_clear_bit_inv(unsigned long nr,
					  volatile unsigned long *ptr)
{
	return test_and_clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
	return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline bool test_bit_inv(unsigned long nr,
				const volatile unsigned long *ptr)
{
	return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES

/**
 * __flogr - find leftmost one
 * @word: The word to search
 *
 * Returns the bit number of the most significant bit set,
 * where the most significant bit has bit number 0.
 * If no bit is set this function returns 64.
 */
static inline unsigned char __flogr(unsigned long word)
{
	if (__builtin_constant_p(word)) {
		unsigned long bit = 0;

		if (!word)
			return 64;
		if (!(word & 0xffffffff00000000UL)) {
			word <<= 32;
			bit += 32;
		}
		if (!(word & 0xffff000000000000UL)) {
			word <<= 16;
			bit += 16;
		}
		if (!(word & 0xff00000000000000UL)) {
			word <<= 8;
			bit += 8;
		}
		if (!(word & 0xf000000000000000UL)) {
			word <<= 4;
			bit += 4;
		}
		if (!(word & 0xc000000000000000UL)) {
			word <<= 2;
			bit += 2;
		}
		if (!(word & 0x8000000000000000UL)) {
			word <<= 1;
			bit += 1;
		}
		return bit;
	} else {
		union register_pair rp;

		rp.even = word;
		asm volatile(
			"       flogr   %[rp],%[rp]\n"
			: [rp] "+d" (rp.pair) : : "cc");
		return rp.even;
	}
}

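/*
 * Worked examples (illustrative, not part of the original source):
 *
 *	__flogr(0x8000000000000000UL) == 0	(LSB0 bit 63, i.e. the MSB)
 *	__flogr(0x0000000000000001UL) == 63
 *	__flogr(0)                    == 64	(no bit set)
 *
 * The constant-folded path above mirrors what the flogr instruction places
 * in the even register of the register pair.
 */
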
/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
}

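/*
 * Illustrative note, not part of the original source: "-word & word" isolates
 * the least significant set bit, __flogr() then yields its MSB0 position, and
 * the XOR with 63 converts that back to the usual LSB0 bit number, e.g.
 *
 *	word = 0x18 -> -word & word = 0x08 -> __flogr() = 60 -> 60 ^ 63 = 3
 *
 * so __ffs(0x18) == 3.
 */
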
/**
 * ffs - find first bit set
 * @word: the word to search
 *
 * This is defined the same way as the libc and
 * compiler builtin ffs routines (man ffs).
 */
static inline int ffs(int word)
{
	unsigned long mask = 2 * BITS_PER_LONG - 1;
	unsigned int val = (unsigned int)word;

	return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
}

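/*
 * Worked trace (illustrative, not part of the original source): the final
 * "& mask" folds the no-bit-set case back to 0 without a branch:
 *
 *	ffs(0):  __flogr(0)  = 64, 64 ^ 63 = 127, 1 + 127 = 128, 128 & 127 = 0
 *	ffs(1):  __flogr(1)  = 63, 63 ^ 63 =   0, 1 +   0 =   1,   1 & 127 = 1
 *	ffs(16): __flogr(16) = 59, 59 ^ 63 =   4, 1 +   4 =   5,   5 & 127 = 5
 */
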
/**
 * __fls - find last (most-significant) set bit in a long word
 * @word: the word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	return __flogr(word) ^ (BITS_PER_LONG - 1);
}

/**
 * fls64 - find last set bit in a 64-bit word
 * @word: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
static inline int fls64(unsigned long word)
{
	unsigned long mask = 2 * BITS_PER_LONG - 1;

	return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
}

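/*
 * Worked examples (illustrative, not part of the original source):
 *
 *	fls64(0)                    == 0
 *	fls64(1)                    == 1
 *	fls64(0x8000000000000000UL) == 64
 *	fls(0x80000000)             == 32
 */
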
/**
 * fls - find last (most-significant) bit set
 * @word: the word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int word)
{
	return fls64(word);
}

#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */