xref: /openbmc/linux/lib/find_bit.c (revision 20e2fc42)
// SPDX-License-Identifier: GPL-2.0-or-later
/* bit search implementation
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Copyright (C) 2008 IBM Corporation
 * 'find_last_bit' was written by Rusty Russell <rusty@rustcorp.com.au>
 * (Inspired by David Howells' find_next_bit implementation)
 *
 * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
 * size and improve performance, 2015.
 */

#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/kernel.h>

#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
		!defined(find_next_and_bit)

/*
 * This is a common helper function for find_next_bit, find_next_zero_bit, and
 * find_next_and_bit. The differences are:
 *  - The "invert" argument, which is XORed with each fetched word before
 *    searching it for set bits.
 *  - The optional "addr2", which is ANDed with "addr1" if present.
 */
static inline unsigned long _find_next_bit(const unsigned long *addr1,
		const unsigned long *addr2, unsigned long nbits,
		unsigned long start, unsigned long invert)
{
	unsigned long tmp;

	if (unlikely(start >= nbits))
		return nbits;

	tmp = addr1[start / BITS_PER_LONG];
	if (addr2)
		tmp &= addr2[start / BITS_PER_LONG];
	tmp ^= invert;

	/* Handle 1st word. */
	tmp &= BITMAP_FIRST_WORD_MASK(start);
	start = round_down(start, BITS_PER_LONG);

	while (!tmp) {
		start += BITS_PER_LONG;
		if (start >= nbits)
			return nbits;

		tmp = addr1[start / BITS_PER_LONG];
		if (addr2)
			tmp &= addr2[start / BITS_PER_LONG];
		tmp ^= invert;
	}

	return min(start + __ffs(tmp), nbits);
}
#endif
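
/*
 * Worked example of the first-word handling above, assuming 64-bit words and
 * a hypothetical start of 70:
 *
 *	word index  = 70 / 64 = 1
 *	first mask  = BITMAP_FIRST_WORD_MASK(70) = ~0UL << (70 % 64) = ~0UL << 6,
 *	              so bits 0..5 of word 1 are ignored
 *	start       = round_down(70, 64) = 64
 *
 * The first non-zero word then yields start + __ffs(tmp), clamped to nbits.
 */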

#ifndef find_next_bit
/*
 * Find the next set bit in a memory region.
 */
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
			    unsigned long offset)
{
	return _find_next_bit(addr, NULL, size, offset, 0UL);
}
EXPORT_SYMBOL(find_next_bit);
#endif
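
/*
 * Minimal usage sketch ('mask' and 'nbits' are illustrative, caller-provided
 * names): visit every set bit of a bitmap.
 *
 *	unsigned long bit;
 *
 *	for (bit = find_next_bit(mask, nbits, 0); bit < nbits;
 *	     bit = find_next_bit(mask, nbits, bit + 1)) {
 *		... handle bit ...
 *	}
 *
 * This is roughly what the for_each_set_bit() iterator expands to.
 */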

#ifndef find_next_zero_bit
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
				 unsigned long offset)
{
	return _find_next_bit(addr, NULL, size, offset, ~0UL);
}
EXPORT_SYMBOL(find_next_zero_bit);
#endif
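
/*
 * Usage sketch: find and claim a free slot in an allocation bitmap ('used'
 * and 'nslots' are illustrative, caller-provided names):
 *
 *	unsigned long slot = find_next_zero_bit(used, nslots, 0);
 *
 *	if (slot >= nslots)
 *		return -ENOSPC;
 *	__set_bit(slot, used);
 *
 * Note that the find/set sequence is not atomic; real callers hold a lock or
 * retry with test_and_set_bit().
 */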

#if !defined(find_next_and_bit)
unsigned long find_next_and_bit(const unsigned long *addr1,
		const unsigned long *addr2, unsigned long size,
		unsigned long offset)
{
	return _find_next_bit(addr1, addr2, size, offset, 0UL);
}
EXPORT_SYMBOL(find_next_and_bit);
#endif
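
/*
 * Usage sketch: walk the intersection of two bitmaps without allocating a
 * temporary ('a', 'b' and 'nbits' are illustrative, caller-provided names):
 *
 *	unsigned long bit;
 *
 *	for (bit = find_next_and_bit(a, b, nbits, 0); bit < nbits;
 *	     bit = find_next_and_bit(a, b, nbits, bit + 1)) {
 *		... bit is set in both 'a' and 'b' ...
 *	}
 *
 * This is the pattern that cpumask helpers such as cpumask_next_and() rely on.
 */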

#ifndef find_first_bit
/*
 * Find the first set bit in a memory region.
 */
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
	unsigned long idx;

	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
		if (addr[idx])
			return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
	}

	return size;
}
EXPORT_SYMBOL(find_first_bit);
#endif
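
/*
 * Worked example, assuming 64-bit words and a hypothetical two-word bitmap
 * with addr[0] == 0 and addr[1] == 0x10:
 *
 *	idx 0: addr[0] is zero, keep scanning
 *	idx 1: __ffs(0x10) == 4, so the result is 1 * 64 + 4 = 68
 *
 * The min() clamps the result to 'size' in case the last word carries bits
 * beyond the end of the bitmap.
 */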

#ifndef find_first_zero_bit
/*
 * Find the first cleared bit in a memory region.
 */
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
	unsigned long idx;

	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
		if (addr[idx] != ~0UL)
			return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
	}

	return size;
}
EXPORT_SYMBOL(find_first_zero_bit);
#endif
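
/*
 * For any bitmap this returns the same position as
 * find_next_zero_bit(addr, size, 0) (and find_first_bit() above matches
 * find_next_bit(addr, size, 0)); the dedicated loops just skip the
 * first-word masking done by the generic helper, e.g.:
 *
 *	WARN_ON(find_first_zero_bit(map, bits) !=
 *		find_next_zero_bit(map, bits, 0));
 *
 * with 'map' and 'bits' being any caller-provided bitmap and length.
 */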

#ifndef find_last_bit
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
	if (size) {
		unsigned long val = BITMAP_LAST_WORD_MASK(size);
		unsigned long idx = (size - 1) / BITS_PER_LONG;

		do {
			val &= addr[idx];
			if (val)
				return idx * BITS_PER_LONG + __fls(val);

			val = ~0UL;
		} while (idx--);
	}
	return size;
}
EXPORT_SYMBOL(find_last_bit);
#endif
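
/*
 * Worked example, assuming 64-bit words and a hypothetical 100-bit bitmap
 * whose only set bit is bit 70:
 *
 *	initial val = BITMAP_LAST_WORD_MASK(100), masking off bits 100..127
 *	idx 1: val &= addr[1] leaves bit 6 set (70 % 64), __fls() == 6,
 *	       so the result is 1 * 64 + 6 = 70
 *
 * If no bit is set, the loop runs out and 'size' is returned, matching the
 * "not found" convention of the other helpers.
 */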

#ifdef __BIG_ENDIAN

/* include/linux/byteorder does not support "unsigned long" type */
static inline unsigned long ext2_swab(const unsigned long y)
{
#if BITS_PER_LONG == 64
	return (unsigned long) __swab64((u64) y);
#elif BITS_PER_LONG == 32
	return (unsigned long) __swab32((u32) y);
#else
#error BITS_PER_LONG not defined
#endif
}
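
/*
 * Why the swab is needed: a little-endian bitmap keeps bit 0 in the lowest-
 * addressed byte, so a big-endian CPU that loads those bytes as a native
 * unsigned long sees bit 0 of the bitmap in the most significant byte of the
 * register. For example, on a hypothetical 32-bit big-endian machine the LE
 * bitmap bytes {0x01, 0x00, 0x00, 0x00} (only bit 0 set) are read as
 * 0x01000000; ext2_swab() turns that back into 0x00000001 so that __ffs()
 * and __fls() see the bits at their logical positions.
 */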

#if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
		const unsigned long *addr2, unsigned long nbits,
		unsigned long start, unsigned long invert)
{
	unsigned long tmp;

	if (unlikely(start >= nbits))
		return nbits;

	tmp = addr1[start / BITS_PER_LONG];
	if (addr2)
		tmp &= addr2[start / BITS_PER_LONG];
	tmp ^= invert;

	/* Handle 1st word. */
	tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
	start = round_down(start, BITS_PER_LONG);

	while (!tmp) {
		start += BITS_PER_LONG;
		if (start >= nbits)
			return nbits;

		tmp = addr1[start / BITS_PER_LONG];
		if (addr2)
			tmp &= addr2[start / BITS_PER_LONG];
		tmp ^= invert;
	}

	return min(start + __ffs(ext2_swab(tmp)), nbits);
}
#endif

#ifndef find_next_zero_bit_le
unsigned long find_next_zero_bit_le(const void *addr, unsigned long size,
		unsigned long offset)
{
	return _find_next_bit_le(addr, NULL, size, offset, ~0UL);
}
EXPORT_SYMBOL(find_next_zero_bit_le);
#endif

#ifndef find_next_bit_le
unsigned long find_next_bit_le(const void *addr, unsigned long size,
		unsigned long offset)
{
	return _find_next_bit_le(addr, NULL, size, offset, 0UL);
}
EXPORT_SYMBOL(find_next_bit_le);
#endif
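
/*
 * The _le variants operate on bitmaps whose bit order is defined as
 * little-endian regardless of the host, e.g. on-disk allocation bitmaps in
 * ext2-family filesystems. Usage sketch ('disk_map' and 'nbits' are
 * illustrative, caller-provided names):
 *
 *	unsigned long free = find_next_zero_bit_le(disk_map, nbits, 0);
 *
 * On little-endian hosts these map directly to the native functions, which
 * is why the definitions above are only compiled for __BIG_ENDIAN.
 */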

#endif /* __BIG_ENDIAN */