1 #ifndef _MICROBLAZE_BITOPS_H
2 #define _MICROBLAZE_BITOPS_H
3 
4 /*
5  * Copyright 1992, Linus Torvalds.
6  */
7 
8 #include <asm/byteorder.h>	/* swab32 */
9 #include <asm/system.h>		/* save_flags */
10 #include <asm-generic/bitops/fls.h>
11 #include <asm-generic/bitops/__fls.h>
12 #include <asm-generic/bitops/fls64.h>
13 #include <asm-generic/bitops/__ffs.h>
14 
15 #ifdef __KERNEL__
16 /*
17  * Function prototypes to keep gcc -Wall happy
18  */
19 
20 /*
21  * The __ functions are not atomic
22  */
23 
/* Atomic (IRQ-masking) and non-atomic ("__"-prefixed) bit operations.
 * All operate on little-endian bit order within 32-bit words. */
extern void set_bit(int nr, volatile void * addr);
extern void __set_bit(int nr, volatile void * addr);

extern void clear_bit(int nr, volatile void * addr);
/* No distinct non-atomic clear on this port; alias the atomic one. */
#define __clear_bit(nr, addr) clear_bit(nr, addr)
#define PLATFORM__CLEAR_BIT

extern void change_bit(int nr, volatile void * addr);
extern void __change_bit(int nr, volatile void * addr);
extern int test_and_set_bit(int nr, volatile void * addr);
extern int __test_and_set_bit(int nr, volatile void * addr);
extern int test_and_clear_bit(int nr, volatile void * addr);
extern int __test_and_clear_bit(int nr, volatile void * addr);
extern int test_and_change_bit(int nr, volatile void * addr);
extern int __test_and_change_bit(int nr, volatile void * addr);
extern int __constant_test_bit(int nr, const volatile void * addr);
extern int __test_bit(int nr, volatile void * addr);
/* NOTE(review): find_first_zero_bit is shadowed by a macro and
 * find_next_zero_bit is defined inline later in this header. */
extern int find_first_zero_bit(void * addr, unsigned size);
extern int find_next_zero_bit (void * addr, int size, int offset);
43 
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
extern __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long bit;

	/* Shift out trailing ones until the lowest zero reaches bit 0. */
	for (bit = 0; word & 1UL; bit++)
		word >>= 1;
	return bit;
}
58 
59 
/*
 * set_bit - atomically set bit @nr in the bitmap at @addr.
 *
 * Atomicity comes from masking interrupts around the read-modify-write
 * (this port has no atomic RMW instruction).
 */
extern __inline__ void set_bit(int nr, volatile void * addr)
{
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned int mask;
	unsigned long flags;

	a += nr >> 5;			/* select the 32-bit word */
	mask = 1U << (nr & 0x1f);	/* 1U: "1 << 31" on int is UB */
	save_flags_cli(flags);
	*a |= mask;
	restore_flags(flags);
}
72 
/*
 * __set_bit - non-atomic set_bit; caller guarantees exclusive access.
 */
extern __inline__ void __set_bit(int nr, volatile void * addr)
{
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned int mask;

	a += nr >> 5;			/* select the 32-bit word */
	mask = 1U << (nr & 0x1f);	/* 1U: "1 << 31" on int is UB */
	*a |= mask;
}
#define PLATFORM__SET_BIT
83 
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/*
 * clear_bit - atomically clear bit @nr in the bitmap at @addr
 * (interrupts masked across the read-modify-write).
 */
extern __inline__ void clear_bit(int nr, volatile void * addr)
{
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned int mask;
	unsigned long flags;

	a += nr >> 5;			/* select the 32-bit word */
	mask = 1U << (nr & 0x1f);	/* 1U: "1 << 31" on int is UB */
	save_flags_cli(flags);
	*a &= ~mask;
	restore_flags(flags);
}
102 
/*
 * change_bit - atomically toggle bit @nr in the bitmap at @addr.
 */
extern __inline__ void change_bit(int nr, volatile void * addr)
{
	unsigned long mask;
	unsigned long flags;
	volatile unsigned long *ADDR = (volatile unsigned long *) addr;

	ADDR += nr >> 5;		/* select the word */
	/* 1UL: matches the width of *ADDR and avoids both the UB of
	 * "1 << 31" and int-to-long sign extension of the mask. */
	mask = 1UL << (nr & 31);
	save_flags_cli(flags);
	*ADDR ^= mask;
	restore_flags(flags);
}
115 
/*
 * __change_bit - non-atomic change_bit; caller guarantees exclusivity.
 */
extern __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long mask;
	volatile unsigned long *ADDR = (volatile unsigned long *) addr;

	ADDR += nr >> 5;		/* select the word */
	/* 1UL: matches *ADDR width; no UB for bit 31, no sign extension */
	mask = 1UL << (nr & 31);
	*ADDR ^= mask;
}
125 
/*
 * test_and_set_bit - atomically set bit @nr; return its old value (0/1).
 */
extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	unsigned int mask;
	int retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;			/* select the 32-bit word */
	mask = 1U << (nr & 0x1f);	/* 1U: "1 << 31" on int is UB */
	save_flags_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	restore_flags(flags);

	return retval;
}
141 
/*
 * __test_and_set_bit - non-atomic test_and_set_bit; returns old bit (0/1).
 */
extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	unsigned int mask;
	int retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;			/* select the 32-bit word */
	mask = 1U << (nr & 0x1f);	/* 1U: "1 << 31" on int is UB */
	retval = (mask & *a) != 0;
	*a |= mask;
	return retval;
}
153 
/*
 * test_and_clear_bit - atomically clear bit @nr; return its old value (0/1).
 */
extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	unsigned int mask;
	int retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;			/* select the 32-bit word */
	mask = 1U << (nr & 0x1f);	/* 1U: "1 << 31" on int is UB */
	save_flags_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	restore_flags(flags);

	return retval;
}
169 
/*
 * __test_and_clear_bit - non-atomic test_and_clear_bit; returns old bit.
 */
extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	unsigned int mask;
	int retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;			/* select the 32-bit word */
	mask = 1U << (nr & 0x1f);	/* 1U: "1 << 31" on int is UB */
	retval = (mask & *a) != 0;
	*a &= ~mask;
	return retval;
}
181 
/*
 * test_and_change_bit - atomically toggle bit @nr; return its old value.
 */
extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	unsigned int mask;
	int retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;			/* select the 32-bit word */
	mask = 1U << (nr & 0x1f);	/* 1U: "1 << 31" on int is UB */
	save_flags_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	restore_flags(flags);

	return retval;
}
197 
/*
 * __test_and_change_bit - non-atomic test_and_change_bit; returns old bit.
 */
extern __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	unsigned int mask;
	int retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;			/* select the 32-bit word */
	mask = 1U << (nr & 0x1f);	/* 1U: "1 << 31" on int is UB */
	retval = (mask & *a) != 0;
	*a ^= mask;
	return retval;
}
209 
/*
 * This routine doesn't need to be atomic.
 * Returns 0 or 1 according to bit @nr of the bitmap at @addr.
 */
extern __inline__ int __constant_test_bit(int nr, const volatile void * addr)
{
	const volatile unsigned int *p = (const volatile unsigned int *) addr;

	return (p[nr >> 5] >> (nr & 31)) & 1;
}
217 
/*
 * __test_bit - runtime (non-constant-folding) test_bit; returns 0 or 1.
 */
extern __inline__ int __test_bit(int nr, volatile void * addr)
{
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned int mask;

	a += nr >> 5;			/* select the 32-bit word */
	mask = 1U << (nr & 0x1f);	/* 1U: "1 << 31" on int is UB */
	return (mask & *a) != 0;
}
227 
/*
 * test_bit - report (0/1) whether bit @nr is set in the bitmap at @addr.
 * Dispatches to __constant_test_bit() when @nr folds at compile time.
 */
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))

/* Shadows the extern prototype above with a macro form. */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
235 
/*
 * find_next_zero_bit - find the first zero bit at or above @offset
 * @addr: start of the bitmap (32-bit words, little-endian bit order)
 * @size: size of the bitmap in bits
 * @offset: bit number to start searching at
 *
 * Returns the bit number of the first zero bit, or a value >= @size if
 * none exists.  Not atomic; assumes a 32-bit unsigned long (MicroBlaze).
 */
extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* Partial first word: pretend the bits below @offset are
		 * ones so they can never be reported. */
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	/* Scan whole 32-bit words. */
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;	/* trailing partial word */

found_first:
	/* Treat bits at and above @size as ones.  BUGFIX: this was
	 * "~0UL >> size", which sets the *valid low* bits instead and
	 * made partial-last-word searches return garbage; "<< size" is
	 * the form used by ext2_find_next_zero_bit below and by the
	 * generic kernel implementation. */
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
271 
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 *
 * generic_hweight* are not defined in this header; they are expected
 * to be provided by the generic kernel bitops support.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
280 
281 
/*
 * ext2_set_bit - set bit @nr in a byte-oriented (ext2 on-disk) bitmap,
 * with interrupts masked; returns the previous value of the bit (0/1).
 */
extern __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	volatile unsigned char *byte = (unsigned char *) addr + (nr >> 3);
	int bit = 1 << (nr & 0x07);
	int old;
	unsigned long flags;

	save_flags_cli(flags);
	old = (*byte & bit) != 0;
	*byte |= bit;
	restore_flags(flags);
	return old;
}
296 
/*
 * ext2_clear_bit - clear bit @nr in a byte-oriented (ext2 on-disk)
 * bitmap, with interrupts masked; returns the previous value (0/1).
 */
extern __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	volatile unsigned char *byte = (unsigned char *) addr + (nr >> 3);
	int bit = 1 << (nr & 0x07);
	int old;
	unsigned long flags;

	save_flags_cli(flags);
	old = (*byte & bit) != 0;
	*byte &= ~bit;
	restore_flags(flags);
	return old;
}
311 
/*
 * ext2_test_bit - report (0/1) bit @nr of a byte-oriented bitmap.
 */
extern __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	const volatile unsigned char *byte =
		(const unsigned char *) addr + (nr >> 3);

	return (*byte >> (nr & 0x07)) & 1;
}
321 
#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

/*
 * ext2_find_next_zero_bit - find_next_zero_bit over an ext2 on-disk
 * bitmap, whose words are stored little-endian and are byte-swapped
 * (via __swab32) before ffz() is applied.
 *
 * NOTE(review): declared "static inline" while the rest of this file
 * uses "extern __inline__" — works, but inconsistent with the file.
 */
static inline unsigned long ext2_find_next_zero_bit(void *addr,
				unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if(offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease preformance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if(size < 32)
			goto found_first;
		if(~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	/* Scan whole 32-bit words; any word with a zero bit ends the loop. */
	while(size & ~31UL) {
		if(~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if(!size)
		return result;
	tmp = *p;	/* trailing partial word */

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
375 
/* Bitmap functions for the minix filesystem.  */
/* Straight aliases for the generic bit operations defined above. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
382 
/*
 * The hweight32/hweight16/hweight8 macros are defined earlier in this
 * header; a second, token-identical set of definitions that used to
 * sit here has been removed as redundant.
 */
393 
394 #endif /* __KERNEL__ */
395 
396 #endif /* _MICROBLAZE_BITOPS_H */
397