#ifndef _MICROBLAZE_BITOPS_H
#define _MICROBLAZE_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* save_flags */

#ifdef __KERNEL__
/*
 * Function prototypes to keep gcc -Wall happy
 */

/*
 * The __ functions are not atomic
 */

extern void set_bit(int nr, volatile void * addr);
extern void __set_bit(int nr, volatile void * addr);

extern void clear_bit(int nr, volatile void * addr);
#define __clear_bit(nr, addr) clear_bit(nr, addr)
#define PLATFORM__CLEAR_BIT

extern void change_bit(int nr, volatile void * addr);
extern void __change_bit(int nr, volatile void * addr);
extern int test_and_set_bit(int nr, volatile void * addr);
extern int __test_and_set_bit(int nr, volatile void * addr);
extern int test_and_clear_bit(int nr, volatile void * addr);
extern int __test_and_clear_bit(int nr, volatile void * addr);
extern int test_and_change_bit(int nr, volatile void * addr);
extern int __test_and_change_bit(int nr, volatile void * addr);
extern int __constant_test_bit(int nr, const volatile void * addr);
extern int __test_bit(int nr, volatile void * addr);
extern int find_first_zero_bit(void * addr, unsigned size);
extern int find_next_zero_bit(void * addr, int size, int offset);
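
/*
 * Illustrative example (not part of the original header), assuming a
 * caller-owned bitmap word.  The plain helpers disable interrupts around
 * the read-modify-write, so they are safe against an interrupt handler
 * touching the same word; the __ variants skip that and are only safe
 * when nothing else can reach the bitmap concurrently:
 *
 *	set_bit(0, &shared_flags);	safe even if an IRQ handler hits it
 *	__set_bit(1, &local_mask);	cheaper, but only for private data
 *
 * shared_flags and local_mask are hypothetical names used only here.
 */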

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
extern __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long result = 0;

	while (word & 1) {
		result++;
		word >>= 1;
	}
	return result;
}
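
/*
 * Usage sketch (added for illustration, not in the original source):
 * ffz() is undefined for an all-ones word, so callers are expected to
 * test against ~0UL first, e.g.
 *
 *	if (word != ~0UL)
 *		bit = ffz(word);
 *
 * which leaves "bit" holding the index of the lowest clear bit; "word"
 * and "bit" are placeholder names for this example.
 */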


extern __inline__ void set_bit(int nr, volatile void * addr)
{
	volatile unsigned int *a = (volatile unsigned int *) addr;
	int	mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	save_flags_cli(flags);
	*a |= mask;
	restore_flags(flags);
}

extern __inline__ void __set_bit(int nr, volatile void * addr)
{
	volatile unsigned int *a = (volatile unsigned int *) addr;
	int	mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}
#define PLATFORM__SET_BIT

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
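
/*
 * Illustrative note (added here, not in the original source): on this
 * port the smp_mb__*_clear_bit() hooks expand to barrier(), a
 * compiler-only barrier.  Generic code still brackets clear_bit() with
 * them, e.g.
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(0, &pending);
 *	smp_mb__after_clear_bit();
 *
 * where "pending" is a placeholder bitmap name for this example.
 */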

extern __inline__ void clear_bit(int nr, volatile void * addr)
{
	volatile unsigned int *a = (volatile unsigned int *) addr;
	int	mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	save_flags_cli(flags);
	*a &= ~mask;
	restore_flags(flags);
}

extern __inline__ void change_bit(int nr, volatile void * addr)
{
	int mask;
	unsigned long flags;
	volatile unsigned long *ADDR = (volatile unsigned long *) addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	save_flags_cli(flags);
	*ADDR ^= mask;
	restore_flags(flags);
}

extern __inline__ void __change_bit(int nr, volatile void * addr)
{
	int mask;
	volatile unsigned long *ADDR = (volatile unsigned long *) addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}

extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	save_flags_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	restore_flags(flags);

	return retval;
}

extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;
	return retval;
}

extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	save_flags_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	restore_flags(flags);

	return retval;
}

extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	return retval;
}

extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	save_flags_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	restore_flags(flags);

	return retval;
}

extern __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;
	return retval;
}
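
/*
 * Usage sketch (illustrative, not part of the original header):
 * test_and_set_bit() returns the previous value of the bit, so it can
 * claim a flag exactly once even if an interrupt handler races with the
 * caller:
 *
 *	if (!test_and_set_bit(0, &claimed))
 *		do_setup();
 *
 * Only the first claimant reaches do_setup(); "claimed" and do_setup()
 * are made-up names for this example.
 */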

/*
 * This routine doesn't need to be atomic.
 */
extern __inline__ int __constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

extern __inline__ int __test_bit(int nr, volatile void * addr)
{
	volatile unsigned int *a = (volatile unsigned int *) addr;
	int	mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

extern __inline__ int find_next_zero_bit(void * addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* Mask off the bits beyond the requested size so ffz() cannot
	 * report a zero past the end of the bitmap. */
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
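
/*
 * Usage sketch (illustrative only): scanning an allocation bitmap for a
 * free slot.  The search returns a value >= the size argument when no
 * zero bit is found, so the result must be range-checked:
 *
 *	int slot = find_first_zero_bit(bitmap, NR_SLOTS);
 *	if (slot < NR_SLOTS)
 *		set_bit(slot, bitmap);
 *
 * "bitmap", "slot" and NR_SLOTS are placeholder names for this example.
 */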

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

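/*
 * Example (added for illustration): these helpers simply count set bits,
 * so hweight8(0xF0) evaluates to 4 and hweight32(0) to 0.
 */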

extern __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	int		mask, retval;
	unsigned long	flags;
	volatile unsigned char	*ADDR = (volatile unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_flags_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);
	return retval;
}

extern __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	int		mask, retval;
	unsigned long	flags;
	volatile unsigned char	*ADDR = (volatile unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_flags_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);
	return retval;
}

extern __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	int			mask;
	const volatile unsigned char	*ADDR = (const volatile unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static inline unsigned long ext2_find_next_zero_bit(void *addr,
				unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* tmp holds the word in its little-endian (on-disk) layout,
		 * so shifting a mask straight in would cover the wrong bits.
		 * We could keep a big-endian value in tmp instead, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but that would hurt performance, so we swab the constant
		 * mask instead:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above.  But we have to swab tmp for ffz anyway, so we
	 * might as well do both swabs here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
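
/*
 * Illustrative note (not in the original source): the ext2_* helpers use
 * little-endian bit numbering, i.e. bit nr lives in byte (nr >> 3) at
 * position (nr & 7) within that byte, regardless of CPU endianness.
 * Starting from a zeroed bitmap:
 *
 *	ext2_set_bit(0, map);
 *	ext2_test_bit(0, map);
 *
 * the second call returns 1, because both calls address bit 0 of the
 * first byte.  "map" is a placeholder for a caller-supplied buffer.
 */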

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _MICROBLAZE_BITOPS_H */