1 #ifndef _MICROBLAZE_BITOPS_H
2 #define _MICROBLAZE_BITOPS_H
3 
4 /*
5  * Copyright 1992, Linus Torvalds.
6  */
7 
8 #include <asm/byteorder.h>	/* swab32 */
9 #include <asm/system.h>		/* save_flags */
10 
11 #ifdef __KERNEL__
12 /*
13  * Function prototypes to keep gcc -Wall happy
14  */
15 
16 /*
17  * The __ functions are not atomic
18  */
19 
/*
 * Prototypes for the bit operations defined inline below.  The plain
 * names are the interrupt-safe (atomic) forms; the "__" names are the
 * non-atomic forms, for callers that already hold the needed locking.
 */
extern void set_bit(int nr, volatile void * addr);
extern void __set_bit(int nr, volatile void * addr);

extern void clear_bit(int nr, volatile void * addr);
/* clear_bit() is cheap enough here to double as the non-atomic form. */
#define __clear_bit(nr, addr) clear_bit(nr, addr)
#define PLATFORM__CLEAR_BIT

extern void change_bit(int nr, volatile void * addr);
extern void __change_bit(int nr, volatile void * addr);
extern int test_and_set_bit(int nr, volatile void * addr);
extern int __test_and_set_bit(int nr, volatile void * addr);
extern int test_and_clear_bit(int nr, volatile void * addr);
extern int __test_and_clear_bit(int nr, volatile void * addr);
extern int test_and_change_bit(int nr, volatile void * addr);
extern int __test_and_change_bit(int nr, volatile void * addr);
extern int __constant_test_bit(int nr, const volatile void * addr);
extern int __test_bit(int nr, volatile void * addr);
extern int find_first_zero_bit(void * addr, unsigned size);
extern int find_next_zero_bit (void * addr, int size, int offset);
39 
/*
 * ffz - Find First Zero in @word, counting from the least
 * significant bit.  The result is undefined when @word contains no
 * zero bit at all, so callers should compare against ~0UL first.
 */
extern __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long bit;

	for (bit = 0; word & 1; bit++)
		word >>= 1;
	return bit;
}
54 
55 
/*
 * set_bit - set bit @nr in the bitmap at @addr.
 * Made atomic by disabling interrupts around the read-modify-write
 * (save_flags_cli/restore_flags come from <asm/system.h>).
 */
extern __inline__ void set_bit(int nr, volatile void * addr)
{
	unsigned long flags;
	int *word = ((int *) addr) + (nr >> 5);
	int bit = 1 << (nr & 0x1f);

	save_flags_cli(flags);
	*word |= bit;
	restore_flags(flags);
}
68 
/*
 * __set_bit - non-atomic variant of set_bit(); the caller must
 * provide any locking needed against concurrent updates.
 */
extern __inline__ void __set_bit(int nr, volatile void * addr)
{
	int *word = ((int *) addr) + (nr >> 5);

	*word |= 1 << (nr & 0x1f);
}
#define PLATFORM__SET_BIT
79 
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 * These supply compiler-level barriers for callers that need ordering
 * around clear_bit(); barrier() is presumably defined by one of the
 * headers included above (e.g. <asm/system.h>) — verify in that header.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
85 
/*
 * clear_bit - clear bit @nr in the bitmap at @addr.
 * Made atomic by disabling interrupts around the read-modify-write.
 */
extern __inline__ void clear_bit(int nr, volatile void * addr)
{
	unsigned long flags;
	int *word = ((int *) addr) + (nr >> 5);
	int bit = 1 << (nr & 0x1f);

	save_flags_cli(flags);
	*word &= ~bit;
	restore_flags(flags);
}
98 
/*
 * change_bit - toggle bit @nr in the bitmap at @addr.
 * Made atomic by disabling interrupts around the read-modify-write.
 */
extern __inline__ void change_bit(int nr, volatile void * addr)
{
	unsigned long flags;
	unsigned long *word = ((unsigned long *) addr) + (nr >> 5);
	int bit = 1 << (nr & 31);

	save_flags_cli(flags);
	*word ^= bit;
	restore_flags(flags);
}
111 
/*
 * __change_bit - non-atomic variant of change_bit(); the caller must
 * provide any locking needed against concurrent updates.
 */
extern __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long *word = ((unsigned long *) addr) + (nr >> 5);

	*word ^= 1 << (nr & 31);
}
121 
/*
 * test_and_set_bit - set bit @nr at @addr and return its previous
 * value (0 or 1).  Made atomic by disabling interrupts around the
 * read-modify-write.
 */
extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);
	unsigned long flags;
	int bit = 1 << (nr & 0x1f);
	int old;

	save_flags_cli(flags);
	old = (*word & bit) != 0;
	*word |= bit;
	restore_flags(flags);

	return old;
}
137 
/*
 * __test_and_set_bit - non-atomic variant of test_and_set_bit();
 * returns the previous value (0 or 1) of bit @nr.
 */
extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);
	int bit = 1 << (nr & 0x1f);
	int old = (*word & bit) != 0;

	*word |= bit;
	return old;
}
149 
/*
 * test_and_clear_bit - clear bit @nr at @addr and return its previous
 * value (0 or 1).  Made atomic by disabling interrupts around the
 * read-modify-write.
 */
extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);
	unsigned long flags;
	int bit = 1 << (nr & 0x1f);
	int old;

	save_flags_cli(flags);
	old = (*word & bit) != 0;
	*word &= ~bit;
	restore_flags(flags);

	return old;
}
165 
/*
 * __test_and_clear_bit - non-atomic variant of test_and_clear_bit();
 * returns the previous value (0 or 1) of bit @nr.
 */
extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);
	int bit = 1 << (nr & 0x1f);
	int old = (*word & bit) != 0;

	*word &= ~bit;
	return old;
}
177 
/*
 * test_and_change_bit - toggle bit @nr at @addr and return its
 * previous value (0 or 1).  Made atomic by disabling interrupts
 * around the read-modify-write.
 */
extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);
	unsigned long flags;
	int bit = 1 << (nr & 0x1f);
	int old;

	save_flags_cli(flags);
	old = (*word & bit) != 0;
	*word ^= bit;
	restore_flags(flags);

	return old;
}
193 
/*
 * __test_and_change_bit - non-atomic variant of test_and_change_bit();
 * returns the previous value (0 or 1) of bit @nr.
 */
extern __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);
	int bit = 1 << (nr & 0x1f);
	int old = (*word & bit) != 0;

	*word ^= bit;
	return old;
}
205 
/*
 * __constant_test_bit - read bit @nr at @addr; used by the test_bit()
 * macro when @nr is a compile-time constant.  A plain read, so this
 * routine doesn't need to be atomic.
 */
extern __inline__ int __constant_test_bit(int nr, const volatile void * addr)
{
	const volatile unsigned int *p = (const volatile unsigned int *) addr;

	return (p[nr >> 5] >> (nr & 31)) & 1;
}
213 
/*
 * __test_bit - read bit @nr at @addr; used by the test_bit() macro
 * when @nr is not a compile-time constant.  A plain read, not atomic.
 */
extern __inline__ int __test_bit(int nr, volatile void * addr)
{
	int *word = ((int *) addr) + (nr >> 5);

	return (*word & (1 << (nr & 0x1f))) ? 1 : 0;
}
223 
/*
 * test_bit - dispatch on whether @nr is a compile-time constant so the
 * compiler can fold the constant case; both branches behave the same.
 */
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))

/* Search the whole bitmap, starting at bit 0. */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
231 
/*
 * find_next_zero_bit - find the first zero bit at or after @offset in
 * a bitmap of @size bits (32-bit words, native bit order).
 * @addr:   base of the bitmap
 * @size:   total number of valid bits
 * @offset: bit number to start searching from
 *
 * Returns the bit number of the first zero bit found; when no zero
 * bit exists in range, the return value is >= @size (callers must
 * range-check the result, as is conventional for this interface).
 * Not atomic.
 */
extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		/* Force the bits below @offset to 1 so ffz() skips them. */
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	/* Scan full words: any word that is not all ones holds a zero. */
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/*
	 * Mask the tail word: bits at positions >= size lie outside the
	 * bitmap and must read as ones so ffz() cannot return them.
	 * This needs "~0UL << size" (set the HIGH bits); the previous
	 * "~0UL >> size" set the low — i.e. valid — bits instead, which
	 * could both hide real zero bits and report a hit past @size.
	 * The ext2 variant below already used the "<< size" form.
	 */
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
267 
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 *
 * Delegates to the generic_hweightN() helpers (defined elsewhere in
 * the kernel headers).  NOTE(review): these same three macros are
 * defined again, identically, near the end of this file — harmless
 * but redundant.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
276 
277 
/*
 * ext2_set_bit - set bit @nr in an ext2-style (little-endian,
 * byte-addressed) bitmap and return its previous value (0 or 1).
 * Made atomic by disabling interrupts around the read-modify-write.
 */
extern __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	volatile unsigned char *byte = (unsigned char *) addr + (nr >> 3);
	unsigned long flags;
	int bit = 1 << (nr & 0x07);
	int old;

	save_flags_cli(flags);
	old = (*byte & bit) != 0;
	*byte |= bit;
	restore_flags(flags);
	return old;
}
292 
/*
 * ext2_clear_bit - clear bit @nr in an ext2-style (little-endian,
 * byte-addressed) bitmap and return its previous value (0 or 1).
 * Made atomic by disabling interrupts around the read-modify-write.
 */
extern __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	volatile unsigned char *byte = (unsigned char *) addr + (nr >> 3);
	unsigned long flags;
	int bit = 1 << (nr & 0x07);
	int old;

	save_flags_cli(flags);
	old = (*byte & bit) != 0;
	*byte &= ~bit;
	restore_flags(flags);
	return old;
}
307 
/*
 * ext2_test_bit - read bit @nr of an ext2-style (little-endian,
 * byte-addressed) bitmap.  A plain read, not atomic.
 */
extern __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	const volatile unsigned char *byte =
		(const unsigned char *) addr + (nr >> 3);

	return (*byte >> (nr & 0x07)) & 1;
}
317 
/* Search a whole ext2-style bitmap, starting at bit 0. */
#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)
320 
/*
 * ext2_find_next_zero_bit - find the first zero bit at or after
 * @offset in an ext2-style (little-endian bit order) bitmap of @size
 * bits.  Words are read in native CPU order and viewed through
 * __swab32() to recover little-endian bit numbering (the swabbing
 * implies this port's native order differs from little-endian —
 * consistent with Microblaze being big-endian; confirm for the
 * specific build).  When no zero bit exists in range, the return
 * value is >= @size.  Not atomic.
 */
static inline unsigned long ext2_find_next_zero_bit(void *addr,
				unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if(offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift: swab the mask once instead of swabbing every
		 * word.
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if(size < 32)
			goto found_first;
		if(~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	/* Full words: any word that is not all ones contains a zero,
	 * regardless of byte order, so no swab is needed to detect it. */
	while(size & ~31UL) {
		if(~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if(!size)
		return result;
	tmp = *p;	/* partial trailing word, 0 < size < 32 bits valid */

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.  "~0UL << size" forces the
	 * out-of-range high bits to ones so ffz() cannot return them.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
371 
/* Bitmap functions for the minix filesystem.  Minix bitmaps use the
 * native bit order on this port, so they map straight onto the
 * generic operations above. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
378 
/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 *
 * NOTE(review): these three macros duplicate identical definitions
 * earlier in this file.  Identical macro redefinition is legal C, so
 * this is harmless, but one copy could be dropped in a cleanup.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
389 
390 #endif /* __KERNEL__ */
391 
392 #endif /* _MICROBLAZE_BITOPS_H */
393