bitops.h (2576c28e3f623ed401db7e6197241865328620ef) vs. bitops.h (04e2eee4b02edcafce96c9c37b31b1a3318291a4)
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8

--- 4 unchanged lines hidden ---

13#error only <linux/bitops.h> can be included directly
14#endif
15
16#ifndef __ASSEMBLY__
17
18#include <linux/types.h>
19#include <linux/compiler.h>
20#include <asm/barrier.h>
21#ifndef CONFIG_ARC_HAS_LLSC
22#include <asm/smp.h>
23#endif
21
22/*
23 * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
24 * The Kconfig glue ensures that in SMP, this is only set if the container
25 * SoC/platform has cross-core coherent LLOCK/SCOND
26 */
27#if defined(CONFIG_ARC_HAS_LLSC)
28
24
25#if defined(CONFIG_ARC_HAS_LLSC)
26
27/*
28 * Hardware assisted Atomic-R-M-W
29 */
30
29static inline void set_bit(unsigned long nr, volatile unsigned long *m)
30{
31 unsigned int temp;
32
33 m += nr >> 5;
34
35 /*
36 * ARC ISA micro-optimization:
37 *
38 * Instructions dealing with bitpos only consider lower 5 bits (0-31)
39 * e.g (x << 33) is handled like (x << 1) by ASL instruction
40 * (mem pointer still needs adjustment to point to next word)
41 *
42 * Hence the masking to clamp @nr arg can be elided in general.
43 *
44 * However if @nr is a constant (above assumed it in a register),
45 * and greater than 31, gcc can optimize away (x << 33) to 0,
46 * as overflow, given the 32-bit ISA. Thus masking needs to be done
47 * for constant @nr, but no code is generated due to const prop.
48 */
49 if (__builtin_constant_p(nr))
50 nr &= 0x1f;
51
52 __asm__ __volatile__(
53 "1: llock %0, [%1] \n"
54 " bset %0, %0, %2 \n"
55 " scond %0, [%1] \n"
56 " bnz 1b \n"
57 : "=&r"(temp)
58 : "r"(m), "ir"(nr)
59 : "cc");
31#define BIT_OP(op, c_op, asm_op) \
32static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
33{ \
34 unsigned int temp; \
35 \
36 m += nr >> 5; \
37 \
38 /* \
39 * ARC ISA micro-optimization: \
40 * \
41 * Instructions dealing with bitpos only consider lower 5 bits \
42 * e.g (x << 33) is handled like (x << 1) by ASL instruction \
43 * (mem pointer still needs adjustment to point to next word) \
44 * \
45 * Hence the masking to clamp @nr arg can be elided in general. \
46 * \
47 * However if @nr is a constant (above assumed in a register), \
48 * and greater than 31, gcc can optimize away (x << 33) to 0, \
49 * as overflow, given the 32-bit ISA. Thus masking needs to be \
50 * done for const @nr, but no code is generated due to gcc \
51 * const prop. \
52 */ \
53 if (__builtin_constant_p(nr)) \
54 nr &= 0x1f; \
55 \
56 __asm__ __volatile__( \
57 "1: llock %0, [%1] \n" \
58 " " #asm_op " %0, %0, %2 \n" \
59 " scond %0, [%1] \n" \
60 " bnz 1b \n" \
61 : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \
62 : "r"(m), /* Not "m": llock only supports reg direct addr mode */ \
63 "ir"(nr) \
64 : "cc"); \
60}
61
65}
66
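/*
 * Illustrative expansion (a sketch, not part of either revision of the file):
 * instantiating the macro above as BIT_OP(set, |, bset) yields essentially the
 * set_bit() that the older revision spells out by hand, i.e.:
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned int temp;

	m += nr >> 5;			/* step to the 32-bit word holding bit @nr	*/
					/* e.g. nr = 40: word +1, hardware uses bit 40 & 31 = 8 */

	if (__builtin_constant_p(nr))	/* clamp needed only for constant @nr (see comment above) */
		nr &= 0x1f;

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"	/* load-locked the word			*/
	"	bset    %0, %0, %2	\n"	/* asm_op: set bit (nr & 31)		*/
	"	scond   %0, [%1]	\n"	/* store-conditional back to memory	*/
	"	bnz     1b		\n"	/* retry if scond failed		*/
	: "=&r"(temp)
	: "r"(m), "ir"(nr)
	: "cc");
}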
62static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
63{
64 unsigned int temp;
65
66 m += nr >> 5;
67
68 if (__builtin_constant_p(nr))
69 nr &= 0x1f;
70
71 __asm__ __volatile__(
72 "1: llock %0, [%1] \n"
73 " bclr %0, %0, %2 \n"
74 " scond %0, [%1] \n"
75 " bnz 1b \n"
76 : "=&r"(temp)
77 : "r"(m), "ir"(nr)
78 : "cc");
79}
80
81static inline void change_bit(unsigned long nr, volatile unsigned long *m)
82{
83 unsigned int temp;
84
85 m += nr >> 5;
86
87 if (__builtin_constant_p(nr))
88 nr &= 0x1f;
89
90 __asm__ __volatile__(
91 "1: llock %0, [%1] \n"
92 " bxor %0, %0, %2 \n"
93 " scond %0, [%1] \n"
94 " bnz 1b \n"
95 : "=&r"(temp)
96 : "r"(m), "ir"(nr)
97 : "cc");
98}
99
100/*
101 * Semantically:
102 * Test the bit
103 * if clear
104 * set it and return 0 (old value)
105 * else
106 * return 1 (old value).
107 *
108 * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
109 * and the old value of the bit is returned
110 */
111static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
112{
113 unsigned long old, temp;
114
115 m += nr >> 5;
116
117 if (__builtin_constant_p(nr))
118 nr &= 0x1f;
119
120 /*
121 * Explicit full memory barrier needed before/after as
122 * LLOCK/SCOND themselves don't provide any such semantics
123 */
124 smp_mb();
125
126 __asm__ __volatile__(
127 "1: llock %0, [%2] \n"
128 " bset %1, %0, %3 \n"
129 " scond %1, [%2] \n"
130 " bnz 1b \n"
131 : "=&r"(old), "=&r"(temp)
132 : "r"(m), "ir"(nr)
133 : "cc");
134
135 smp_mb();
136
137 return (old & (1 << nr)) != 0;
78#define TEST_N_BIT_OP(op, c_op, asm_op) \
79static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
80{ \
81 unsigned long old, temp; \
82 \
83 m += nr >> 5; \
84 \
85 if (__builtin_constant_p(nr)) \
86 nr &= 0x1f; \
87 \
88 /* \
89 * Explicit full memory barrier needed before/after as \
90 * LLOCK/SCOND themselves don't provide any such semantics \
91 */ \
92 smp_mb(); \
93 \
94 __asm__ __volatile__( \
95 "1: llock %0, [%2] \n" \
96 " " #asm_op " %1, %0, %3 \n" \
97 " scond %1, [%2] \n" \
98 " bnz 1b \n" \
99 : "=&r"(old), "=&r"(temp) \
100 : "r"(m), "ir"(nr) \
101 : "cc"); \
102 \
103 smp_mb(); \
104 \
105 return (old & (1 << nr)) != 0; \
138}
139
106}
107
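/*
 * Usage sketch (not from this file): all test_and_*_bit() variants return the
 * *old* value of the bit, so a caller can atomically claim a flag.
 * 'claim_word' and 'try_claim()' below are hypothetical names.
 */
static unsigned long claim_word;

static inline int try_claim(void)
{
	if (test_and_set_bit(0, &claim_word))
		return 0;	/* bit was already set: somebody else owns it */
	return 1;		/* bit was clear and is now set: we own it */
}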
140static inline int
141test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
142{
143 unsigned int old, temp;
144
145 m += nr >> 5;
146
147 if (__builtin_constant_p(nr))
148 nr &= 0x1f;
149
150 smp_mb();
151
152 __asm__ __volatile__(
153 "1: llock %0, [%2] \n"
154 " bclr %1, %0, %3 \n"
155 " scond %1, [%2] \n"
156 " bnz 1b \n"
157 : "=&r"(old), "=&r"(temp)
158 : "r"(m), "ir"(nr)
159 : "cc");
160
161 smp_mb();
162
163 return (old & (1 << nr)) != 0;
164}
165
166static inline int
167test_and_change_bit(unsigned long nr, volatile unsigned long *m)
168{
169 unsigned int old, temp;
170
171 m += nr >> 5;
172
173 if (__builtin_constant_p(nr))
174 nr &= 0x1f;
175
176 smp_mb();
177
178 __asm__ __volatile__(
179 "1: llock %0, [%2] \n"
180 " bxor %1, %0, %3 \n"
181 " scond %1, [%2] \n"
182 " bnz 1b \n"
183 : "=&r"(old), "=&r"(temp)
184 : "r"(m), "ir"(nr)
185 : "cc");
186
187 smp_mb();
188
189 return (old & (1 << nr)) != 0;
190}
191
192#else /* !CONFIG_ARC_HAS_LLSC */
193
194#include <asm/smp.h>
195
196/*
197 * Non hardware assisted Atomic-R-M-W
198 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
199 *
200 * There's "significant" micro-optimization in writing our own variants of
201 * bitops (over generic variants)
202 *
203 * (1) The generic APIs have "signed" @nr while we have it "unsigned"
204 * This avoids extra code being generated for pointer arithmetic, since
205 * the compiler is "not sure" that the index is NOT -ve
206 * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL etc.)
207 * only consider the bottom 5 bits of @nr, so NO need to mask them off.
208 * (GCC Quirk: however for constant @nr we still need to do the masking
209 * at compile time)
210 */
211
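/*
 * Sketch only (the real macros come from <asm/smp.h> and are not shown in this
 * diff): per the comment above, on UP bitops_lock()/bitops_unlock() reduce to
 * interrupt disabling, roughly:
 *
 *	#define bitops_lock(flags)	local_irq_save(flags)
 *	#define bitops_unlock(flags)	local_irq_restore(flags)
 *
 * while an SMP build additionally takes a global spinlock around the RMW.
 */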
212static inline void set_bit(unsigned long nr, volatile unsigned long *m)
213{
214 unsigned long temp, flags;
215 m += nr >> 5;
216
217 if (__builtin_constant_p(nr))
218 nr &= 0x1f;
219
220 bitops_lock(flags);
221
222 temp = *m;
223 *m = temp | (1UL << nr);
224
225 bitops_unlock(flags);
126#define BIT_OP(op, c_op, asm_op) \
127static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
128{ \
129 unsigned long temp, flags; \
130 m += nr >> 5; \
131 \
132 if (__builtin_constant_p(nr)) \
133 nr &= 0x1f; \
134 \
135 /* \
136 * spin lock/unlock provide the needed smp_mb() before/after \
137 */ \
138 bitops_lock(flags); \
139 \
140 temp = *m; \
141 *m = temp c_op (1UL << nr); \
142 \
143 bitops_unlock(flags); \
226}
227
144}
145
228static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
229{
230 unsigned long temp, flags;
231 m += nr >> 5;
232
233 if (__builtin_constant_p(nr))
234 nr &= 0x1f;
235
236 bitops_lock(flags);
237
238 temp = *m;
239 *m = temp & ~(1UL << nr);
240
241 bitops_unlock(flags);
146#define TEST_N_BIT_OP(op, c_op, asm_op) \
147static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
148{ \
149 unsigned long old, flags; \
150 m += nr >> 5; \
151 \
152 if (__builtin_constant_p(nr)) \
153 nr &= 0x1f; \
154 \
155 bitops_lock(flags); \
156 \
157 old = *m; \
158 *m = old c_op (1 << nr); \
159 \
160 bitops_unlock(flags); \
161 \
162 return (old & (1 << nr)) != 0; \
242}
243
163}
164
244static inline void change_bit(unsigned long nr, volatile unsigned long *m)
245{
246 unsigned long temp, flags;
247 m += nr >> 5;
248
249 if (__builtin_constant_p(nr))
250 nr &= 0x1f;
251
252 bitops_lock(flags);
253
254 temp = *m;
255 *m = temp ^ (1UL << nr);
256
257 bitops_unlock(flags);
258}
259
260static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
261{
262 unsigned long old, flags;
263 m += nr >> 5;
264
265 if (__builtin_constant_p(nr))
266 nr &= 0x1f;
267
268 /*
269 * spin lock/unlock provide the needed smp_mb() before/after
270 */
271 bitops_lock(flags);
272
273 old = *m;
274 *m = old | (1 << nr);
275
276 bitops_unlock(flags);
277
278 return (old & (1 << nr)) != 0;
279}
280
281static inline int
282test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
283{
284 unsigned long old, flags;
285 m += nr >> 5;
286
287 if (__builtin_constant_p(nr))
288 nr &= 0x1f;
289
290 bitops_lock(flags);
291
292 old = *m;
293 *m = old & ~(1 << nr);
294
295 bitops_unlock(flags);
296
297 return (old & (1 << nr)) != 0;
298}
299
300static inline int
301test_and_change_bit(unsigned long nr, volatile unsigned long *m)
302{
303 unsigned long old, flags;
304 m += nr >> 5;
305
306 if (__builtin_constant_p(nr))
307 nr &= 0x1f;
308
309 bitops_lock(flags);
310
311 old = *m;
312 *m = old ^ (1 << nr);
313
314 bitops_unlock(flags);
315
316 return (old & (1 << nr)) != 0;
317}
318
319#endif /* CONFIG_ARC_HAS_LLSC */
320
321/***************************************
322 * Non atomic variants
323 **************************************/
324
325static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
326{
327 unsigned long temp;
328 m += nr >> 5;
329
330 if (__builtin_constant_p(nr))
331 nr &= 0x1f;
332
333 temp = *m;
334 *m = temp | (1UL << nr);
171#define __BIT_OP(op, c_op, asm_op) \
172static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m) \
173{ \
174 unsigned long temp; \
175 m += nr >> 5; \
176 \
177 if (__builtin_constant_p(nr)) \
178 nr &= 0x1f; \
179 \
180 temp = *m; \
181 *m = temp c_op (1UL << nr); \
335}
336
182}
183
337static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
338{
339 unsigned long temp;
340 m += nr >> 5;
341
342 if (__builtin_constant_p(nr))
343 nr &= 0x1f;
344
345 temp = *m;
346 *m = temp & ~(1UL << nr);
184#define __TEST_N_BIT_OP(op, c_op, asm_op) \
185static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
186{ \
187 unsigned long old; \
188 m += nr >> 5; \
189 \
190 if (__builtin_constant_p(nr)) \
191 nr &= 0x1f; \
192 \
193 old = *m; \
194 *m = old c_op (1 << nr); \
195 \
196 return (old & (1 << nr)) != 0; \
347}
348
197}
198
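/*
 * Usage note (illustrative, not from this file; assumes <linux/spinlock.h>):
 * the double-underscore variants do a plain, non-atomic read-modify-write, so
 * they are only safe when the caller already serializes access.
 * 'maplock', 'bmap' and 'mark_pair()' below are hypothetical.
 */
static DEFINE_SPINLOCK(maplock);	/* guards 'bmap' */
static unsigned long bmap[4];

static void mark_pair(unsigned int idx)
{
	spin_lock(&maplock);
	__set_bit(idx, bmap);		/* non-atomic is fine: maplock serializes writers */
	__clear_bit(idx + 1, bmap);
	spin_unlock(&maplock);
}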
349static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
350{
351 unsigned long temp;
352 m += nr >> 5;
353
354 if (__builtin_constant_p(nr))
355 nr &= 0x1f;
356
357 temp = *m;
358 *m = temp ^ (1UL << nr);
359}
199#define BIT_OPS(op, c_op, asm_op) \
200 \
201 /* set_bit(), clear_bit(), change_bit() */ \
202 BIT_OP(op, c_op, asm_op) \
203 \
204 /* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
205 TEST_N_BIT_OP(op, c_op, asm_op) \
206 \
207 /* __set_bit(), __clear_bit(), __change_bit() */ \
208 __BIT_OP(op, c_op, asm_op) \
209 \
210 /* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
211 __TEST_N_BIT_OP(op, c_op, asm_op)
212
213BIT_OPS(set, |, bset)
214BIT_OPS(clear, & ~, bclr)
215BIT_OPS(change, ^, bxor)
216
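/*
 * For reference (summarizing the macro comments above, not a line of the
 * source): each BIT_OPS(op, c_op, asm_op) invocation instantiates four
 * functions, so the three lines above generate the whole bitops API:
 *
 *	BIT_OPS(set, |, bset)     -> set_bit(), test_and_set_bit(),
 *	                             __set_bit(), __test_and_set_bit()
 *	BIT_OPS(clear, & ~, bclr) -> clear_bit(), test_and_clear_bit(),
 *	                             __clear_bit(), __test_and_clear_bit()
 *	BIT_OPS(change, ^, bxor)  -> change_bit(), test_and_change_bit(),
 *	                             __change_bit(), __test_and_change_bit()
 */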
360
361static inline int
362__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
363{
364 unsigned long old;
365 m += nr >> 5;
366
367 if (__builtin_constant_p(nr))
368 nr &= 0x1f;
369
370 old = *m;
371 *m = old | (1 << nr);
372
373 return (old & (1 << nr)) != 0;
374}
375
376static inline int
377__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
378{
379 unsigned long old;
380 m += nr >> 5;
381
382 if (__builtin_constant_p(nr))
383 nr &= 0x1f;
384
385 old = *m;
386 *m = old & ~(1 << nr);
387
388 return (old & (1 << nr)) != 0;
389}
390
391static inline int
392__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
393{
394 unsigned long old;
395 m += nr >> 5;
396
397 if (__builtin_constant_p(nr))
398 nr &= 0x1f;
399
400 old = *m;
401 *m = old ^ (1 << nr);
402
403 return (old & (1 << nr)) != 0;
404}
405
406/*
407 * This routine doesn't need to be atomic.
408 */
409static inline int
410test_bit(unsigned int nr, const volatile unsigned long *addr)
411{
412 unsigned long mask;
413

--- 191 unchanged lines hidden ---