--- arch/arm/include/asm/atomic.h (3eb66e91a25497065c5322b1268cbc3953642227)
+++ arch/arm/include/asm/atomic.h (ef4cdc09260e2b0576423ca708e245e7549aa8e0)
 /*
  * arch/arm/include/asm/atomic.h
  *
  * Copyright (C) 1996 Russell King.
  * Copyright (C) 2002 Deep Blue Solutions Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as

--- 235 unchanged lines hidden ---

 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP

 #define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
-	long long counter;
+	s64 counter;
 } atomic64_t;

 #define ATOMIC64_INIT(i)	{ (i) }

 #ifdef CONFIG_ARM_LPAE
-static inline long long atomic64_read(const atomic64_t *v)
+static inline s64 atomic64_read(const atomic64_t *v)
 {
-	long long result;
+	s64 result;

 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrd	%0, %H0, [%1]"
 	: "=&r" (result)
 	: "r" (&v->counter), "Qo" (v->counter)
 	);

 	return result;
 }

-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void atomic64_set(atomic64_t *v, s64 i)
 {
 	__asm__ __volatile__("@ atomic64_set\n"
 "	strd	%2, %H2, [%1]"
 	: "=Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	);
 }
 #else
-static inline long long atomic64_read(const atomic64_t *v)
+static inline s64 atomic64_read(const atomic64_t *v)
 {
-	long long result;
+	s64 result;

 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrexd	%0, %H0, [%1]"
 	: "=&r" (result)
 	: "r" (&v->counter), "Qo" (v->counter)
 	);

 	return result;
 }

-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void atomic64_set(atomic64_t *v, s64 i)
 {
-	long long tmp;
+	s64 tmp;

 	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic64_set\n"
 "1:	ldrexd	%0, %H0, [%2]\n"
 "	strexd	%0, %3, %H3, [%2]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
 	: "=&r" (tmp), "=Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
 #endif

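A note on the two variants above: with CONFIG_ARM_LPAE, 64-bit aligned ldrd/strd are architecturally single-copy atomic, so plain loads and stores suffice. Pre-LPAE cores give no such guarantee, which is why even the plain store in atomic64_set() must spin on an ldrexd/strexd pair. A minimal C sketch of that contract, assuming hypothetical __ldrexd()/__strexd() helpers that stand in for the inline asm (they are not provided by this header):

	/* Sketch only: __ldrexd()/__strexd() are hypothetical stand-ins.
	 * Retrying until the exclusive store succeeds makes the 64-bit
	 * store appear single-copy atomic. */
	do {
		(void)__ldrexd(&v->counter);	/* claim the exclusive monitor */
	} while (__strexd(i, &v->counter));	/* returns 0 on success */
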
 #define ATOMIC64_OP(op, op1, op2)					\
-static inline void atomic64_##op(long long i, atomic64_t *v)		\
+static inline void atomic64_##op(s64 i, atomic64_t *v)			\
 {									\
-	long long result;						\
+	s64 result;							\
 	unsigned long tmp;						\
 									\
 	prefetchw(&v->counter);						\
 	__asm__ __volatile__("@ atomic64_" #op "\n"			\
 "1:	ldrexd	%0, %H0, [%3]\n"					\
 "	" #op1 " %Q0, %Q0, %Q4\n"					\
 "	" #op2 " %R0, %R0, %R4\n"					\
 "	strexd	%1, %0, %H0, [%3]\n"					\
 "	teq	%1, #0\n"						\
 "	bne	1b"							\
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
 	: "r" (&v->counter), "r" (i)					\
 	: "cc");							\
 }									\

 #define ATOMIC64_OP_RETURN(op, op1, op2)				\
-static inline long long						\
-atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
+static inline s64							\
+atomic64_##op##_return_relaxed(s64 i, atomic64_t *v)			\
 {									\
-	long long result;						\
+	s64 result;							\
 	unsigned long tmp;						\
 									\
 	prefetchw(&v->counter);						\
 									\
 	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
 "1:	ldrexd	%0, %H0, [%3]\n"					\
 "	" #op1 " %Q0, %Q0, %Q4\n"					\
 "	" #op2 " %R0, %R0, %R4\n"					\
 "	strexd	%1, %0, %H0, [%3]\n"					\
 "	teq	%1, #0\n"						\
 "	bne	1b"							\
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
 	: "r" (&v->counter), "r" (i)					\
 	: "cc");							\
 									\
 	return result;							\
 }

 #define ATOMIC64_FETCH_OP(op, op1, op2)					\
-static inline long long						\
-atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)		\
+static inline s64							\
+atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v)			\
 {									\
-	long long result, val;						\
+	s64 result, val;						\
 	unsigned long tmp;						\
 									\
 	prefetchw(&v->counter);						\
 									\
 	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
 "1:	ldrexd	%0, %H0, [%4]\n"					\
 "	" #op1 " %Q1, %Q0, %Q5\n"					\
 "	" #op2 " %R1, %R0, %R5\n"					\

--- 37 unchanged lines hidden ---

 #define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
 #define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP

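To make the generator macros concrete: the elided 37 lines presumably instantiate them along the lines of ATOMIC64_OPS(add, adds, adc) (an assumption here, since the instantiations fall in the hidden region). Expanding ATOMIC64_OP(add, adds, adc) by hand would yield approximately:

/* Hand expansion for illustration; instantiation arguments assumed. */
static inline void atomic64_add(s64 i, atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"	/* load the 64-bit pair exclusively */
"	adds	%Q0, %Q0, %Q4\n"	/* add low words, set carry */
"	adc	%R0, %R0, %R4\n"	/* add high words plus carry */
"	strexd	%1, %0, %H0, [%3]\n"	/* try to store; %1 == 0 on success */
"	teq	%1, #0\n"
"	bne	1b"			/* lost the exclusive: retry */
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

Here %Q and %R select the low and high register of the 64-bit operand, so op1 carries into op2 to form a full 64-bit operation.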
-static inline long long
-atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
+static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
 {
-	long long oldval;
+	s64 oldval;
 	unsigned long res;

 	prefetchw(&ptr->counter);

 	do {
 		__asm__ __volatile__("@ atomic64_cmpxchg\n"
 		"ldrexd		%1, %H1, [%3]\n"
 		"mov		%0, #0\n"

--- 4 unchanged lines hidden ---

 		: "r" (&ptr->counter), "r" (old), "r" (new)
 		: "cc");
 	} while (res);

 	return oldval;
 }
 #define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed

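As a usage illustration (not code from this header): callers build read-modify-write loops on top of this primitive, and the kernel's generic atomic code derives the fully ordered atomic64_cmpxchg() from the _relaxed variant defined above. A sketch of a typical caller-side loop, with hypothetical names:

/* Hypothetical helper: 64-bit increment that saturates at a limit. */
static inline s64 sample_inc_saturated(atomic64_t *v, s64 limit)
{
	s64 old, prev;

	for (old = atomic64_read(v); old < limit; old = prev) {
		prev = atomic64_cmpxchg(v, old, old + 1);
		if (prev == old)	/* no one raced us: increment landed */
			return old + 1;
	}
	return old;			/* already at the limit; unchanged */
}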
-static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
+static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
 {
-	long long result;
+	s64 result;
 	unsigned long tmp;

 	prefetchw(&ptr->counter);

 	__asm__ __volatile__("@ atomic64_xchg\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
 "	strexd	%1, %4, %H4, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
 	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
 	: "r" (&ptr->counter), "r" (new)
 	: "cc");

 	return result;
 }
 #define atomic64_xchg_relaxed		atomic64_xchg_relaxed

-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 {
-	long long result;
+	s64 result;
 	unsigned long tmp;

 	smp_mb();
 	prefetchw(&v->counter);

 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
 "	subs	%Q0, %Q0, #1\n"

--- 9 unchanged lines hidden ---

 	: "cc");

 	smp_mb();

 	return result;
 }
 #define atomic64_dec_if_positive atomic64_dec_if_positive

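The middle of atomic64_dec_if_positive() is elided, but the visible subs plus the smp_mb() pair frame its contract: decrement only when the result stays non-negative, with full ordering, returning the candidate result either way. An illustrative plain-C rendering (not atomic as written; the real code does this inside one ldrexd/strexd critical section):

/* Illustrative semantics only; hypothetical name, not atomic as written. */
static inline s64 sample_dec_if_positive(atomic64_t *v)
{
	s64 new = atomic64_read(v) - 1;	/* candidate result */

	if (new >= 0)
		atomic64_set(v, new);	/* commit only when non-negative */
	return new;			/* < 0 means nothing was stored */
}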
-static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
-						  long long u)
+static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long long oldval, newval;
+	s64 oldval, newval;
 	unsigned long tmp;

 	smp_mb();
 	prefetchw(&v->counter);

 	__asm__ __volatile__("@ atomic64_add_unless\n"
 "1:	ldrexd	%0, %H0, [%4]\n"
 "	teq	%0, %5\n"

--- 22 unchanged lines hidden ---
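The tail of atomic64_fetch_add_unless() is also elided; the teq against %5 above begins the comparison with u. The contract, as an illustrative plain-C rendering (not atomic as written; the real code loops on ldrexd/strexd with smp_mb() around the update):

/* Illustrative semantics only; hypothetical name, not atomic as written. */
static inline s64 sample_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 old = atomic64_read(v);

	if (old != u)
		atomic64_set(v, old + a);	/* add a unless counter == u */
	return old;				/* caller tests old != u */
}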