arch/arm/include/asm/atomic.h: diff between blob
27eb2c4b3d3e13f376a359e293c212a2e9407af5 (old) and blob
f38d999c4d16fc0fce4270374f15fbb2d8713c09 (new).

The new revision adds #include <linux/prefetch.h> and calls prefetchw()
on the destination word before every exclusive-access (ldrex/strex) loop,
hinting the CPU to fetch the cache line in a writable state before the
store-exclusive is attempted.

--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
 /*
  * arch/arm/include/asm/atomic.h
  *
  * Copyright (C) 1996 Russell King.
  * Copyright (C) 2002 Deep Blue Solutions Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
 #ifndef __ASM_ARM_ATOMIC_H
 #define __ASM_ARM_ATOMIC_H
 
 #include <linux/compiler.h>
+#include <linux/prefetch.h>
 #include <linux/types.h>
 #include <linux/irqflags.h>
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)	{ (i) }
 
 #ifdef __KERNEL__

--- 13 unchanged lines hidden ---

  * store exclusive to ensure that these are atomic.  We may loop
  * to ensure that the update happens.
  */
 static inline void atomic_add(int i, atomic_t *v)
 {
 	unsigned long tmp;
 	int result;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic_add\n"
 "1:	ldrex	%0, [%3]\n"
 "	add	%0, %0, %4\n"
 "	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)

--- 22 unchanged lines hidden ---

 	return result;
 }
 
 static inline void atomic_sub(int i, atomic_t *v)
 {
 	unsigned long tmp;
 	int result;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic_sub\n"
 "1:	ldrex	%0, [%3]\n"
 "	sub	%0, %0, %4\n"
 "	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 	: "r" (&v->counter), "Ir" (i)

--- 43 unchanged lines hidden ---

 
 	return oldval;
 }
 
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 	unsigned long tmp, tmp2;
 
+	prefetchw(addr);
 	__asm__ __volatile__("@ atomic_clear_mask\n"
 "1:	ldrex	%0, [%3]\n"
 "	bic	%0, %0, %4\n"
 "	strex	%1, %0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
 	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
 	: "r" (addr), "Ir" (mask)

--- 129 unchanged lines hidden ---

 
 	return result;
 }
 
 static inline void atomic64_set(atomic64_t *v, u64 i)
 {
 	u64 tmp;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic64_set\n"
 "1:	ldrexd	%0, %H0, [%2]\n"
 "	strexd	%0, %3, %H3, [%2]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
 	: "=&r" (tmp), "=Qo" (v->counter)
 	: "r" (&v->counter), "r" (i)
 	: "cc");
 }
 #endif
 
 static inline void atomic64_add(u64 i, atomic64_t *v)
 {
 	u64 result;
 	unsigned long tmp;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic64_add\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
 "	adds	%0, %0, %4\n"
 "	adc	%H0, %H0, %H4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)

--- 24 unchanged lines hidden ---

 	return result;
 }
 
 static inline void atomic64_sub(u64 i, atomic64_t *v)
 {
 	u64 result;
 	unsigned long tmp;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic64_sub\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
 "	subs	%0, %0, %4\n"
 "	sbc	%H0, %H0, %H4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
 	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)

--- 141 unchanged lines hidden ---
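
The pattern the patch applies is identical in every function: hint the
destination cache line for write, then run the load-exclusive/store-exclusive
retry loop. Below is a minimal user-space sketch of that pattern, assuming
GCC/Clang and C11 <stdatomic.h>. The name atomic_add_sketch is hypothetical;
the weak compare-exchange stands in for the ldrex/strex pair, and GCC's
__builtin_prefetch(p, 1) stands in for the kernel's prefetchw().

#include <stdatomic.h>

/* Hypothetical user-space analogue of atomic_add() above; illustrative
 * only, not the kernel's implementation. */
static void atomic_add_sketch(int i, _Atomic int *v)
{
	/* Stand-in for prefetchw(&v->counter): request the line in a
	 * writable state before the exclusive store is attempted. */
	__builtin_prefetch((const void *)v, 1 /* prefetch for write */);

	int old = atomic_load_explicit(v, memory_order_relaxed);
	/* A weak CAS, like strex, may fail even without real contention,
	 * so loop until it sticks: the C-level mirror of "bne 1b".
	 * On failure the CAS reloads the current value into 'old'. */
	while (!atomic_compare_exchange_weak_explicit(
			v, &old, old + i,
			memory_order_relaxed, memory_order_relaxed))
		;
}

memory_order_relaxed matches the convention visible in the diff: the
non-value-returning atomics above carry no memory barriers, while the
value-returning variants (in the hidden regions) add them separately.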