/*
 * Generic C implementation of atomic counter operations. Usable on
 * UP systems only. Do not include in machine independent code.
 *
 * Originally implemented for MN10300.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H

#ifdef CONFIG_SMP
/* Force people to define core atomics */
# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
     !defined(atomic_clear_mask) || !defined(atomic_set_mask)
#  error "SMP requires a little arch-specific magic"
# endif
#endif

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#ifndef atomic_read
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#endif

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i) (((v)->counter) = (i))
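
/*
 * Illustrative usage of the basic accessors above; this is a sketch,
 * not part of the API, and "example_count" is a hypothetical name:
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	atomic_set(&example_count, 5);
 *	WARN_ON(atomic_read(&example_count) != 5);
 */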
#include <linux/irqflags.h>
#include <asm/system.h>

/**
 * atomic_add_return - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the result
 */
#ifndef atomic_add_return
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int temp;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	temp = v->counter;
	temp += i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}
#endif

/**
 * atomic_sub_return - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the result
 */
#ifndef atomic_sub_return
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int temp;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}
#endif

/* Returns true if the result of adding @i to @v is negative. */
static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
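
/*
 * Illustrative sketch of the arithmetic helpers above (not part of
 * this header); "example_users" is a hypothetical counter:
 *
 *	static atomic_t example_users = ATOMIC_INIT(0);
 *
 *	atomic_inc(&example_users);		counter is now 1
 *	atomic_add(2, &example_users);		counter is now 3
 *	if (atomic_sub_return(3, &example_users) == 0)
 *		pr_debug("no users left\n");	counter hit 0, branch taken
 */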

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)

#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))

#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

/*
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;
	return c;
}

/**
 * atomic_clear_mask - Atomically clear bits in atomic variable
 * @mask: Mask of the bits to be cleared
 * @v: pointer of type atomic_t
 *
 * Atomically clears the bits set in @mask from @v
 */
#ifndef atomic_clear_mask
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	unsigned long flags;

	mask = ~mask;
	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	v->counter &= mask;
	raw_local_irq_restore(flags);
}
#endif

/**
 * atomic_set_mask - Atomically set bits in atomic variable
 * @mask: Mask of the bits to be set
 * @v: pointer of type atomic_t
 *
 * Atomically sets the bits set in @mask in @v
 */
#ifndef atomic_set_mask
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	v->counter |= mask;
	raw_local_irq_restore(flags);
}
#endif
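
/*
 * Illustrative sketch of the helpers above (not part of this header);
 * "EXAMPLE_BIT" and "example_flags" are hypothetical:
 *
 *	#define EXAMPLE_BIT 0x01
 *	static atomic_t example_flags = ATOMIC_INIT(0);
 *
 *	atomic_set_mask(EXAMPLE_BIT, &example_flags);	set bit 0
 *	atomic_clear_mask(EXAMPLE_BIT, &example_flags);	clear it again
 *
 * Similarly, __atomic_add_unless(&v, 1, 0) increments @v only if it is
 * not currently zero, which is the usual building block for
 * atomic_inc_not_zero().
 */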

/* Assume that atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_ATOMIC_H */