#ifndef _ASM_GENERIC_LOCAL_H
#define _ASM_GENERIC_LOCAL_H

#include <linux/config.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/atomic.h>
#include <asm/types.h>

/*
 * A signed long type for operations which are atomic for a single CPU.
 * Usually used in combination with per-cpu variables.
 *
 * This is the default implementation, which uses atomic_long_t.  That is
 * rather pointless: the whole point behind local_t is that some processors
 * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
 * running on this CPU.  local_t allows exploitation of such capabilities.
 */

/* Implement in terms of atomics. */

/* Don't use typedef: don't want them to be mixed with atomic_t's. */
typedef struct
{
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l,i)	atomic_long_set((&(l)->a),(i))
#define local_inc(l)	atomic_long_inc(&(l)->a)
#define local_dec(l)	atomic_long_dec(&(l)->a)
#define local_add(i,l)	atomic_long_add((i),(&(l)->a))
#define local_sub(i,l)	atomic_long_sub((i),(&(l)->a))

/* Non-atomic variants, i.e. preemption disabled and won't be touched
 * in interrupt, etc.  Some archs can optimize this case well. */
#define __local_inc(l)		local_set((l), local_read(l) + 1)
#define __local_dec(l)		local_set((l), local_read(l) - 1)
#define __local_add(i,l)	local_set((l), local_read(l) + (i))
#define __local_sub(i,l)	local_set((l), local_read(l) - (i))

/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable (e.g. mystruct.foo), not an address.
 */
#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
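
/*
 * Usage sketch (illustrative only, not part of this header): a per-CPU event
 * counter.  The names "my_events", count_event() and total_events() are made
 * up for the example; DEFINE_PER_CPU, get_cpu_var()/put_cpu_var(), per_cpu()
 * and for_each_possible_cpu() are assumed available via <linux/percpu.h> and
 * <linux/cpumask.h>.
 *
 *	static DEFINE_PER_CPU(local_t, my_events) = LOCAL_INIT(0);
 *
 *	static void count_event(void)
 *	{
 *		local_inc(&get_cpu_var(my_events));
 *		put_cpu_var(my_events);
 *	}
 *
 *	static unsigned long total_events(void)
 *	{
 *		unsigned long sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += local_read(&per_cpu(my_events, cpu));
 *		return sum;
 *	}
 *
 * Where preemption is already disabled (or the code is pinned to one CPU),
 * cpu_local_inc(my_events) performs the same increment without the
 * get_cpu_var()/put_cpu_var() pair.
 */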

/* Non-atomic increments, i.e. preemption disabled and won't be touched
 * in interrupt, etc.  Some archs can optimize this case well.
 */
#define __cpu_local_inc(v)	__local_inc(&__get_cpu_var(v))
#define __cpu_local_dec(v)	__local_dec(&__get_cpu_var(v))
#define __cpu_local_add(i, v)	__local_add((i), &__get_cpu_var(v))
#define __cpu_local_sub(i, v)	__local_sub((i), &__get_cpu_var(v))

#endif /* _ASM_GENERIC_LOCAL_H */