xref: /openbmc/linux/include/asm-generic/atomic.h (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
1b4d0d230SThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-or-later */
23f7e212dSArnd Bergmann /*
32609a195SMark Rutland  * Generic C implementation of atomic counter operations. Do not include in
42609a195SMark Rutland  * machine independent code.
5acac43e2SArun Sharma  *
63f7e212dSArnd Bergmann  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
73f7e212dSArnd Bergmann  * Written by David Howells (dhowells@redhat.com)
83f7e212dSArnd Bergmann  */
93f7e212dSArnd Bergmann #ifndef __ASM_GENERIC_ATOMIC_H
103f7e212dSArnd Bergmann #define __ASM_GENERIC_ATOMIC_H
113f7e212dSArnd Bergmann 
1234484277SDavid Howells #include <asm/cmpxchg.h>
13febdbfe8SPeter Zijlstra #include <asm/barrier.h>
1434484277SDavid Howells 
153f7e212dSArnd Bergmann #ifdef CONFIG_SMP
16560cb12aSPeter Zijlstra 
17560cb12aSPeter Zijlstra /* we can build all atomic primitives from cmpxchg */
18560cb12aSPeter Zijlstra 
/*
 * SMP: build the void atomic RMW ops from an arch_cmpxchg() retry
 * loop — re-read the counter on contention until the exchange lands.
 */
#define ATOMIC_OP(op, c_op)						\
static inline void generic_atomic_##op(int i, atomic_t *v)		\
{									\
	int seen, prev;							\
									\
	seen = v->counter;						\
	for (;;) {							\
		prev = arch_cmpxchg(&v->counter, seen, seen c_op i);	\
		if (prev == seen)					\
			break;						\
		seen = prev;						\
	}								\
}
28560cb12aSPeter Zijlstra 
/*
 * SMP: like ATOMIC_OP(), but the generated function also returns the
 * counter value that results from applying the operation.
 */
#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int generic_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	int seen, prev;							\
									\
	seen = v->counter;						\
	for (;;) {							\
		prev = arch_cmpxchg(&v->counter, seen, seen c_op i);	\
		if (prev == seen)					\
			return seen c_op i;				\
		seen = prev;						\
	}								\
}
40560cb12aSPeter Zijlstra 
/*
 * SMP: like ATOMIC_OP(), but the generated function returns the value
 * the counter held just before the operation was applied.
 */
#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int generic_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int seen, prev;							\
									\
	seen = v->counter;						\
	for (;;) {							\
		prev = arch_cmpxchg(&v->counter, seen, seen c_op i);	\
		if (prev == seen)					\
			return seen;					\
		seen = prev;						\
	}								\
}
5228aa2bdaSPeter Zijlstra 
53560cb12aSPeter Zijlstra #else
54560cb12aSPeter Zijlstra 
55560cb12aSPeter Zijlstra #include <linux/irqflags.h>
56560cb12aSPeter Zijlstra 
/*
 * UP: with only one CPU, masking interrupts around the plain
 * read-modify-write is sufficient to make the update atomic.
 */
#define ATOMIC_OP(op, c_op)						\
static inline void generic_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long irq_state;					\
	int result;							\
									\
	raw_local_irq_save(irq_state);					\
	result = v->counter c_op i;					\
	v->counter = result;						\
	raw_local_irq_restore(irq_state);				\
}
66560cb12aSPeter Zijlstra 
/*
 * UP: irq-protected read-modify-write that also hands back the new
 * counter value.
 */
#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int generic_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long irq_state;					\
	int result;							\
									\
	raw_local_irq_save(irq_state);					\
	result = v->counter c_op i;					\
	v->counter = result;						\
	raw_local_irq_restore(irq_state);				\
									\
	return result;							\
}
79560cb12aSPeter Zijlstra 
/*
 * UP: irq-protected read-modify-write returning the counter value
 * observed before the operation was applied.
 */
#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int generic_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long irq_state;					\
	int old;							\
									\
	raw_local_irq_save(irq_state);					\
	old = v->counter;						\
	v->counter = old c_op i;					\
	raw_local_irq_restore(irq_state);				\
									\
	return old;							\
}
9328aa2bdaSPeter Zijlstra 
94560cb12aSPeter Zijlstra #endif /* CONFIG_SMP */
95560cb12aSPeter Zijlstra 
/* Value-returning ops: add/sub return the post-op counter value. */
ATOMIC_OP_RETURN(add, +)
ATOMIC_OP_RETURN(sub, -)

/* Fetch ops: each returns the counter value from before the op. */
ATOMIC_FETCH_OP(add, +)
ATOMIC_FETCH_OP(sub, -)
ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

/* Void ops: apply the operation, discard the result. */
ATOMIC_OP(add, +)
ATOMIC_OP(sub, -)
ATOMIC_OP(and, &)
ATOMIC_OP(or, |)
ATOMIC_OP(xor, ^)

/* The generator macros are local to this header; drop them. */
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
114560cb12aSPeter Zijlstra 
/*
 * Map the arch_atomic_* names onto the generic helpers generated
 * above.
 */
#define arch_atomic_add_return			generic_atomic_add_return
#define arch_atomic_sub_return			generic_atomic_sub_return

#define arch_atomic_fetch_add			generic_atomic_fetch_add
#define arch_atomic_fetch_sub			generic_atomic_fetch_sub
#define arch_atomic_fetch_and			generic_atomic_fetch_and
#define arch_atomic_fetch_or			generic_atomic_fetch_or
#define arch_atomic_fetch_xor			generic_atomic_fetch_xor

#define arch_atomic_add				generic_atomic_add
#define arch_atomic_sub				generic_atomic_sub
#define arch_atomic_and				generic_atomic_and
#define arch_atomic_or				generic_atomic_or
#define arch_atomic_xor				generic_atomic_xor

/* Plain load/store of the counter, wrapped in READ_ONCE()/WRITE_ONCE(). */
#define arch_atomic_read(v)			READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)			WRITE_ONCE(((v)->counter), (i))
132f8b6455aSMark Rutland 
133*656e9007SArnd Bergmann #endif /* __ASM_GENERIC_ATOMIC_H */
134*656e9007SArnd Bergmann