/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)		{ (i) }

/*  Normal writes in our arch don't clear lock reservations  */

static inline void atomic_set(atomic_t *v, int new)
{
	asm volatile(
		/* the load into r6 only establishes the reservation; its value is unused */
		"1:	r6 = memw_locked(%0);\n"
		"	memw_locked(%0,p0) = %1;\n"
		"	if (!p0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}
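
/*
 * Illustrative usage (not part of this header; assumes <linux/bug.h>
 * for BUG_ON()).  atomic_set() behaves as on any other arch, it just
 * has to take the locked path internally:
 *
 *	atomic_t seq = ATOMIC_INIT(0);
 *
 *	atomic_set(&seq, 42);
 *	BUG_ON(atomic_read(&seq) != 42);
 */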

/**
 * atomic_read - reads a word, atomically
 * @v: pointer to atomic value
 *
 * Assumes all word reads on our architecture are atomic.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_xchg - atomically exchange a value
 * @v: pointer to memory to change
 * @new: new value (technically passed in a register -- see xchg)
 *
 * Returns the old value.
 */
#define atomic_xchg(v, new)	(xchg(&((v)->counter), (new)))
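
/*
 * Illustrative usage (not part of this header): because atomic_xchg()
 * returns the previous value, it can hand off a one-shot flag exactly
 * once; do_the_work() is a hypothetical consumer:
 *
 *	static atomic_t pending = ATOMIC_INIT(1);
 *
 *	if (atomic_xchg(&pending, 0))
 *		do_the_work();		(runs at most once)
 */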
/**
 * atomic_cmpxchg - atomic compare-and-exchange values
 * @v: pointer to value to change
 * @old: expected old value to match
 * @new: new value to put in
 *
 * Parameters are pointer, value-in-register, value-in-register,
 * and the return value is the old value.
 *
 * This is more involved on architectures that lack a load-locked/
 * store-conditional pair like memw_locked.
 *
 * This is the linchpin of the rest of the generically defined routines.
 * Remember that V2 had a bug with the dot-new predicate set by
 * memw_locked.
 *
 * "old" is the expected old value; __oldval is the actual old value.
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"
		"	{ p0 = cmp.eq(%0,%2);\n"
		"	  if (!p0.new) jump:nt 2f; }\n"
		"	memw_locked(%1,p0) = %3;\n"
		"	if (!p0) jump 1b;\n"
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0"
	);

	return __oldval;
}
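
/*
 * Illustrative sketch (not part of this header) of the compare-and-swap
 * retry loop that the generic helpers are built from.  atomic_add_sat()
 * is a hypothetical name; it adds a positive @a but saturates at
 * INT_MAX (from <linux/kernel.h>):
 *
 *	static inline int atomic_add_sat(atomic_t *v, int a)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			new = (old > INT_MAX - a) ? INT_MAX : old + a;
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *
 *		return new;
 *	}
 */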

static inline int atomic_add_return(int i, atomic_t *v)
{
	int output;

	__asm__ __volatile__ (
		"1:	%0 = memw_locked(%1);\n"
		"	%0 = add(%0,%2);\n"
		"	memw_locked(%1,p3) = %0;\n"
		"	if (!p3) jump 1b;\n"
		: "=&r" (output)
		: "r" (&v->counter), "r" (i)
		: "memory", "p3"
	);
	return output;
}

#define atomic_add(i, v) atomic_add_return(i, (v))
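
/*
 * Illustrative usage (not part of this header): atomic_add_return()
 * yields the post-add value, so it can hand out unique ids without a
 * lock; next_id is hypothetical:
 *
 *	static atomic_t next_id = ATOMIC_INIT(0);
 *
 *	int id = atomic_add_return(1, &next_id);
 */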

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int output;

	__asm__ __volatile__ (
		"1:	%0 = memw_locked(%1);\n"
		"	%0 = sub(%0,%2);\n"
		"	memw_locked(%1,p3) = %0;\n"
		"	if (!p3) jump 1b;\n"
		: "=&r" (output)
		: "r" (&v->counter), "r" (i)
		: "memory", "p3"
	);
	return output;
}

#define atomic_sub(i, v) atomic_sub_return(i, (v))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: don't add if the value is equal to this
 *
 * Returns the old value.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if (!p3) jump 1b;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
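
/*
 * Illustrative usage (not part of this header): the generic
 * atomic_add_unless() in <linux/atomic.h> wraps __atomic_add_unless(),
 * and atomic_inc_not_zero() below is the classic "take a reference
 * unless the object is already dying" test; obj is hypothetical:
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;
 */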

/* atomic_add_unless() is built on __atomic_add_unless() by <linux/atomic.h> */
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
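
/*
 * Illustrative usage (not part of this header): atomic_dec_and_test()
 * is true only for the caller that drops the final reference, so
 * exactly one path frees; obj and its kfree() teardown are hypothetical:
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		kfree(obj);
 */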

#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))

#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* _ASM_ATOMIC_H */