/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Normal writes in our arch don't clear lock reservations, so
 * arch_atomic_set() goes through a locked store: that way a
 * concurrent LL/SC sequence on the same word loses its
 * reservation and retries instead of clobbering the new value.
 */

static inline void arch_atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"	/* grab the reservation */
		"	memw_locked(%0,p0) = %1;\n"	/* locked store of new */
		"	if (!P0) jump 1b;\n"		/* store failed: retry */
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}
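
/*
 * Illustrative sketch, not part of this header: because the store in
 * arch_atomic_set() goes through memw_locked, a concurrent LL/SC loop
 * on the same word loses its reservation and retries rather than
 * overwriting the new value with a stale result:
 *
 *	atomic_t v = ATOMIC_INIT(0);
 *
 *	arch_atomic_set(&v, 42);	// competing locked loops must retry
 */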

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

/**
 * arch_atomic_read - reads a word, atomically
 * @v: pointer to atomic value
 *
 * Assumes all word reads on our architecture are atomic.
 */
#define arch_atomic_read(v)		READ_ONCE((v)->counter)

/**
 * arch_atomic_xchg - atomically exchange a value
 * @v: pointer to memory to change
 * @new: new value (technically passed in a register -- see xchg)
 *
 * Returns the old value.
 */
#define arch_atomic_xchg(v, new)	(arch_xchg(&((v)->counter), (new)))
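
/*
 * Illustrative sketch, not part of this header: xchg makes a natural
 * one-shot claim flag, since exactly one caller can observe the old
 * value 0 ("claimed" is a hypothetical variable):
 *
 *	static atomic_t claimed = ATOMIC_INIT(0);
 *
 *	if (arch_atomic_xchg(&claimed, 1) == 0) {
 *		// first caller: do the one-time setup
 *	}
 */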
/**
 * arch_atomic_cmpxchg - atomic compare-and-exchange
 * @v: pointer to value to change
 * @old: expected old value to match
 * @new: new value to put in
 *
 * Parameters are pointer, value-in-register, value-in-register,
 * and the return value is the old value actually read.
 *
 * This is more involved on architectures that lack a load-locked/
 * store-conditional primitive like memw_locked; here it maps onto
 * a single LL/SC retry loop.
 *
 * This is the linchpin of the generically defined routines layered
 * on top of it. Remember that Hexagon V2 had a bug with the .new
 * predicate set by memw_locked.
 *
 * "old" is the expected old value; __oldval is the actual old
 * value read back and returned.
 */
static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"
		"	{ P0 = cmp.eq(%0,%2);\n"
		"	  if (!P0.new) jump:nt 2f; }\n"	/* no match: bail out */
		"	memw_locked(%1,P0) = %3;\n"	/* conditional store */
		"	if (!P0) jump 1b;\n"		/* lost reservation: retry */
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0"
	);

	return __oldval;
}
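
/*
 * Illustrative sketch, not part of this header: generic helpers are
 * typically built as read/compute/cmpxchg retry loops on top of the
 * primitive above. A hypothetical saturating increment:
 *
 *	static inline int atomic_inc_saturated(atomic_t *v)
 *	{
 *		int old = arch_atomic_read(v);
 *
 *		while (old != INT_MAX) {
 *			int seen = arch_atomic_cmpxchg(v, old, old + 1);
 *
 *			if (seen == old)
 *				break;		// our update won
 *			old = seen;		// raced; retry with fresh value
 *		}
 *		return old;
 *	}
 */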

/*
 * The three templates below share one LL/SC shape: locked load,
 * ALU op, conditional locked store, retry until the store sticks.
 */
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

/* fetch variants return the pre-op value (output); val carries the update */
#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int output, val;						\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%2);\n"			\
		"	%1 = "#op "(%0,%3);\n"				\
		"	memw_locked(%2,P3)=%1;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output), "=&r" (val)				\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

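/*
 * Illustrative sketch, not part of this header: the fetch_ variants
 * return the value *before* the operation, while the _return variants
 * return the value *after* it:
 *
 *	atomic_t n = ATOMIC_INIT(5);
 *
 *	int old = arch_atomic_fetch_add(3, &n);		// old == 5, n == 8
 *	int now = arch_atomic_add_return(3, &n);	// now == 11, n == 11
 */
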
/**
 * arch_atomic_fetch_add_unless - add unless the value is a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: don't add if the value is already this
 *
 * Atomically adds @a to @v, unless @v was already @u.
 * Returns the old value of @v.
 */
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"		/* already == u? */
		"		if (p3.new) jump:nt 2f;"	/* then skip the add */
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if (!p3) jump 1b;"		/* store failed: retry */
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
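
/*
 * Illustrative sketch, not part of this header: the classic use is an
 * "increment unless already zero" guard for refcounting (cf. the
 * generic atomic_inc_not_zero(); "obj->refs" is hypothetical):
 *
 *	if (arch_atomic_fetch_add_unless(&obj->refs, 1, 0) != 0) {
 *		// we took a reference; the object is still live
 *	}
 */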

#endif	/* _ASM_ATOMIC_H */