xref: /openbmc/linux/arch/hexagon/include/asm/atomic.h (revision 5d7800d9)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*  Normal writes in our arch don't clear lock reservations  */

static inline void arch_atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"
		"	memw_locked(%0,p0) = %1;\n"
		"	if (!P0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}
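
/*
 * Why a locked load/store pair for a plain set: it makes the store
 * participate in the reservation protocol, invalidating the reservation
 * held by a concurrent ll/sc loop on another CPU and forcing that loop to
 * retry against the new value.  A normal store would leave such a
 * reservation intact (see the comment above), and the other CPU's
 * store-conditional could then silently overwrite the value set here.
 */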

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

#define arch_atomic_read(v)		READ_ONCE((v)->counter)

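/*
 * Each of the op families below uses the same load-locked/store-conditional
 * retry loop: memw_locked() loads the counter and takes a reservation, the
 * operation is applied to the loaded value, and the conditional store only
 * succeeds (predicate P3 true) if the reservation survived; otherwise the
 * sequence jumps back to label 1 and retries.
 */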
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int output, val;						\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%2);\n"			\
		"	%1 = "#op "(%0,%3);\n"				\
		"	memw_locked(%2,P3)=%1;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output), "=&r" (val)				\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}
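
/*
 * Return-value conventions of the three generators above: ATOMIC_OP()
 * produces a void operation, ATOMIC_OP_RETURN() returns the newly stored
 * value, and ATOMIC_FETCH_OP() returns the value that memw_locked() read
 * before the update.
 */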

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
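
/*
 * For reference, ATOMIC_OPS(add) above expands to roughly:
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v);
 *	static inline int arch_atomic_add_return(int i, atomic_t *v);
 *	static inline int arch_atomic_fetch_add(int i, atomic_t *v);
 *
 * The #defines below make these definitions visible to the generic atomic
 * code so that no fallback versions are generated for them.
 */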

#define arch_atomic_add_return			arch_atomic_add_return
#define arch_atomic_sub_return			arch_atomic_sub_return
#define arch_atomic_fetch_add			arch_atomic_fetch_add
#define arch_atomic_fetch_sub			arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
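
/*
 * The bitwise family is instantiated without ATOMIC_OP_RETURN(): the
 * kernel's atomic API has no and/or/xor "_return" variants, only the
 * void and fetch_ forms generated here.
 */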

#define arch_atomic_fetch_and			arch_atomic_fetch_and
#define arch_atomic_fetch_or			arch_atomic_fetch_or
#define arch_atomic_fetch_xor			arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

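/*
 * arch_atomic_fetch_add_unless - add @a to @v, unless @v currently equals @u.
 * Returns the old value of @v.  The first packet compares the loaded value
 * against @u and, through the p3.new predicate, jumps straight to label 2
 * without attempting the store when they match; otherwise the usual
 * store-conditional retry loop applies.
 */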
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if (!p3) jump 1b;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
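
/*
 * Illustrative use (a sketch; "example_get_ref" is a hypothetical caller,
 * not part of the kernel): the generic atomic layer builds helpers such as
 * atomic_inc_not_zero() on this operation, taking a reference only while
 * the count is still non-zero:
 *
 *	static inline bool example_get_ref(atomic_t *refs)
 *	{
 *		return atomic_fetch_add_unless(refs, 1, 0) != 0;
 *	}
 */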

#endif