/* arch/sh/include/asm/atomic-grb.h */
#ifndef __ASM_SH_ATOMIC_GRB_H
#define __ASM_SH_ATOMIC_GRB_H

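/*
 * Atomic operations for SuperH, implemented with the gUSA ("g" User
 * Space Atomicity) rollback scheme rather than interrupt masking or
 * load-locked/store-conditional hardware.  This flavour is the one
 * selected by CONFIG_GUSA_RB.
 *
 * Every sequence below follows the same pattern:
 *
 *	r0  - address of the "1:" label that ends the critical region
 *	r1  - saved stack pointer
 *	r15 - negative byte length of the critical region (LOGIN);
 *	      restoring the saved sp at "1:" ends it (LOGOUT)
 *
 * While r15 is negative the sequence is in progress; if an interrupt
 * or exception arrives in that window, the entry code notices the
 * negative r15 and rolls the PC back so the region restarts from the
 * beginning.  The load/modify/store triplet therefore behaves
 * atomically on UP without disabling interrupts.  The immediates
 * (-6, -8, -12) are simply the byte counts of the instructions
 * between LOGIN and LOGOUT, which is why each routine sizes r15
 * differently.
 */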
static inline void atomic_add(int i, atomic_t *v)
{
	int tmp;

	__asm__ __volatile__ (
		"   .align 2              \n\t"
		"   mova    1f,   r0      \n\t" /* r0 = end point */
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
		"   mov.l  @%1,   %0      \n\t" /* load  old value */
		"   add     %2,   %0      \n\t" /* add */
		"   mov.l   %0,   @%1     \n\t" /* store new value */
		"1: mov     r1,   r15     \n\t" /* LOGOUT */
		: "=&r" (tmp),
		  "+r"  (v)
		: "r"   (i)
		: "memory", "r0", "r1");
}

static inline void atomic_sub(int i, atomic_t *v)
{
	int tmp;

	__asm__ __volatile__ (
		"   .align 2              \n\t"
		"   mova    1f,   r0      \n\t" /* r0 = end point */
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
		"   mov.l  @%1,   %0      \n\t" /* load  old value */
		"   sub     %2,   %0      \n\t" /* sub */
		"   mov.l   %0,   @%1     \n\t" /* store new value */
		"1: mov     r1,   r15     \n\t" /* LOGOUT */
		: "=&r" (tmp),
		  "+r"  (v)
		: "r"   (i)
		: "memory", "r0", "r1");
}

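/*
 * The *_return variants use the same LOGIN/LOGOUT sequence as above,
 * but also hand the updated value back to the caller in tmp.
 */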
static inline int atomic_add_return(int i, atomic_t *v)
{
	int tmp;

	__asm__ __volatile__ (
		"   .align 2              \n\t"
		"   mova    1f,   r0      \n\t" /* r0 = end point */
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
		"   mov.l  @%1,   %0      \n\t" /* load  old value */
		"   add     %2,   %0      \n\t" /* add */
		"   mov.l   %0,   @%1     \n\t" /* store new value */
		"1: mov     r1,   r15     \n\t" /* LOGOUT */
		: "=&r" (tmp),
		  "+r"  (v)
		: "r"   (i)
		: "memory", "r0", "r1");

	return tmp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int tmp;

	__asm__ __volatile__ (
		"   .align 2              \n\t"
		"   mova    1f,   r0      \n\t" /* r0 = end point */
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
		"   mov.l  @%1,   %0      \n\t" /* load  old value */
		"   sub     %2,   %0      \n\t" /* sub */
		"   mov.l   %0,   @%1     \n\t" /* store new value */
		"1: mov     r1,   r15     \n\t" /* LOGOUT */
		: "=&r" (tmp),
		  "+r"  (v)
		: "r"   (i)
		: "memory", "r0", "r1");

	return tmp;
}

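/*
 * Clear the bits set in mask by AND-ing the counter with ~mask.
 */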
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	int tmp;
	unsigned int _mask = ~mask;

	__asm__ __volatile__ (
		"   .align 2              \n\t"
		"   mova    1f,   r0      \n\t" /* r0 = end point */
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
		"   mov.l  @%1,   %0      \n\t" /* load  old value */
		"   and     %2,   %0      \n\t" /* and */
		"   mov.l   %0,   @%1     \n\t" /* store new value */
		"1: mov     r1,   r15     \n\t" /* LOGOUT */
		: "=&r" (tmp),
		  "+r"  (v)
		: "r"   (_mask)
		: "memory", "r0", "r1");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	int tmp;

	__asm__ __volatile__ (
		"   .align 2              \n\t"
		"   mova    1f,   r0      \n\t" /* r0 = end point */
		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
		"   mov.l  @%1,   %0      \n\t" /* load  old value */
		"   or      %2,   %0      \n\t" /* or */
		"   mov.l   %0,   @%1     \n\t" /* store new value */
		"1: mov     r1,   r15     \n\t" /* LOGOUT */
		: "=&r" (tmp),
		  "+r"  (v)
		: "r"   (mask)
		: "memory", "r0", "r1");
}

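/*
 * Compare-and-exchange.  A deliberately non-atomic C sketch of what
 * the asm below does (the real atomicity comes from the gUSA region):
 *
 *	ret = v->counter;
 *	if (ret == old)
 *		v->counter = new;
 *	return ret;
 */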
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;

	__asm__ __volatile__ (
		"   .align 2		\n\t"
		"   mova     1f,  r0	\n\t" /* r0 = end point */
		"   nop			\n\t" /* keep "1:" longword-aligned for mova */
		"   mov     r15,  r1	\n\t" /* r1 = saved sp */
		"   mov    #-8,   r15	\n\t" /* LOGIN: r15 = size */
		"   mov.l   @%1,  %0	\n\t" /* load  old value */
		"   cmp/eq   %2,  %0	\n\t" /* T = (old value == expected) */
		"   bf	     1f		\n\t" /* skip the store on mismatch */
		"   mov.l    %3, @%1	\n\t" /* store new value */
		"1: mov      r1,  r15	\n\t" /* LOGOUT */
		: "=&r" (ret)
		: "r" (v), "r" (old), "r" (new)
		: "memory", "r0", "r1", "t");

	return ret;
}

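/*
 * Add "a" to v, unless v is "u".  A non-atomic C sketch of the asm
 * below:
 *
 *	ret = v->counter;
 *	if (ret != u)
 *		v->counter = ret + a;
 *	return ret != u;
 *
 * The return value is nonzero iff the add was performed.
 */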
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long tmp;

	__asm__ __volatile__ (
		"   .align 2		\n\t"
		"   mova    1f,   r0	\n\t" /* r0 = end point */
		"   nop			\n\t" /* keep "1:" longword-aligned for mova */
		"   mov    r15,   r1	\n\t" /* r1 = saved sp */
		"   mov    #-12,  r15	\n\t" /* LOGIN: r15 = size */
		"   mov.l  @%2,   %1	\n\t" /* load  old value */
		"   mov     %1,   %0	\n\t" /* ret = old value */
		"   cmp/eq  %4,   %0	\n\t" /* T = (old value == u) */
		"   bt/s    1f		\n\t" /* skip the store if they match... */
		"    add    %3,   %1	\n\t" /* ...the delay-slot add is then harmless */
		"   mov.l   %1,  @%2	\n\t" /* store new value */
		"1: mov     r1,   r15	\n\t" /* LOGOUT */
		: "=&r" (ret), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "r0", "r1", "t");

	return ret != u;
}
#endif /* __ASM_SH_ATOMIC_GRB_H */