/* arch/sh/include/asm/atomic-llsc.h (revision 78c99ba1) */
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add	\n"
"	add	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub	\n"
"	sub	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}
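
/*
 * Illustrative sketch, not part of the original header: callers simply hand
 * atomic_add()/atomic_sub() an atomic_t counter; the LL/SC loop above retries
 * transparently on contention.  The names below are hypothetical.
 *
 *	static atomic_t pkt_count = ATOMIC_INIT(0);
 *
 *	static inline void pkt_count_rx(void)
 *	{
 *		atomic_add(1, &pkt_count);
 *	}
 */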

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add_return	\n"
"	add	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub_return	\n"
"	sub	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}
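
/*
 * Illustrative sketch, not part of the original header: the value returned by
 * atomic_sub_return() is what enables "drop a reference and act on zero"
 * style code.  struct obj, obj->refcnt and obj_free() are hypothetical names.
 *
 *	static inline void obj_put(struct obj *p)
 *	{
 *		if (atomic_sub_return(1, &p->refcnt) == 0)
 *			obj_free(p);
 *	}
 */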

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
"	and	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (~mask), "r" (&v->counter)
	: "t");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_set_mask	\n"
"	or	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (mask), "r" (&v->counter)
	: "t");
}
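
/*
 * Illustrative sketch, not part of the original header: atomic_set_mask() and
 * atomic_clear_mask() suit flag words kept in an atomic_t.  DEV_FLAG_BUSY and
 * dev_flags are hypothetical names; the first call sets the bit, the second
 * clears it again.
 *
 *	#define DEV_FLAG_BUSY	0x00000001
 *	static atomic_t dev_flags = ATOMIC_INIT(0);
 *
 *	atomic_set_mask(DEV_FLAG_BUSY, &dev_flags);
 *	atomic_clear_mask(DEV_FLAG_BUSY, &dev_flags);
 */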

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
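
/*
 * Illustrative note, not part of the original header: atomic_cmpxchg()
 * returns the value that was in v->counter before the operation, so the swap
 * is known to have taken place when the return value equals the expected old
 * value.  atomic_add_unless() below is built on exactly this pattern.
 *
 *	old = atomic_cmpxchg(v, expected, new);
 *	success = (old == expected);
 */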

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}

	return c != (u);
}
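
/*
 * Illustrative sketch, not part of the original header: a common use of
 * atomic_add_unless() is the "take a reference unless the count has already
 * dropped to zero" idiom.  obj_get_unless_zero() is a hypothetical name.
 *
 *	static inline int obj_get_unless_zero(atomic_t *refcnt)
 *	{
 *		return atomic_add_unless(refcnt, 1, 0);
 *	}
 */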

#endif /* __ASM_SH_ATOMIC_LLSC_H */