xref: /openbmc/linux/arch/sh/include/asm/atomic-llsc.h (revision f15cbe6f1a4b4d9df59142fc8e4abb973302cf44)
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * movli.l/movco.l form a load-linked/store-conditional pair: movli.l
 * loads the word and opens an atomic sequence, and movco.l stores the
 * updated value only if that sequence was not broken in the meantime,
 * setting the T bit on success.  "bf 1b" branches back to retry the
 * whole read-modify-write whenever the conditional store fails.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add	\n"
"	add	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}
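
/*
 * Illustrative sketch, not part of this header: in plain C, the
 * movli.l/movco.l loop in atomic_add() behaves like the cmpxchg()
 * retry loop below, except that the hardware aborts the store when
 * the atomic sequence is broken rather than comparing values.  Kept
 * out of the build; atomic_add_cas_sketch() is a hypothetical name.
 */
#if 0
static inline void atomic_add_cas_sketch(int i, atomic_t *v)
{
	int old;

	do {
		old = atomic_read(v);
	} while (atomic_cmpxchg(v, old, old + i) != old);
}
#endif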

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub	\n"
"	sub	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx().  movli.l/movco.l require r0 due to the instruction
 * encoding, so the updated value is already in the output register and
 * no extra work is needed to produce the return value.  The trailing
 * synco acts as a memory barrier, giving the *_return variants the
 * ordering guarantees expected of value-returning atomics.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add_return	\n"
"	add	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}
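
/*
 * Usage sketch, not part of this header: because the *_return variants
 * hand back the post-operation value, a caller can act on the result
 * without a second read.  object_put_sketch() is a hypothetical
 * refcount-style helper, kept out of the build.
 */
#if 0
static inline int object_put_sketch(atomic_t *refcount)
{
	/* nonzero when this call dropped the last reference */
	return atomic_sub_return(1, refcount) == 0;
}
#endif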

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub_return	\n"
"	sub	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
"	and	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (~mask), "r" (&v->counter)
	: "t");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_set_mask	\n"
"	or	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (mask), "r" (&v->counter)
	: "t");
}
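
/*
 * Usage sketch, not part of this header: callers pass the mask of the
 * bits they want changed; atomic_clear_mask() inverts it internally
 * (note the "~mask" operand above) so the and clears exactly those
 * bits.  SKETCH_FLAG_BUSY and the helpers are hypothetical, kept out
 * of the build.
 */
#if 0
#define SKETCH_FLAG_BUSY	0x1	/* hypothetical flag bit */

static inline void mark_busy_sketch(atomic_t *flags)
{
	atomic_set_mask(SKETCH_FLAG_BUSY, flags);	/* bit 0 -> 1 */
}

static inline void mark_idle_sketch(atomic_t *flags)
{
	atomic_clear_mask(SKETCH_FLAG_BUSY, flags);	/* bit 0 -> 0 */
}
#endif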

#endif /* __ASM_SH_ATOMIC_LLSC_H */