/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARM_FUTEX_H
#define _ASM_ARM_FUTEX_H

#ifdef __KERNEL__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>

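/*
 * Fault fixup shared by the user accesses below: the instructions at
 * labels 1 and 2 get __ex_table entries pointing at label 4, which
 * copies err_reg (-EFAULT at every call site) into operand %0 and
 * branches back to label 3, i.e. just past the access sequence.
 */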
#define __futex_atomic_ex_table(err_reg)			\
	"3:\n"							\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4f, 2b, 4f\n"			\
	"	.popsection\n"					\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, " err_reg "\n"			\
	"	b	3b\n"					\
	"	.popsection"

#ifdef CONFIG_SMP

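/*
 * SMP variant: update the user futex word with an LDREX/STREX loop,
 * retrying until the store-exclusive succeeds, with user access enabled
 * around the sequence and smp_mb() ordering it against earlier accesses.
 * "insn" computes the new value in %0 from the old value in %1 and the
 * operand in %4; on success %0 is cleared, on a fault the fixup above
 * sets it to -EFAULT.
 */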
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
({								\
	unsigned int __ua_flags;				\
	smp_mb();						\
	prefetchw(uaddr);					\
	__ua_flags = uaccess_save_and_enable();			\
	__asm__ __volatile__(					\
	"1:	ldrex	%1, [%3]\n"				\
	"	" insn "\n"					\
	"2:	strex	%2, %0, [%3]\n"				\
	"	teq	%2, #0\n"				\
	"	bne	1b\n"					\
	"	mov	%0, #0\n"				\
	__futex_atomic_ex_table("%5")				\
	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
	: "cc", "memory");					\
	uaccess_restore(__ua_flags);				\
})

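/*
 * Atomically compare the user futex word at uaddr with oldval and, if
 * they match, replace it with newval.  The value actually observed is
 * handed back through *uval.  Returns 0 whether or not the exchange
 * happened, and -EFAULT if the user access faults.
 */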
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
			      u32 oldval, u32 newval)
{
	unsigned int __ua_flags;
	int ret;
	u32 val;

	if (!access_ok(uaddr, sizeof(u32)))
		return -EFAULT;

	smp_mb();
	/* Prefetching cannot fault */
	prefetchw(uaddr);
	__ua_flags = uaccess_save_and_enable();
	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
	"1:	ldrex	%1, [%4]\n"
	"	teq	%1, %2\n"
	"	ite	eq	@ explicit IT needed for the 2b label\n"
	"2:	strexeq	%0, %3, [%4]\n"
	"	movne	%0, #0\n"
	"	teq	%0, #0\n"
	"	bne	1b\n"
	__futex_atomic_ex_table("%5")
	: "=&r" (ret), "=&r" (val)
	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
	: "cc", "memory");
	uaccess_restore(__ua_flags);
	smp_mb();

	*uval = val;
	return ret;
}

#else /* !SMP, we can work around lack of atomic ops by disabling preemption */

#include <linux/preempt.h>
#include <asm/domain.h>

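/*
 * UP variant: plain TUSER() user-access loads and stores are sufficient
 * here because the callers disable preemption around the read-modify-write,
 * so no other task can touch the futex word between labels 1 and 2.
 */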
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
({								\
	unsigned int __ua_flags = uaccess_save_and_enable();	\
	__asm__ __volatile__(					\
	"1:	" TUSER(ldr) "	%1, [%3]\n"			\
	"	" insn "\n"					\
	"2:	" TUSER(str) "	%0, [%3]\n"			\
	"	mov	%0, #0\n"				\
	__futex_atomic_ex_table("%5")				\
	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
	: "cc", "memory");					\
	uaccess_restore(__ua_flags);				\
})

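/*
 * UP cmpxchg: preemption is disabled locally, the word is read with a
 * user-mode load and conditionally written back if it still equals
 * oldval.  ret stays 0 unless one of the accesses faults, in which case
 * the fixup above sets it to -EFAULT; the observed value goes to *uval.
 */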
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
			      u32 oldval, u32 newval)
{
	unsigned int __ua_flags;
	int ret = 0;
	u32 val;

	if (!access_ok(uaddr, sizeof(u32)))
		return -EFAULT;

	preempt_disable();
	__ua_flags = uaccess_save_and_enable();
	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
	"	.syntax unified\n"
	"1:	" TUSER(ldr) "	%1, [%4]\n"
	"	teq	%1, %2\n"
	"	it	eq	@ explicit IT needed for the 2b label\n"
	"2:	" TUSERCOND(str, eq) "	%3, [%4]\n"
	__futex_atomic_ex_table("%5")
	: "+r" (ret), "=&r" (val)
	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
	: "cc", "memory");
	uaccess_restore(__ua_flags);

	*uval = val;
	preempt_enable();

	return ret;
}

#endif /* !SMP */

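/*
 * Called by the generic futex code: apply the operation encoded in "op"
 * (set/add/or/andn/xor) to the user word at uaddr and hand back the
 * previous value through *oval.  On !SMP builds preemption is disabled
 * across the read-modify-write so the plain __futex_atomic_op() variant
 * above remains atomic.
 */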
static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
	int oldval = 0, ret, tmp;

	if (!access_ok(uaddr, sizeof(u32)))
		return -EFAULT;

#ifndef CONFIG_SMP
	preempt_disable();
#endif

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("mov	%0, %4", ret, oldval, tmp, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("add	%0, %1, %4", ret, oldval, tmp, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("orr	%0, %1, %4", ret, oldval, tmp, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("and	%0, %1, %4", ret, oldval, tmp, uaddr, ~oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("eor	%0, %1, %4", ret, oldval, tmp, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}

#ifndef CONFIG_SMP
	preempt_enable();
#endif

	/*
	 * Store unconditionally. If ret != 0 the extra store is the least
	 * of the worries, but GCC cannot figure out that __futex_atomic_op()
	 * is either setting ret to -EFAULT or storing the old value in
	 * oldval, which results in an uninitialized warning at the call site.
	 */
	*oval = oldval;

	return ret;
}

#endif /* __KERNEL__ */
#endif /* _ASM_ARM_FUTEX_H */