xref: /openbmc/linux/arch/s390/lib/uaccess.c (revision e9b1adb7)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Standard user space access functions based on mvcp/mvcs and doing
 *  interesting things in the secondary space mode.
 *
 *    Copyright IBM Corp. 2006,2014
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/jump_label.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>

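/*
 * With CONFIG_DEBUG_ENTRY the kernel entry/exit paths verify that the
 * control registers still contain the expected address space setup:
 * the kernel ASCE in cr1 and the user ASCE in cr7.
 */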
#ifdef CONFIG_DEBUG_ENTRY
void debug_user_asce(int exit)
{
	unsigned long cr1, cr7;

	__ctl_store(cr1, 1, 1);
	__ctl_store(cr7, 7, 7);
	if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce)
		return;
	panic("incorrect ASCE on kernel %s\n"
	      "cr1:    %016lx cr7:  %016lx\n"
	      "kernel: %016llx user: %016llx\n",
	      exit ? "exit" : "entry", cr1, cr7,
	      S390_lowcore.kernel_asce, S390_lowcore.user_asce);
}
#endif /* CONFIG_DEBUG_ENTRY */

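/*
 * MVCOS ("move with optional specifications", facility 27) can access the
 * user address space directly without changing the address space control
 * mode. Kernels built for z10 or newer assume the facility is present;
 * otherwise it is detected at boot time and selected via a static key.
 */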
#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
static DEFINE_STATIC_KEY_FALSE(have_mvcos);

static int __init uaccess_init(void)
{
	if (test_facility(27))
		static_branch_enable(&have_mvcos);
	return 0;
}
early_initcall(uaccess_init);

static inline int copy_with_mvcos(void)
{
	if (static_branch_likely(&have_mvcos))
		return 1;
	return 0;
}
#else
static inline int copy_with_mvcos(void)
{
	return 1;
}
#endif

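/*
 * Copy from user space with MVCOS. The specification in register 0 (0x81 in
 * the low halfword) makes the source operand use the secondary (user) address
 * space, while the destination is a normal kernel access. On a fault the copy
 * is retried up to the next page boundary so that the exact number of bytes
 * that could not be copied can be returned.
 */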
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
						 unsigned long size)
{
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"   lghi  0,%[spec]\n"
		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
		"6: jz    4f\n"
		"1: algr  %0,%3\n"
		"   slgr  %1,%3\n"
		"   slgr  %2,%3\n"
		"   j     0b\n"
		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   5f\n"
		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
		"7: slgr  %0,%4\n"
		"   j     5f\n"
		"4: slgr  %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: [spec] "K" (0x81UL)
		: "cc", "memory", "0");
	return size;
}

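/*
 * Fallback without MVCOS: mvcp moves data from the secondary (user) address
 * space into the primary space, at most 256 bytes per execution, hence the
 * loop. The address space control is switched with sacf for the duration of
 * the copy, since mvcp cannot be used in the home space mode.
 */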
static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
						unsigned long size)
{
	unsigned long tmp1, tmp2;

	tmp1 = -256UL;
	asm volatile(
		"   sacf  0\n"
		"0: mvcp  0(%0,%2),0(%1),%3\n"
		"7: jz    5f\n"
		"1: algr  %0,%3\n"
		"   la    %1,256(%1)\n"
		"   la    %2,256(%2)\n"
		"2: mvcp  0(%0,%2),0(%1),%3\n"
		"8: jnz   1b\n"
		"   j     5f\n"
		"3: la    %4,255(%1)\n"	/* %4 = ptr + 255 */
		"   lghi  %3,-4096\n"
		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   6f\n"
		"4: mvcp  0(%4,%2),0(%1),%3\n"
		"9: slgr  %0,%4\n"
		"   j     6f\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}

unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (copy_with_mvcos())
		return copy_from_user_mvcos(to, from, n);
	return copy_from_user_mvcp(to, from, n);
}
EXPORT_SYMBOL(raw_copy_from_user);

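/*
 * Copy to user space with MVCOS. Here the specification (llilh puts 0x81
 * into the destination half of register 0) selects the secondary (user)
 * address space for the store, while the source is read as normal kernel
 * memory. Fault handling mirrors copy_from_user_mvcos().
 */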
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
					       unsigned long size)
{
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"   llilh 0,%[spec]\n"
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
		"6: jz    4f\n"
		"1: algr  %0,%3\n"
		"   slgr  %1,%3\n"
		"   slgr  %2,%3\n"
		"   j     0b\n"
		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   5f\n"
		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
		"7: slgr  %0,%4\n"
		"   j     5f\n"
		"4: slgr  %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: [spec] "K" (0x81UL)
		: "cc", "memory", "0");
	return size;
}

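/*
 * Fallback without MVCOS: mvcs moves data from the primary space to the
 * secondary (user) address space, again in chunks of at most 256 bytes
 * and with page-wise fault handling.
 */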
static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
					      unsigned long size)
{
	unsigned long tmp1, tmp2;

	tmp1 = -256UL;
	asm volatile(
		"   sacf  0\n"
		"0: mvcs  0(%0,%1),0(%2),%3\n"
		"7: jz    5f\n"
		"1: algr  %0,%3\n"
		"   la    %1,256(%1)\n"
		"   la    %2,256(%2)\n"
		"2: mvcs  0(%0,%1),0(%2),%3\n"
		"8: jnz   1b\n"
		"   j     5f\n"
		"3: la    %4,255(%1)\n" /* %4 = ptr + 255 */
		"   lghi  %3,-4096\n"
		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
		"   slgr  %4,%1\n"
		"   clgr  %0,%4\n"	/* copy crosses next page boundary? */
		"   jnh   6f\n"
		"4: mvcs  0(%4,%1),0(%2),%3\n"
		"9: slgr  %0,%4\n"
		"   j     6f\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}

unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (copy_with_mvcos())
		return copy_to_user_mvcos(to, from, n);
	return copy_to_user_mvcs(to, from, n);
}
EXPORT_SYMBOL(raw_copy_to_user);

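/*
 * Copy within user space with MVCOS: the 0x810081 specification selects
 * the secondary (user) address space for both operands. Unlike the other
 * MVCOS variants there is no exact fault handling yet, see the FIXME.
 */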
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
					       unsigned long size)
{
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	/* FIXME: copy with reduced length. */
	asm volatile(
		"   lgr	  0,%[spec]\n"
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
		"   jz	  2f\n"
		"1: algr  %0,%3\n"
		"   slgr  %1,%3\n"
		"   slgr  %2,%3\n"
		"   j	  0b\n"
		"2: slgr  %0,%0\n"
		"3: \n"
		EX_TABLE(0b,3b)
		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
		: [spec] "d" (0x810081UL)
		: "cc", "memory", "0");
	return size;
}

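/*
 * Fallback without MVCOS: sacf 256 switches to the secondary space mode so
 * that a plain mvc operates on user memory for both operands. The copy is
 * done in 256 byte pieces, with an execute (ex) of the one byte mvc for
 * the remainder.
 */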
static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
					     unsigned long size)
{
	unsigned long tmp1;

	asm volatile(
		"   sacf  256\n"
		"   aghi  %0,-1\n"
		"   jo	  5f\n"
		"   bras  %3,3f\n"
		"0: aghi  %0,257\n"
		"1: mvc	  0(1,%1),0(%2)\n"
		"   la	  %1,1(%1)\n"
		"   la	  %2,1(%2)\n"
		"   aghi  %0,-1\n"
		"   jnz	  1b\n"
		"   j	  5f\n"
		"2: mvc	  0(256,%1),0(%2)\n"
		"   la	  %1,256(%1)\n"
		"   la	  %2,256(%2)\n"
		"3: aghi  %0,-256\n"
		"   jnm	  2b\n"
		"4: ex	  %0,1b-0b(%3)\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
		: : "cc", "memory");
	return size;
}

unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (copy_with_mvcos())
		return copy_in_user_mvcos(to, from, n);
	return copy_in_user_mvc(to, from, n);
}
EXPORT_SYMBOL(raw_copy_in_user);

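/*
 * Clear user memory with MVCOS by copying zeros from empty_zero_page, one
 * page at a time; the destination operand is accessed through the user
 * address space. Returns the number of bytes that could not be cleared.
 */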
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
	unsigned long tmp1, tmp2;

	tmp1 = -4096UL;
	asm volatile(
		"   llilh 0,%[spec]\n"
		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
		"   jz	  4f\n"
		"1: algr  %0,%2\n"
		"   slgr  %1,%2\n"
		"   j	  0b\n"
		"2: la	  %3,4095(%1)\n"/* %3 = to + 4095 */
		"   nr	  %3,%2\n"	/* %3 = (to + 4095) & -4096 */
		"   slgr  %3,%1\n"
		"   clgr  %0,%3\n"	/* copy crosses next page boundary? */
		"   jnh	  5f\n"
		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
		"   slgr  %0,%3\n"
		"   j	  5f\n"
		"4: slgr  %0,%0\n"
		"5:\n"
		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
		: "a" (empty_zero_page), [spec] "K" (0x81UL)
		: "cc", "memory", "0");
	return size;
}

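/*
 * Fallback without MVCOS: clear the user buffer with xc (destination
 * xor'ed with itself) in 256 byte pieces while the secondary space mode
 * is active, with page-wise fault handling.
 */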
static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
{
	unsigned long tmp1, tmp2;

	asm volatile(
		"   sacf  256\n"
		"   aghi  %0,-1\n"
		"   jo    5f\n"
		"   bras  %3,3f\n"
		"   xc    0(1,%1),0(%1)\n"
		"0: aghi  %0,257\n"
		"   la    %2,255(%1)\n" /* %2 = ptr + 255 */
		"   srl   %2,12\n"
		"   sll   %2,12\n"	/* %2 = (ptr + 255) & -4096 */
		"   slgr  %2,%1\n"
		"   clgr  %0,%2\n"	/* clear crosses next page boundary? */
		"   jnh   5f\n"
		"   aghi  %2,-1\n"
		"1: ex    %2,0(%3)\n"
		"   aghi  %2,1\n"
		"   slgr  %0,%2\n"
		"   j     5f\n"
		"2: xc    0(256,%1),0(%1)\n"
		"   la    %1,256(%1)\n"
		"3: aghi  %0,-256\n"
		"   jnm   2b\n"
		"4: ex    %0,0(%3)\n"
		"5: slgr  %0,%0\n"
		"6: sacf  768\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}

unsigned long __clear_user(void __user *to, unsigned long size)
{
	if (copy_with_mvcos())
		return clear_user_mvcos(to, size);
	return clear_user_xc(to, size);
}
EXPORT_SYMBOL(__clear_user);

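/*
 * strnlen in user space: srst scans for the terminating zero byte (the
 * search character is set up in register 0) while the user address space
 * is made addressable via sacf. The returned length includes the
 * terminating zero byte, as required by strnlen_user().
 */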
static inline unsigned long strnlen_user_srst(const char __user *src,
					      unsigned long size)
{
	unsigned long tmp1, tmp2;

	asm volatile(
		"   lghi  0,0\n"
		"   la    %2,0(%1)\n"
		"   la    %3,0(%0,%1)\n"
		"   slgr  %0,%0\n"
		"   sacf  256\n"
		"0: srst  %3,%2\n"
		"   jo    0b\n"
		"   la    %0,1(%3)\n"	/* strnlen_user result includes the \0 */
		"   slgr  %0,%1\n"
		"1: sacf  768\n"
		EX_TABLE(0b,1b)
		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
		:
		: "cc", "memory", "0");
	return size;
}

unsigned long __strnlen_user(const char __user *src, unsigned long size)
{
	if (unlikely(!size))
		return 0;
	return strnlen_user_srst(src, size);
}
EXPORT_SYMBOL(__strnlen_user);

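/*
 * __strncpy_from_user() has no special instruction support: the string is
 * copied with copy_from_user() in cache line sized pieces and the loop
 * stops once a piece contains the terminating zero byte or the size limit
 * is reached.
 */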
long __strncpy_from_user(char *dst, const char __user *src, long size)
{
	size_t done, len, offset, len_str;

	if (unlikely(size <= 0))
		return 0;
	done = 0;
	do {
		offset = (size_t)src & (L1_CACHE_BYTES - 1);
		len = min(size - done, L1_CACHE_BYTES - offset);
		if (copy_from_user(dst, src, len))
			return -EFAULT;
		len_str = strnlen(dst, len);
		done += len_str;
		src += len_str;
		dst += len_str;
	} while ((len_str == len) && (done < size));
	return done;
}
EXPORT_SYMBOL(__strncpy_from_user);