xref: /openbmc/linux/arch/loongarch/kernel/signal.c (revision 8957261c)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Author: Hanlu Li <lihanlu@loongson.cn>
4  *         Huacai Chen <chenhuacai@loongson.cn>
5  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
6  *
7  * Derived from MIPS:
8  * Copyright (C) 1991, 1992  Linus Torvalds
9  * Copyright (C) 1994 - 2000  Ralf Baechle
10  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
11  * Copyright (C) 2014, Imagination Technologies Ltd.
12  */
13 #include <linux/audit.h>
14 #include <linux/cache.h>
15 #include <linux/context_tracking.h>
16 #include <linux/irqflags.h>
17 #include <linux/sched.h>
18 #include <linux/mm.h>
19 #include <linux/personality.h>
20 #include <linux/smp.h>
21 #include <linux/kernel.h>
22 #include <linux/signal.h>
23 #include <linux/errno.h>
24 #include <linux/wait.h>
25 #include <linux/ptrace.h>
26 #include <linux/unistd.h>
27 #include <linux/compiler.h>
28 #include <linux/syscalls.h>
29 #include <linux/uaccess.h>
30 
31 #include <asm/asm.h>
32 #include <asm/cacheflush.h>
33 #include <asm/cpu-features.h>
34 #include <asm/fpu.h>
35 #include <asm/lbt.h>
36 #include <asm/ucontext.h>
37 #include <asm/vdso.h>
38 
39 #ifdef DEBUG_SIG
40 #  define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
41 #else
42 #  define DEBUGP(fmt, args...)
43 #endif
44 
45 /* Make sure we will not lose FPU ownership */
46 #define lock_fpu_owner()	({ preempt_disable(); pagefault_disable(); })
47 #define unlock_fpu_owner()	({ pagefault_enable(); preempt_enable(); })
48 /* Make sure we will not lose LBT ownership */
49 #define lock_lbt_owner()	({ preempt_disable(); pagefault_disable(); })
50 #define unlock_lbt_owner()	({ pagefault_enable(); preempt_enable(); })
51 
52 /* Assembly functions to move context to/from the FPU */
53 extern asmlinkage int
54 _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
55 extern asmlinkage int
56 _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
57 extern asmlinkage int
58 _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
59 extern asmlinkage int
60 _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
61 extern asmlinkage int
62 _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
63 extern asmlinkage int
64 _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
65 
66 #ifdef CONFIG_CPU_HAS_LBT
67 extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
68 extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
69 extern asmlinkage int _save_ftop_context(void __user *ftop);
70 extern asmlinkage int _restore_ftop_context(void __user *ftop);
71 #endif
72 
/* Frame pushed onto the user stack when delivering an rt signal. */
73 struct rt_sigframe {
	/* Signal details for the handler; a1 points here at delivery. */
74 	struct siginfo rs_info;
	/* Saved user context (sigmask, altstack, machine state); a2 points here. */
75 	struct ucontext rs_uctx;
76 };
77 
/* Location and size of one extended-context record inside the sigframe. */
78 struct _ctx_layout {
	/* User-stack address of the record's sctx_info header. */
79 	struct sctx_info *addr;
	/* Total bytes reserved for the record (header + payload + padding). */
80 	unsigned int size;
81 };
82 
/*
 * Kernel-side description of all extended-context records laid out (or
 * parsed) for one signal frame.  Unused records keep addr == NULL.
 */
83 struct extctx_layout {
	/* Total bytes of extended context placed on the user stack. */
84 	unsigned long size;
	/* SC_* flags mirrored into sigcontext.sc_flags. */
85 	unsigned int flags;
86 	struct _ctx_layout fpu;
87 	struct _ctx_layout lsx;
88 	struct _ctx_layout lasx;
89 	struct _ctx_layout lbt;
	/* Terminating record with zero magic/size. */
90 	struct _ctx_layout end;
91 };
92 
93 static void __user *get_ctx_through_ctxinfo(struct sctx_info *info)
94 {
95 	return (void __user *)((char *)info + sizeof(struct sctx_info));
96 }
97 
98 /*
99  * Thread saved context copy to/from a signal context presumed to be on the
100  * user stack, and therefore accessed with appropriate macros from uaccess.h.
101  */
102 static int copy_fpu_to_sigcontext(struct fpu_context __user *ctx)
103 {
104 	int i;
105 	int err = 0;
106 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
107 	uint64_t __user *fcc	= &ctx->fcc;
108 	uint32_t __user *fcsr	= &ctx->fcsr;
109 
110 	for (i = 0; i < NUM_FPU_REGS; i++) {
111 		err |=
112 		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
113 			       &regs[i]);
114 	}
115 	err |= __put_user(current->thread.fpu.fcc, fcc);
116 	err |= __put_user(current->thread.fpu.fcsr, fcsr);
117 
118 	return err;
119 }
120 
121 static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx)
122 {
123 	int i;
124 	int err = 0;
125 	u64 fpr_val;
126 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
127 	uint64_t __user *fcc	= &ctx->fcc;
128 	uint32_t __user *fcsr	= &ctx->fcsr;
129 
130 	for (i = 0; i < NUM_FPU_REGS; i++) {
131 		err |= __get_user(fpr_val, &regs[i]);
132 		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
133 	}
134 	err |= __get_user(current->thread.fpu.fcc, fcc);
135 	err |= __get_user(current->thread.fpu.fcsr, fcsr);
136 
137 	return err;
138 }
139 
140 static int copy_lsx_to_sigcontext(struct lsx_context __user *ctx)
141 {
142 	int i;
143 	int err = 0;
144 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
145 	uint64_t __user *fcc	= &ctx->fcc;
146 	uint32_t __user *fcsr	= &ctx->fcsr;
147 
148 	for (i = 0; i < NUM_FPU_REGS; i++) {
149 		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
150 				  &regs[2*i]);
151 		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
152 				  &regs[2*i+1]);
153 	}
154 	err |= __put_user(current->thread.fpu.fcc, fcc);
155 	err |= __put_user(current->thread.fpu.fcsr, fcsr);
156 
157 	return err;
158 }
159 
160 static int copy_lsx_from_sigcontext(struct lsx_context __user *ctx)
161 {
162 	int i;
163 	int err = 0;
164 	u64 fpr_val;
165 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
166 	uint64_t __user *fcc	= &ctx->fcc;
167 	uint32_t __user *fcsr	= &ctx->fcsr;
168 
169 	for (i = 0; i < NUM_FPU_REGS; i++) {
170 		err |= __get_user(fpr_val, &regs[2*i]);
171 		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
172 		err |= __get_user(fpr_val, &regs[2*i+1]);
173 		set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
174 	}
175 	err |= __get_user(current->thread.fpu.fcc, fcc);
176 	err |= __get_user(current->thread.fpu.fcsr, fcsr);
177 
178 	return err;
179 }
180 
181 static int copy_lasx_to_sigcontext(struct lasx_context __user *ctx)
182 {
183 	int i;
184 	int err = 0;
185 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
186 	uint64_t __user *fcc	= &ctx->fcc;
187 	uint32_t __user *fcsr	= &ctx->fcsr;
188 
189 	for (i = 0; i < NUM_FPU_REGS; i++) {
190 		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
191 				  &regs[4*i]);
192 		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
193 				  &regs[4*i+1]);
194 		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 2),
195 				  &regs[4*i+2]);
196 		err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 3),
197 				  &regs[4*i+3]);
198 	}
199 	err |= __put_user(current->thread.fpu.fcc, fcc);
200 	err |= __put_user(current->thread.fpu.fcsr, fcsr);
201 
202 	return err;
203 }
204 
205 static int copy_lasx_from_sigcontext(struct lasx_context __user *ctx)
206 {
207 	int i;
208 	int err = 0;
209 	u64 fpr_val;
210 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
211 	uint64_t __user *fcc	= &ctx->fcc;
212 	uint32_t __user *fcsr	= &ctx->fcsr;
213 
214 	for (i = 0; i < NUM_FPU_REGS; i++) {
215 		err |= __get_user(fpr_val, &regs[4*i]);
216 		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
217 		err |= __get_user(fpr_val, &regs[4*i+1]);
218 		set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
219 		err |= __get_user(fpr_val, &regs[4*i+2]);
220 		set_fpr64(&current->thread.fpu.fpr[i], 2, fpr_val);
221 		err |= __get_user(fpr_val, &regs[4*i+3]);
222 		set_fpr64(&current->thread.fpu.fpr[i], 3, fpr_val);
223 	}
224 	err |= __get_user(current->thread.fpu.fcc, fcc);
225 	err |= __get_user(current->thread.fpu.fcsr, fcsr);
226 
227 	return err;
228 }
229 
230 #ifdef CONFIG_CPU_HAS_LBT
/* Software copy of saved LBT scratch registers and eflags to the sigcontext. */
231 static int copy_lbt_to_sigcontext(struct lbt_context __user *ctx)
232 {
233 	int err = 0;
234 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
235 	uint32_t __user *eflags	= (uint32_t *)&ctx->eflags;
236 
237 	err |= __put_user(current->thread.lbt.scr0, &regs[0]);
238 	err |= __put_user(current->thread.lbt.scr1, &regs[1]);
239 	err |= __put_user(current->thread.lbt.scr2, &regs[2]);
240 	err |= __put_user(current->thread.lbt.scr3, &regs[3]);
241 	err |= __put_user(current->thread.lbt.eflags, eflags);
242 
243 	return err;
244 }
245 
/* Software restore of saved LBT scratch registers and eflags from the sigcontext. */
246 static int copy_lbt_from_sigcontext(struct lbt_context __user *ctx)
247 {
248 	int err = 0;
249 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
250 	uint32_t __user *eflags	= (uint32_t *)&ctx->eflags;
251 
252 	err |= __get_user(current->thread.lbt.scr0, &regs[0]);
253 	err |= __get_user(current->thread.lbt.scr1, &regs[1]);
254 	err |= __get_user(current->thread.lbt.scr2, &regs[2]);
255 	err |= __get_user(current->thread.lbt.scr3, &regs[3]);
256 	err |= __get_user(current->thread.lbt.eflags, eflags);
257 
258 	return err;
259 }
260 
/* Software copy of the saved x87 top-of-stack value to the LBT record. */
261 static int copy_ftop_to_sigcontext(struct lbt_context __user *ctx)
262 {
263 	uint32_t  __user *ftop	= &ctx->ftop;
264 
265 	return __put_user(current->thread.fpu.ftop, ftop);
266 }
267 
/* Software restore of the saved x87 top-of-stack value from the LBT record. */
268 static int copy_ftop_from_sigcontext(struct lbt_context __user *ctx)
269 {
270 	uint32_t  __user *ftop	= &ctx->ftop;
271 
272 	return __get_user(current->thread.fpu.ftop, ftop);
273 }
274 #endif
275 
276 /*
277  * Wrappers for the assembly _{save,restore}_fp_context functions.
278  */
279 static int save_hw_fpu_context(struct fpu_context __user *ctx)
280 {
281 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
282 	uint64_t __user *fcc	= &ctx->fcc;
283 	uint32_t __user *fcsr	= &ctx->fcsr;
284 
285 	return _save_fp_context(regs, fcc, fcsr);
286 }
287 
288 static int restore_hw_fpu_context(struct fpu_context __user *ctx)
289 {
290 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
291 	uint64_t __user *fcc	= &ctx->fcc;
292 	uint32_t __user *fcsr	= &ctx->fcsr;
293 
294 	return _restore_fp_context(regs, fcc, fcsr);
295 }
296 
297 static int save_hw_lsx_context(struct lsx_context __user *ctx)
298 {
299 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
300 	uint64_t __user *fcc	= &ctx->fcc;
301 	uint32_t __user *fcsr	= &ctx->fcsr;
302 
303 	return _save_lsx_context(regs, fcc, fcsr);
304 }
305 
306 static int restore_hw_lsx_context(struct lsx_context __user *ctx)
307 {
308 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
309 	uint64_t __user *fcc	= &ctx->fcc;
310 	uint32_t __user *fcsr	= &ctx->fcsr;
311 
312 	return _restore_lsx_context(regs, fcc, fcsr);
313 }
314 
315 static int save_hw_lasx_context(struct lasx_context __user *ctx)
316 {
317 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
318 	uint64_t __user *fcc	= &ctx->fcc;
319 	uint32_t __user *fcsr	= &ctx->fcsr;
320 
321 	return _save_lasx_context(regs, fcc, fcsr);
322 }
323 
324 static int restore_hw_lasx_context(struct lasx_context __user *ctx)
325 {
326 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
327 	uint64_t __user *fcc	= &ctx->fcc;
328 	uint32_t __user *fcsr	= &ctx->fcsr;
329 
330 	return _restore_lasx_context(regs, fcc, fcsr);
331 }
332 
333 /*
334  * Wrappers for the assembly _{save,restore}_lbt_context functions.
335  */
336 #ifdef CONFIG_CPU_HAS_LBT
/* Hardware path: store live LBT scratch registers and eflags to user space. */
337 static int save_hw_lbt_context(struct lbt_context __user *ctx)
338 {
339 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
340 	uint32_t __user *eflags	= (uint32_t *)&ctx->eflags;
341 
342 	return _save_lbt_context(regs, eflags);
343 }
344 
/* Hardware path: load live LBT scratch registers and eflags from user space. */
345 static int restore_hw_lbt_context(struct lbt_context __user *ctx)
346 {
347 	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
348 	uint32_t __user *eflags	= (uint32_t *)&ctx->eflags;
349 
350 	return _restore_lbt_context(regs, eflags);
351 }
352 
/* Hardware path: store the live x87 top-of-stack value to user space. */
353 static int save_hw_ftop_context(struct lbt_context __user *ctx)
354 {
355 	uint32_t __user *ftop	= &ctx->ftop;
356 
357 	return _save_ftop_context(ftop);
358 }
359 
/* Hardware path: load the live x87 top-of-stack value from user space. */
360 static int restore_hw_ftop_context(struct lbt_context __user *ctx)
361 {
362 	uint32_t __user *ftop	= &ctx->ftop;
363 
364 	return _restore_ftop_context(ftop);
365 }
366 #endif
367 
368 static int fcsr_pending(unsigned int __user *fcsr)
369 {
370 	int err, sig = 0;
371 	unsigned int csr, enabled;
372 
373 	err = __get_user(csr, fcsr);
374 	enabled = ((csr & FPU_CSR_ALL_E) << 24);
375 	/*
376 	 * If the signal handler set some FPU exceptions, clear it and
377 	 * send SIGFPE.
378 	 */
379 	if (csr & enabled) {
380 		csr &= ~enabled;
381 		err |= __put_user(csr, fcsr);
382 		sig = SIGFPE;
383 	}
384 	return err ?: sig;
385 }
386 
387 /*
388  * Helper routines
389  */
/*
 * Save FPU state into the user sigframe.  The save runs with preemption
 * and page faults disabled (lock_fpu_owner), so a not-present user page
 * makes the uaccess fail instead of faulting in.  On failure we "touch"
 * the first/last slots with faults enabled to page the frame in, then
 * retry; a failing touch means the sigcontext is genuinely bad.
 */
390 static int protected_save_fpu_context(struct extctx_layout *extctx)
391 {
392 	int err = 0;
393 	struct sctx_info __user *info = extctx->fpu.addr;
394 	struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
395 	uint64_t __user *regs	= (uint64_t *)&fpu_ctx->regs;
396 	uint64_t __user *fcc	= &fpu_ctx->fcc;
397 	uint32_t __user *fcsr	= &fpu_ctx->fcsr;
398 
399 	while (1) {
400 		lock_fpu_owner();
		/* Registers live in hardware?  Use the asm fast path. */
401 		if (is_fpu_owner())
402 			err = save_hw_fpu_context(fpu_ctx);
403 		else
404 			err = copy_fpu_to_sigcontext(fpu_ctx);
405 		unlock_fpu_owner();
406 
407 		err |= __put_user(FPU_CTX_MAGIC, &info->magic);
408 		err |= __put_user(extctx->fpu.size, &info->size);
409 
410 		if (likely(!err))
411 			break;
412 		/* Touch the FPU context and try again */
413 		err = __put_user(0, &regs[0]) |
414 			__put_user(0, &regs[31]) |
415 			__put_user(0, fcc) |
416 			__put_user(0, fcsr);
417 		if (err)
418 			return err;	/* really bad sigcontext */
419 	}
420 
421 	return err;
422 }
423 
/*
 * Restore FPU state from the user sigframe, mirroring
 * protected_save_fpu_context's fault-retry scheme.  Returns a negative
 * uaccess error, SIGFPE if the handler left enabled exception causes
 * pending in FCSR, or 0 on success.
 */
424 static int protected_restore_fpu_context(struct extctx_layout *extctx)
425 {
426 	int err = 0, sig = 0, tmp __maybe_unused;
427 	struct sctx_info __user *info = extctx->fpu.addr;
428 	struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
429 	uint64_t __user *regs	= (uint64_t *)&fpu_ctx->regs;
430 	uint64_t __user *fcc	= &fpu_ctx->fcc;
431 	uint32_t __user *fcsr	= &fpu_ctx->fcsr;
432 
	/* Clear enabled-and-pending FCSR causes first; may yield SIGFPE. */
433 	err = sig = fcsr_pending(fcsr);
434 	if (err < 0)
435 		return err;
436 
437 	while (1) {
438 		lock_fpu_owner();
439 		if (is_fpu_owner())
440 			err = restore_hw_fpu_context(fpu_ctx);
441 		else
442 			err = copy_fpu_from_sigcontext(fpu_ctx);
443 		unlock_fpu_owner();
444 
445 		if (likely(!err))
446 			break;
447 		/* Touch the FPU context and try again */
448 		err = __get_user(tmp, &regs[0]) |
449 			__get_user(tmp, &regs[31]) |
450 			__get_user(tmp, fcc) |
451 			__get_user(tmp, fcsr);
452 		if (err)
453 			break;	/* really bad sigcontext */
454 	}
455 
456 	return err ?: sig;
457 }
458 
/*
 * Save LSX (128-bit vector) state into the user sigframe, with the same
 * pagefault-disabled save plus touch-and-retry scheme as the FPU variant.
 */
459 static int protected_save_lsx_context(struct extctx_layout *extctx)
460 {
461 	int err = 0;
462 	struct sctx_info __user *info = extctx->lsx.addr;
463 	struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
464 	uint64_t __user *regs	= (uint64_t *)&lsx_ctx->regs;
465 	uint64_t __user *fcc	= &lsx_ctx->fcc;
466 	uint32_t __user *fcsr	= &lsx_ctx->fcsr;
467 
468 	while (1) {
469 		lock_fpu_owner();
470 		if (is_lsx_enabled())
471 			err = save_hw_lsx_context(lsx_ctx);
472 		else {
			/* Only scalar FP is live: flush it to thread state,
			   then copy the full (partly stale-upper) vectors. */
473 			if (is_fpu_owner())
474 				save_fp(current);
475 			err = copy_lsx_to_sigcontext(lsx_ctx);
476 		}
477 		unlock_fpu_owner();
478 
479 		err |= __put_user(LSX_CTX_MAGIC, &info->magic);
480 		err |= __put_user(extctx->lsx.size, &info->size);
481 
482 		if (likely(!err))
483 			break;
484 		/* Touch the LSX context and try again */
485 		err = __put_user(0, &regs[0]) |
486 			__put_user(0, &regs[32*2-1]) |
487 			__put_user(0, fcc) |
488 			__put_user(0, fcsr);
489 		if (err)
490 			return err;	/* really bad sigcontext */
491 	}
492 
493 	return err;
494 }
495 
/*
 * Restore LSX state from the user sigframe; counterpart of
 * protected_save_lsx_context.  Returns negative error, SIGFPE from
 * fcsr_pending(), or 0.
 */
496 static int protected_restore_lsx_context(struct extctx_layout *extctx)
497 {
498 	int err = 0, sig = 0, tmp __maybe_unused;
499 	struct sctx_info __user *info = extctx->lsx.addr;
500 	struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
501 	uint64_t __user *regs	= (uint64_t *)&lsx_ctx->regs;
502 	uint64_t __user *fcc	= &lsx_ctx->fcc;
503 	uint32_t __user *fcsr	= &lsx_ctx->fcsr;
504 
505 	err = sig = fcsr_pending(fcsr);
506 	if (err < 0)
507 		return err;
508 
509 	while (1) {
510 		lock_fpu_owner();
511 		if (is_lsx_enabled())
512 			err = restore_hw_lsx_context(lsx_ctx);
513 		else {
			/* Fill thread state, then reload scalar FP hardware
			   if this task still owns the FPU. */
514 			err = copy_lsx_from_sigcontext(lsx_ctx);
515 			if (is_fpu_owner())
516 				restore_fp(current);
517 		}
518 		unlock_fpu_owner();
519 
520 		if (likely(!err))
521 			break;
522 		/* Touch the LSX context and try again */
523 		err = __get_user(tmp, &regs[0]) |
524 			__get_user(tmp, &regs[32*2-1]) |
525 			__get_user(tmp, fcc) |
526 			__get_user(tmp, fcsr);
527 		if (err)
528 			break;	/* really bad sigcontext */
529 	}
530 
531 	return err ?: sig;
532 }
533 
/*
 * Save LASX (256-bit vector) state into the user sigframe, with the same
 * pagefault-disabled save plus touch-and-retry scheme as the FPU variant.
 */
534 static int protected_save_lasx_context(struct extctx_layout *extctx)
535 {
536 	int err = 0;
537 	struct sctx_info __user *info = extctx->lasx.addr;
538 	struct lasx_context __user *lasx_ctx =
539 		(struct lasx_context *)get_ctx_through_ctxinfo(info);
540 	uint64_t __user *regs	= (uint64_t *)&lasx_ctx->regs;
541 	uint64_t __user *fcc	= &lasx_ctx->fcc;
542 	uint32_t __user *fcsr	= &lasx_ctx->fcsr;
543 
544 	while (1) {
545 		lock_fpu_owner();
546 		if (is_lasx_enabled())
547 			err = save_hw_lasx_context(lasx_ctx);
548 		else {
			/* Flush whatever narrower state is live (LSX or
			   scalar FP) to thread state, then copy all of it. */
549 			if (is_lsx_enabled())
550 				save_lsx(current);
551 			else if (is_fpu_owner())
552 				save_fp(current);
553 			err = copy_lasx_to_sigcontext(lasx_ctx);
554 		}
555 		unlock_fpu_owner();
556 
557 		err |= __put_user(LASX_CTX_MAGIC, &info->magic);
558 		err |= __put_user(extctx->lasx.size, &info->size);
559 
560 		if (likely(!err))
561 			break;
562 		/* Touch the LASX context and try again */
563 		err = __put_user(0, &regs[0]) |
564 			__put_user(0, &regs[32*4-1]) |
565 			__put_user(0, fcc) |
566 			__put_user(0, fcsr);
567 		if (err)
568 			return err;	/* really bad sigcontext */
569 	}
570 
571 	return err;
572 }
573 
/*
 * Restore LASX state from the user sigframe; counterpart of
 * protected_save_lasx_context.  Returns negative error, SIGFPE from
 * fcsr_pending(), or 0.
 */
574 static int protected_restore_lasx_context(struct extctx_layout *extctx)
575 {
576 	int err = 0, sig = 0, tmp __maybe_unused;
577 	struct sctx_info __user *info = extctx->lasx.addr;
578 	struct lasx_context __user *lasx_ctx =
579 		(struct lasx_context *)get_ctx_through_ctxinfo(info);
580 	uint64_t __user *regs	= (uint64_t *)&lasx_ctx->regs;
581 	uint64_t __user *fcc	= &lasx_ctx->fcc;
582 	uint32_t __user *fcsr	= &lasx_ctx->fcsr;
583 
584 	err = sig = fcsr_pending(fcsr);
585 	if (err < 0)
586 		return err;
587 
588 	while (1) {
589 		lock_fpu_owner();
590 		if (is_lasx_enabled())
591 			err = restore_hw_lasx_context(lasx_ctx);
592 		else {
			/* Fill thread state, then reload whichever narrower
			   unit (LSX or scalar FP) is currently live. */
593 			err = copy_lasx_from_sigcontext(lasx_ctx);
594 			if (is_lsx_enabled())
595 				restore_lsx(current);
596 			else if (is_fpu_owner())
597 				restore_fp(current);
598 		}
599 		unlock_fpu_owner();
600 
601 		if (likely(!err))
602 			break;
603 		/* Touch the LASX context and try again */
604 		err = __get_user(tmp, &regs[0]) |
605 			__get_user(tmp, &regs[32*4-1]) |
606 			__get_user(tmp, fcc) |
607 			__get_user(tmp, fcsr);
608 		if (err)
609 			break;	/* really bad sigcontext */
610 	}
611 
612 	return err ?: sig;
613 }
614 
615 #ifdef CONFIG_CPU_HAS_LBT
/*
 * Save LBT state (scratch registers, eflags, ftop) into the user
 * sigframe, using the same pagefault-disabled save plus touch-and-retry
 * scheme as the FPU variants.  LBT and ftop ownership are tracked
 * separately, so each part picks its own hardware/software path.
 */
616 static int protected_save_lbt_context(struct extctx_layout *extctx)
617 {
618 	int err = 0;
619 	struct sctx_info __user *info = extctx->lbt.addr;
620 	struct lbt_context __user *lbt_ctx =
621 		(struct lbt_context *)get_ctx_through_ctxinfo(info);
622 	uint64_t __user *regs	= (uint64_t *)&lbt_ctx->regs;
623 	uint32_t __user *eflags	= (uint32_t *)&lbt_ctx->eflags;
624 
625 	while (1) {
626 		lock_lbt_owner();
627 		if (is_lbt_owner())
628 			err |= save_hw_lbt_context(lbt_ctx);
629 		else
630 			err |= copy_lbt_to_sigcontext(lbt_ctx);
		/* ftop lives in the FPU, so its path follows FPU ownership. */
631 		if (is_fpu_owner())
632 			err |= save_hw_ftop_context(lbt_ctx);
633 		else
634 			err |= copy_ftop_to_sigcontext(lbt_ctx);
635 		unlock_lbt_owner();
636 
637 		err |= __put_user(LBT_CTX_MAGIC, &info->magic);
638 		err |= __put_user(extctx->lbt.size, &info->size);
639 
640 		if (likely(!err))
641 			break;
642 		/* Touch the LBT context and try again */
643 		err = __put_user(0, &regs[0]) | __put_user(0, eflags);
644 
645 		if (err)
646 			return err;
647 	}
648 
649 	return err;
650 }
651 
/*
 * Restore LBT state (scratch registers, eflags, ftop) from the user
 * sigframe; counterpart of protected_save_lbt_context.
 */
652 static int protected_restore_lbt_context(struct extctx_layout *extctx)
653 {
654 	int err = 0, tmp __maybe_unused;
655 	struct sctx_info __user *info = extctx->lbt.addr;
656 	struct lbt_context __user *lbt_ctx =
657 		(struct lbt_context *)get_ctx_through_ctxinfo(info);
658 	uint64_t __user *regs	= (uint64_t *)&lbt_ctx->regs;
659 	uint32_t __user *eflags	= (uint32_t *)&lbt_ctx->eflags;
660 
661 	while (1) {
662 		lock_lbt_owner();
663 		if (is_lbt_owner())
664 			err |= restore_hw_lbt_context(lbt_ctx);
665 		else
666 			err |= copy_lbt_from_sigcontext(lbt_ctx);
667 		if (is_fpu_owner())
668 			err |= restore_hw_ftop_context(lbt_ctx);
669 		else
670 			err |= copy_ftop_from_sigcontext(lbt_ctx);
671 		unlock_lbt_owner();
672 
673 		if (likely(!err))
674 			break;
675 		/* Touch the LBT context and try again */
676 		err = __get_user(tmp, &regs[0]) | __get_user(tmp, eflags);
677 
678 		if (err)
679 			return err;
680 	}
681 
682 	return err;
683 }
684 #endif
685 
/*
 * Populate the sigcontext on the user stack: pc, flags, general-purpose
 * registers, the widest live vector/FPU record (LASX > LSX > FPU, they
 * are mutually exclusive in the frame), optional LBT record, and the
 * zero "end" terminator.  Returns accumulated uaccess error.
 */
686 static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
687 			    struct extctx_layout *extctx)
688 {
689 	int i, err = 0;
690 	struct sctx_info __user *info;
691 
692 	err |= __put_user(regs->csr_era, &sc->sc_pc);
693 	err |= __put_user(extctx->flags, &sc->sc_flags);
694 
	/* $r0 is hard-wired zero; store it explicitly, then $r1..$r31. */
695 	err |= __put_user(0, &sc->sc_regs[0]);
696 	for (i = 1; i < 32; i++)
697 		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
698 
699 	if (extctx->lasx.addr)
700 		err |= protected_save_lasx_context(extctx);
701 	else if (extctx->lsx.addr)
702 		err |= protected_save_lsx_context(extctx);
703 	else if (extctx->fpu.addr)
704 		err |= protected_save_fpu_context(extctx);
705 
706 #ifdef CONFIG_CPU_HAS_LBT
707 	if (extctx->lbt.addr)
708 		err |= protected_save_lbt_context(extctx);
709 #endif
710 
711 	/* Set the "end" magic */
712 	info = (struct sctx_info *)extctx->end.addr;
713 	err |= __put_user(0, &info->magic);
714 	err |= __put_user(0, &info->size);
715 
716 	return err;
717 }
718 
/*
 * Walk the extended-context records following a (possibly user-modified)
 * sigcontext, recording the address of each recognized record in
 * *extctx.  Stops at the zero "end" magic.  Returns 0 on success, a
 * negative uaccess error, or -EINVAL for an unknown magic or a record
 * too small to hold its declared payload.
 */
719 static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *extctx)
720 {
721 	int err = 0;
722 	unsigned int magic, size;
723 	struct sctx_info __user *info = (struct sctx_info __user *)&sc->sc_extcontext;
724 
725 	while(1) {
726 		err |= __get_user(magic, &info->magic);
727 		err |= __get_user(size, &info->size);
728 		if (err)
729 			return err;
730 
731 		switch (magic) {
732 		case 0: /* END */
733 			goto done;
734 
735 		case FPU_CTX_MAGIC:
			/* Size must cover header plus full payload. */
736 			if (size < (sizeof(struct sctx_info) +
737 				    sizeof(struct fpu_context)))
738 				goto invalid;
739 			extctx->fpu.addr = info;
740 			break;
741 
742 		case LSX_CTX_MAGIC:
743 			if (size < (sizeof(struct sctx_info) +
744 				    sizeof(struct lsx_context)))
745 				goto invalid;
746 			extctx->lsx.addr = info;
747 			break;
748 
749 		case LASX_CTX_MAGIC:
750 			if (size < (sizeof(struct sctx_info) +
751 				    sizeof(struct lasx_context)))
752 				goto invalid;
753 			extctx->lasx.addr = info;
754 			break;
755 
756 		case LBT_CTX_MAGIC:
757 			if (size < (sizeof(struct sctx_info) +
758 				    sizeof(struct lbt_context)))
759 				goto invalid;
760 			extctx->lbt.addr = info;
761 			break;
762 
763 		default:
764 			goto invalid;
765 		}
766 
		/* Records are laid out back to back; advance by this size. */
767 		info = (struct sctx_info *)((char *)info + size);
768 	}
769 
770 done:
771 	return 0;
772 
773 invalid:
774 	return -EINVAL;
775 }
776 
/*
 * Restore user register and extended state from a sigcontext during
 * sigreturn.  Returns 0, a negative error for a bad frame, or a positive
 * signal number (SIGFPE) that the caller must deliver.
 */
777 static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
778 {
779 	int i, err = 0;
780 	struct extctx_layout extctx;
781 
782 	memset(&extctx, 0, sizeof(struct extctx_layout));
783 
784 	err = __get_user(extctx.flags, &sc->sc_flags);
785 	if (err)
786 		goto bad;
787 
788 	err = parse_extcontext(sc, &extctx);
789 	if (err)
790 		goto bad;
791 
792 	conditional_used_math(extctx.flags & SC_USED_FP);
793 
794 	/*
795 	 * The signal handler may have used FPU; give it up if the program
796 	 * doesn't want it following sigreturn.
797 	 */
798 	if (!(extctx.flags & SC_USED_FP))
799 		lose_fpu(0);
800 
801 	/* Always make any pending restarted system calls return -EINTR */
802 	current->restart_block.fn = do_no_restart_syscall;
803 
	/* $r0 is hard-wired zero, so restore only $r1..$r31. */
804 	err |= __get_user(regs->csr_era, &sc->sc_pc);
805 	for (i = 1; i < 32; i++)
806 		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
807 
	/* Only the widest record present was saved; restore that one. */
808 	if (extctx.lasx.addr)
809 		err |= protected_restore_lasx_context(&extctx);
810 	else if (extctx.lsx.addr)
811 		err |= protected_restore_lsx_context(&extctx);
812 	else if (extctx.fpu.addr)
813 		err |= protected_restore_fpu_context(&extctx);
814 
815 #ifdef CONFIG_CPU_HAS_LBT
816 	if (extctx.lbt.addr)
817 		err |= protected_restore_lbt_context(&extctx);
818 #endif
819 
820 bad:
821 	return err;
822 }
823 
824 static unsigned int handle_flags(void)
825 {
826 	unsigned int flags = 0;
827 
828 	flags = used_math() ? SC_USED_FP : 0;
829 
830 	switch (current->thread.error_code) {
831 	case 1:
832 		flags |= SC_ADDRERR_RD;
833 		break;
834 	case 2:
835 		flags |= SC_ADDRERR_WR;
836 		break;
837 	}
838 
839 	return flags;
840 }
841 
/*
 * Reserve one extended-context record below 'base' on the (downward
 * growing) user stack: align the payload to max(align, 16), then place
 * the sctx_info header immediately before it.  Records layout->addr and
 * the total bytes consumed, and returns the new (lower) stack base.
 */
842 static unsigned long extframe_alloc(struct extctx_layout *extctx,
843 				    struct _ctx_layout *layout,
844 				    size_t size, unsigned int align, unsigned long base)
845 {
846 	unsigned long new_base = base - size;
847 
848 	new_base = round_down(new_base, (align < 16 ? 16 : align));
	/* Header sits directly before the aligned payload. */
849 	new_base -= sizeof(struct sctx_info);
850 
851 	layout->addr = (void *)new_base;
	/* Size includes payload, header, and any alignment padding. */
852 	layout->size = (unsigned int)(base - new_base);
853 	extctx->size += layout->size;
854 
855 	return new_base;
856 }
857 
/*
 * Plan the extended-context layout below 'sp' for a new signal frame.
 * Allocates (top-down) the "end" terminator first, then the single
 * widest live FP/vector record, then the LBT record if applicable.
 * Returns the lowered stack pointer.
 */
858 static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned long sp)
859 {
860 	unsigned long new_sp = sp;
861 
862 	memset(extctx, 0, sizeof(struct extctx_layout));
863 
864 	extctx->flags = handle_flags();
865 
866 	/* Grow down, alloc "end" context info first. */
867 	new_sp -= sizeof(struct sctx_info);
868 	extctx->end.addr = (void *)new_sp;
869 	extctx->end.size = (unsigned int)sizeof(struct sctx_info);
870 	extctx->size += extctx->end.size;
871 
872 	if (extctx->flags & SC_USED_FP) {
		/* Widest live context wins: LASX > LSX > scalar FPU. */
873 		if (cpu_has_lasx && thread_lasx_context_live())
874 			new_sp = extframe_alloc(extctx, &extctx->lasx,
875 			  sizeof(struct lasx_context), LASX_CTX_ALIGN, new_sp);
876 		else if (cpu_has_lsx && thread_lsx_context_live())
877 			new_sp = extframe_alloc(extctx, &extctx->lsx,
878 			  sizeof(struct lsx_context), LSX_CTX_ALIGN, new_sp);
879 		else if (cpu_has_fpu)
880 			new_sp = extframe_alloc(extctx, &extctx->fpu,
881 			  sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
882 	}
883 
884 #ifdef CONFIG_CPU_HAS_LBT
885 	if (cpu_has_lbt && thread_lbt_context_live()) {
886 		new_sp = extframe_alloc(extctx, &extctx->lbt,
887 			  sizeof(struct lbt_context), LBT_CTX_ALIGN, new_sp);
888 	}
889 #endif
890 
891 	return new_sp;
892 }
893 
/*
 * Pick the user-stack address for the new rt_sigframe: current sp or the
 * alternate signal stack (per sigaltstack/SA_ONSTACK via sigsp()),
 * 16-byte aligned, with extended-context records reserved above the
 * frame.  Fills *extctx with the planned layout.
 */
894 void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
895 			  struct extctx_layout *extctx)
896 {
897 	unsigned long sp;
898 
899 	/* Default to using normal stack */
900 	sp = regs->regs[3];
901 
902 	/*
903 	 * If we are on the alternate signal stack and would overflow it, don't.
904 	 * Return an always-bogus address instead so we will die with SIGSEGV.
905 	 */
906 	if (on_sig_stack(sp) &&
907 	    !likely(on_sig_stack(sp - sizeof(struct rt_sigframe))))
908 		return (void __user __force *)(-1UL);
909 
910 	sp = sigsp(sp, ksig);
911 	sp = round_down(sp, 16);
912 	sp = setup_extcontext(extctx, sp);
913 	sp -= sizeof(struct rt_sigframe);
914 
	/* rt_sigframe size and extcontext allocs keep 16-byte alignment. */
915 	if (!IS_ALIGNED(sp, 16))
916 		BUG();
917 
918 	return (void __user *)sp;
919 }
920 
921 /*
922  * Atomically swap in the new signal mask, and wait for a signal.
923  */
924 
/*
 * rt_sigreturn syscall: unwind the signal frame that setup_rt_frame()
 * placed at the current user sp, restoring the blocked-signal mask,
 * register/extended state, and the alternate-stack settings.  A corrupt
 * frame gets SIGSEGV; a positive result from restore_sigcontext()
 * (SIGFPE) is forced on the task.
 */
925 asmlinkage long sys_rt_sigreturn(void)
926 {
927 	int sig;
928 	sigset_t set;
929 	struct pt_regs *regs;
930 	struct rt_sigframe __user *frame;
931 
932 	regs = current_pt_regs();
	/* Handler returned with sp still pointing at the sigframe. */
933 	frame = (struct rt_sigframe __user *)regs->regs[3];
934 	if (!access_ok(frame, sizeof(*frame)))
935 		goto badframe;
936 	if (__copy_from_user(&set, &frame->rs_uctx.uc_sigmask, sizeof(set)))
937 		goto badframe;
938 
939 	set_current_blocked(&set);
940 
941 	sig = restore_sigcontext(regs, &frame->rs_uctx.uc_mcontext);
942 	if (sig < 0)
943 		goto badframe;
944 	else if (sig)
945 		force_sig(sig);
946 
947 	regs->regs[0] = 0; /* No syscall restarting */
948 	if (restore_altstack(&frame->rs_uctx.uc_stack))
949 		goto badframe;
950 
	/* Return the restored a0 so it is not clobbered by a retval. */
951 	return regs->regs[4];
952 
953 badframe:
954 	force_sig(SIGSEGV);
955 	return 0;
956 }
957 
/*
 * Build the rt signal frame on the user stack and redirect user context
 * to the handler: siginfo + ucontext (mask, altstack, mcontext with
 * extended state) are written, then a0/a1/a2, sp, ra (set to the vDSO
 * sigreturn trampoline) and the pc are updated.  Returns 0 or -EFAULT.
 */
958 static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
959 			  struct pt_regs *regs, sigset_t *set)
960 {
961 	int err = 0;
962 	struct extctx_layout extctx;
963 	struct rt_sigframe __user *frame;
964 
965 	frame = get_sigframe(ksig, regs, &extctx);
	/* Frame plus the extended context above it must be writable. */
966 	if (!access_ok(frame, sizeof(*frame) + extctx.size))
967 		return -EFAULT;
968 
969 	/* Create siginfo.  */
970 	err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);
971 
972 	/* Create the ucontext.	 */
973 	err |= __put_user(0, &frame->rs_uctx.uc_flags);
974 	err |= __put_user(NULL, &frame->rs_uctx.uc_link);
975 	err |= __save_altstack(&frame->rs_uctx.uc_stack, regs->regs[3]);
976 	err |= setup_sigcontext(regs, &frame->rs_uctx.uc_mcontext, &extctx);
977 	err |= __copy_to_user(&frame->rs_uctx.uc_sigmask, set, sizeof(*set));
978 
979 	if (err)
980 		return -EFAULT;
981 
982 	/*
983 	 * Arguments to signal handler:
984 	 *
985 	 *   a0 = signal number
986 	 *   a1 = pointer to siginfo
987 	 *   a2 = pointer to ucontext
988 	 *
989 	 * c0_era point to the signal handler, $r3 (sp) points to
990 	 * the struct rt_sigframe.
991 	 */
992 	regs->regs[4] = ksig->sig;
993 	regs->regs[5] = (unsigned long) &frame->rs_info;
994 	regs->regs[6] = (unsigned long) &frame->rs_uctx;
995 	regs->regs[3] = (unsigned long) frame;
	/* ra = vDSO sigreturn trampoline; handler return re-enters kernel. */
996 	regs->regs[1] = (unsigned long) sig_return;
997 	regs->csr_era = (unsigned long) ksig->ka.sa.sa_handler;
998 
999 	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
1000 	       current->comm, current->pid,
1001 	       frame, regs->csr_era, regs->regs[1]);
1002 
1003 	return 0;
1004 }
1005 
/*
 * Deliver one signal: fix up an interrupted syscall's return value (or
 * arrange for it to be restarted by rewinding the pc over the 4-byte
 * syscall instruction), notify rseq, build the signal frame, and finish
 * via signal_setup_done().
 */
1006 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
1007 {
1008 	int ret;
1009 	sigset_t *oldset = sigmask_to_save();
1010 	void *vdso = current->mm->context.vdso;
1011 
	/* regs[0] is nonzero only when we came here from a syscall. */
1012 	/* Are we from a system call? */
1013 	if (regs->regs[0]) {
1014 		switch (regs->regs[4]) {
1015 		case -ERESTART_RESTARTBLOCK:
1016 		case -ERESTARTNOHAND:
1017 			regs->regs[4] = -EINTR;
1018 			break;
1019 		case -ERESTARTSYS:
1020 			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
1021 				regs->regs[4] = -EINTR;
1022 				break;
1023 			}
1024 			fallthrough;
1025 		case -ERESTARTNOINTR:
			/* Re-run the syscall: restore a0, back up one insn. */
1026 			regs->regs[4] = regs->orig_a0;
1027 			regs->csr_era -= 4;
1028 		}
1029 
1030 		regs->regs[0] = 0;	/* Don't deal with this again.	*/
1031 	}
1032 
1033 	rseq_signal_deliver(ksig, regs);
1034 
	/* Return trampoline is the sigreturn entry inside the vDSO. */
1035 	ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset);
1036 
1037 	signal_setup_done(ret, ksig, 0);
1038 }
1039 
/*
 * Entry point from the exit-to-user path when TIF_SIGPENDING (or
 * equivalent work) is set: deliver a pending signal if there is one;
 * otherwise handle syscall-restart bookkeeping and restore the saved
 * sigmask.
 */
1040 void arch_do_signal_or_restart(struct pt_regs *regs)
1041 {
1042 	struct ksignal ksig;
1043 
1044 	if (get_signal(&ksig)) {
1045 		/* Whee!  Actually deliver the signal.	*/
1046 		handle_signal(&ksig, regs);
1047 		return;
1048 	}
1049 
	/* No handler ran: restart the interrupted syscall ourselves. */
1050 	/* Are we from a system call? */
1051 	if (regs->regs[0]) {
1052 		switch (regs->regs[4]) {
1053 		case -ERESTARTNOHAND:
1054 		case -ERESTARTSYS:
1055 		case -ERESTARTNOINTR:
			/* Restore a0 and back up over the syscall insn. */
1056 			regs->regs[4] = regs->orig_a0;
1057 			regs->csr_era -= 4;
1058 			break;
1059 
1060 		case -ERESTART_RESTARTBLOCK:
			/* Redirect to restart_syscall via a7. */
1061 			regs->regs[4] = regs->orig_a0;
1062 			regs->regs[11] = __NR_restart_syscall;
1063 			regs->csr_era -= 4;
1064 			break;
1065 		}
1066 		regs->regs[0] = 0;	/* Don't deal with this again.	*/
1067 	}
1068 
1069 	/*
1070 	 * If there's no signal to deliver, we just put the saved sigmask
1071 	 * back
1072 	 */
1073 	restore_saved_sigmask();
1074 }
1075