// SPDX-License-Identifier: GPL-2.0+
/*
 * Author: Hanlu Li <lihanlu@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 1994 - 2000  Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/audit.h>
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/asm.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/fpu.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>

#ifdef DEBUG_SIG
#  define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
#else
#  define DEBUGP(fmt, args...)
#endif

/* Make sure we will not lose FPU ownership */
#define lock_fpu_owner()	({ preempt_disable(); pagefault_disable(); })
#define unlock_fpu_owner()	({ pagefault_enable(); preempt_enable(); })

/* Assembly functions to move context to/from the FPU */
extern asmlinkage int
_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
extern asmlinkage int
_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);

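/*
 * Layout of the frame pushed onto the user stack for an RT signal:
 * siginfo followed by the ucontext (sigmask, sigaltstack info and the
 * machine context).
 */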
struct rt_sigframe {
	struct siginfo rs_info;
	struct ucontext rs_uctx;
};

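/*
 * Kernel-internal bookkeeping for the extended context records that are
 * appended after the sigcontext on the user stack, each preceded by a
 * struct sctx_info header.
 */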
struct _ctx_layout {
	struct sctx_info *addr;
	unsigned int size;
};

struct extctx_layout {
	unsigned long size;
	unsigned int flags;
	struct _ctx_layout fpu;
	struct _ctx_layout end;
};

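/* Return the record payload that immediately follows an sctx_info header. */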
static void __user *get_ctx_through_ctxinfo(struct sctx_info *info)
{
	return (void __user *)((char *)info + sizeof(struct sctx_info));
}

/*
 * Copy the thread's saved context to/from a signal context presumed to be
 * on the user stack, and therefore accessed with the appropriate uaccess.h
 * macros.
 */
static int copy_fpu_to_sigcontext(struct fpu_context __user *ctx)
{
	int i;
	int err = 0;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |=
		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
			       &regs[i]);
	}
	err |= __put_user(current->thread.fpu.fcc, fcc);
	err |= __put_user(current->thread.fpu.fcsr, fcsr);

	return err;
}

static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx)
{
	int i;
	int err = 0;
	u64 fpr_val;
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err |= __get_user(fpr_val, &regs[i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcc, fcc);
	err |= __get_user(current->thread.fpu.fcsr, fcsr);

	return err;
}

/*
 * Wrappers for the assembly _{save,restore}_fp_context functions.
 */
static int save_hw_fpu_context(struct fpu_context __user *ctx)
{
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	return _save_fp_context(regs, fcc, fcsr);
}

static int restore_hw_fpu_context(struct fpu_context __user *ctx)
{
	uint64_t __user *regs	= (uint64_t *)&ctx->regs;
	uint64_t __user *fcc	= &ctx->fcc;
	uint32_t __user *fcsr	= &ctx->fcsr;

	return _restore_fp_context(regs, fcc, fcsr);
}

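/*
 * Check whether the signal handler left any enabled FPU exception cause
 * bits set in the saved FCSR.
 */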
static int fcsr_pending(unsigned int __user *fcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fcsr);
	enabled = ((csr & FPU_CSR_ALL_E) << 24);
	/*
	 * If the signal handler set some FPU exceptions, clear them and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}

/*
 * Helper routines: save/restore FPU state to/from the user-stack signal
 * frame.  The copy runs with preemption and pagefaults disabled so FPU
 * ownership cannot change underneath us; if the user buffer has not been
 * faulted in, the copy fails, so we touch it outside the critical section
 * and retry.
 */
static int protected_save_fpu_context(struct extctx_layout *extctx)
{
	int err = 0;
	struct sctx_info __user *info = extctx->fpu.addr;
	struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&fpu_ctx->regs;
	uint64_t __user *fcc	= &fpu_ctx->fcc;
	uint32_t __user *fcsr	= &fpu_ctx->fcsr;

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner())
			err = save_hw_fpu_context(fpu_ctx);
		else
			err = copy_fpu_to_sigcontext(fpu_ctx);
		unlock_fpu_owner();

		err |= __put_user(FPU_CTX_MAGIC, &info->magic);
		err |= __put_user(extctx->fpu.size, &info->size);

		if (likely(!err))
			break;
		/* Touch the FPU context and try again */
		err = __put_user(0, &regs[0]) |
			__put_user(0, &regs[31]) |
			__put_user(0, fcc) |
			__put_user(0, fcsr);
		if (err)
			return err;	/* really bad sigcontext */
	}

	return err;
}

static int protected_restore_fpu_context(struct extctx_layout *extctx)
{
	int err = 0, sig = 0, tmp __maybe_unused;
	struct sctx_info __user *info = extctx->fpu.addr;
	struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
	uint64_t __user *regs	= (uint64_t *)&fpu_ctx->regs;
	uint64_t __user *fcc	= &fpu_ctx->fcc;
	uint32_t __user *fcsr	= &fpu_ctx->fcsr;

	err = sig = fcsr_pending(fcsr);
	if (err < 0)
		return err;

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner())
			err = restore_hw_fpu_context(fpu_ctx);
		else
			err = copy_fpu_from_sigcontext(fpu_ctx);
		unlock_fpu_owner();

		if (likely(!err))
			break;
		/* Touch the FPU context and try again */
		err = __get_user(tmp, &regs[0]) |
			__get_user(tmp, &regs[31]) |
			__get_user(tmp, fcc) |
			__get_user(tmp, fcsr);
		if (err)
			break;	/* really bad sigcontext */
	}

	return err ?: sig;
}

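/* Save the general-purpose registers and any extended context records. */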
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
			    struct extctx_layout *extctx)
{
	int i, err = 0;
	struct sctx_info __user *info;

	err |= __put_user(regs->csr_era, &sc->sc_pc);
	err |= __put_user(extctx->flags, &sc->sc_flags);

	err |= __put_user(0, &sc->sc_regs[0]);
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

	if (extctx->fpu.addr)
		err |= protected_save_fpu_context(extctx);

	/* Set the "end" magic */
	info = (struct sctx_info *)extctx->end.addr;
	err |= __put_user(0, &info->magic);
	err |= __put_user(0, &info->size);

	return err;
}

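/*
 * Walk the extended context records that follow the sigcontext and note
 * where each known record type (currently only the FPU context) lives.
 */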
static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *extctx)
{
	int err = 0;
	unsigned int magic, size;
	struct sctx_info __user *info = (struct sctx_info __user *)&sc->sc_extcontext;

	while (1) {
		err |= __get_user(magic, &info->magic);
		err |= __get_user(size, &info->size);
		if (err)
			return err;

		switch (magic) {
		case 0: /* END */
			goto done;

		case FPU_CTX_MAGIC:
			if (size < (sizeof(struct sctx_info) +
				    sizeof(struct fpu_context)))
				goto invalid;
			extctx->fpu.addr = info;
			break;

		default:
			goto invalid;
		}

		info = (struct sctx_info *)((char *)info + size);
	}

done:
	return 0;

invalid:
	return -EINVAL;
}

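/* Restore the register state (and FPU state, if any) from the sigcontext. */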
static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int i, err = 0;
	struct extctx_layout extctx;

	memset(&extctx, 0, sizeof(struct extctx_layout));

	err = __get_user(extctx.flags, &sc->sc_flags);
	if (err)
		goto bad;

	err = parse_extcontext(sc, &extctx);
	if (err)
		goto bad;

	conditional_used_math(extctx.flags & SC_USED_FP);

	/*
	 * The signal handler may have used FPU; give it up if the program
	 * doesn't want it following sigreturn.
	 */
	if (!(extctx.flags & SC_USED_FP))
		lose_fpu(0);

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->csr_era, &sc->sc_pc);
	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	if (extctx.fpu.addr)
		err |= protected_restore_fpu_context(&extctx);

bad:
	return err;
}

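/* Build the sc_flags value describing FPU usage and any pending address error. */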
static unsigned int handle_flags(void)
{
	unsigned int flags = 0;

	flags = used_math() ? SC_USED_FP : 0;

	switch (current->thread.error_code) {
	case 1:
		flags |= SC_ADDRERR_RD;
		break;
	case 2:
		flags |= SC_ADDRERR_WR;
		break;
	}

	return flags;
}

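/*
 * Reserve room, growing downwards from base, for one extended context
 * record plus its sctx_info header, honouring the record's alignment.
 */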
static unsigned long extframe_alloc(struct extctx_layout *extctx,
				    struct _ctx_layout *layout,
				    size_t size, unsigned int align, unsigned long base)
{
	unsigned long new_base = base - size;

	new_base = round_down(new_base, (align < 16 ? 16 : align));
	new_base -= sizeof(struct sctx_info);

	layout->addr = (void *)new_base;
	layout->size = (unsigned int)(base - new_base);
	extctx->size += layout->size;

	return new_base;
}

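/*
 * Lay out the extended context records below sp, growing downwards: the
 * "end" record (magic/size == 0) sits highest, with the FPU record below
 * it when the task has used the FPU.  Returns the new, lower stack top.
 */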
static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned long sp)
{
	unsigned long new_sp = sp;

	memset(extctx, 0, sizeof(struct extctx_layout));

	extctx->flags = handle_flags();

	/* Grow down, alloc "end" context info first. */
	new_sp -= sizeof(struct sctx_info);
	extctx->end.addr = (void *)new_sp;
	extctx->end.size = (unsigned int)sizeof(struct sctx_info);
	extctx->size += extctx->end.size;

	if (extctx->flags & SC_USED_FP) {
		if (cpu_has_fpu)
			new_sp = extframe_alloc(extctx, &extctx->fpu,
			  sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
	}

	return new_sp;
}

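/* Pick the stack (normal or alternate) and carve out the whole signal frame. */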
void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
			  struct extctx_layout *extctx)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = regs->regs[3];

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (on_sig_stack(sp) &&
	    !likely(on_sig_stack(sp - sizeof(struct rt_sigframe))))
		return (void __user __force *)(-1UL);

	sp = sigsp(sp, ksig);
	sp = round_down(sp, 16);
	sp = setup_extcontext(extctx, sp);
	sp -= sizeof(struct rt_sigframe);

	if (!IS_ALIGNED(sp, 16))
		BUG();

	return (void __user *)sp;
}

/*
 * Restore the context that setup_rt_frame() saved on the user stack;
 * userspace returns here through the vDSO sigreturn trampoline once the
 * signal handler is finished.
 */

asmlinkage long sys_rt_sigreturn(void)
{
	int sig;
	sigset_t set;
	struct pt_regs *regs;
	struct rt_sigframe __user *frame;

	regs = current_pt_regs();
	frame = (struct rt_sigframe __user *)regs->regs[3];
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uctx.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	sig = restore_sigcontext(regs, &frame->rs_uctx.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig);

	regs->regs[0] = 0; /* No syscall restarting */
	if (restore_altstack(&frame->rs_uctx.uc_stack))
		goto badframe;

	return regs->regs[4];

badframe:
	force_sig(SIGSEGV);
	return 0;
}

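/*
 * Build the complete rt_sigframe on the user stack and rewrite the
 * registers so that returning to user mode enters the signal handler.
 */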
static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	int err = 0;
	struct extctx_layout extctx;
	struct rt_sigframe __user *frame;

	frame = get_sigframe(ksig, regs, &extctx);
	if (!access_ok(frame, sizeof(*frame) + extctx.size))
		return -EFAULT;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

	/* Create the ucontext.	 */
	err |= __put_user(0, &frame->rs_uctx.uc_flags);
	err |= __put_user(NULL, &frame->rs_uctx.uc_link);
	err |= __save_altstack(&frame->rs_uctx.uc_stack, regs->regs[3]);
	err |= setup_sigcontext(regs, &frame->rs_uctx.uc_mcontext, &extctx);
	err |= __copy_to_user(&frame->rs_uctx.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = pointer to siginfo
	 *   a2 = pointer to ucontext
	 *
	 * csr_era points to the signal handler, $r3 (sp) points to
	 * the struct rt_sigframe.
	 */
	regs->regs[4] = ksig->sig;
	regs->regs[5] = (unsigned long) &frame->rs_info;
	regs->regs[6] = (unsigned long) &frame->rs_uctx;
	regs->regs[3] = (unsigned long) frame;
	regs->regs[1] = (unsigned long) sig_return;
	regs->csr_era = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->csr_era, regs->regs[1]);

	return 0;
}

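/* Deliver one signal: fix up syscall restarting, then set up the frame. */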
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	int ret;
	sigset_t *oldset = sigmask_to_save();
	void *vdso = current->mm->context.vdso;

	/* Are we from a system call? */
	if (regs->regs[0]) {
		switch (regs->regs[4]) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			regs->regs[4] = -EINTR;
			break;
		case -ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->regs[4] = -EINTR;
				break;
			}
			fallthrough;
		case -ERESTARTNOINTR:
			regs->regs[4] = regs->orig_a0;
			regs->csr_era -= 4;
		}

		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	rseq_signal_deliver(ksig, regs);

	ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset);

	signal_setup_done(ret, ksig, 0);
}

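/*
 * Called on the exit-to-user-mode path: deliver a pending signal if there
 * is one, otherwise handle syscall restarting and restore the saved
 * sigmask.
 */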
void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
{
	struct ksignal ksig;

	if (has_signal && get_signal(&ksig)) {
		/* Whee!  Actually deliver the signal.	*/
		handle_signal(&ksig, regs);
		return;
	}

	/* Are we from a system call? */
	if (regs->regs[0]) {
		switch (regs->regs[4]) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			regs->regs[4] = regs->orig_a0;
			regs->csr_era -= 4;
			break;

		case -ERESTART_RESTARTBLOCK:
			regs->regs[4] = regs->orig_a0;
			regs->regs[11] = __NR_restart_syscall;
			regs->csr_era -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	restore_saved_sigmask();
}
567