/* xref: /openbmc/linux/arch/mips/kernel/ptrace.c (revision 31b90347) */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * binaries.
 */
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/ftrace.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/syscall.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}

/*
 * Read a general register set.  We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, __s64 __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], data + i);
	__put_user((long)regs->lo, data + EF_LO - EF_R0);
	__put_user((long)regs->hi, data + EF_HI - EF_R0);
	__put_user((long)regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
	__put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
	__put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
	__put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);

	return 0;
}

/*
 * Write a general register set.  As for PTRACE_GETREGS, we always use
 * the 64-bit format.  On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, __s64 __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_READ, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], data + i);
	__get_user(regs->lo, data + EF_LO - EF_R0);
	__get_user(regs->hi, data + EF_HI - EF_R0);
	__get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);

	/* badvaddr, status, and cause may not be written.  */

	return 0;
}

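/*
 * Read the FPU register set.  The layout seen by userland is 32 64-bit
 * data registers followed by the 32-bit FCR31 and FIR words; a task
 * that has never used the FPU reports all ones for the data registers.
 */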
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;
	unsigned int tmp;

	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		fpureg_t *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(fregs[i], i + (__u64 __user *) data);
	} else {
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	__put_user(child->thread.fpu.fcr31, data + 64);

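	/*
	 * Read the FIR straight from the hardware: briefly enable the
	 * FPU (and pin a single VPE on MT cores) so that cfc1 $0 works,
	 * then restore Status.  Report 0 when there is no hardware FPU.
	 */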
	preempt_disable();
	if (cpu_has_fpu) {
		unsigned int flags;

		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			flags = read_c0_status();
			__enable_fpu();
			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
			write_c0_status(flags);
			evpe(vpflags);
		} else {
			flags = read_c0_status();
			__enable_fpu();
			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
			write_c0_status(flags);
		}
	} else {
		tmp = 0;
	}
	preempt_enable();
	__put_user(tmp, data + 65);

	return 0;
}

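/*
 * Write the FPU register set.  The layout matches ptrace_getfpregs();
 * the read-only FIR word is ignored.
 */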
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	fpureg_t *fregs;
	int i;

	if (!access_ok(VERIFY_READ, data, 33 * 8))
		return -EIO;

	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(fregs[i], i + (__u64 __user *) data);

	__get_user(child->thread.fpu.fcr31, data + 64);

	/* FIR may not be written.  */

	return 0;
}

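/*
 * Copy the tracee's hardware watchpoint state to userland.  Watch
 * register slots beyond watch_reg_use_cnt read back as zero.
 */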
int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
		return -EIO;

#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(current_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(current_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}

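/*
 * Validate and install new watchpoint values.  TIF_LOAD_WATCH is set
 * only if at least one watchlo entry enables a watch (its I/R/W bits),
 * so the watch registers are loaded when the tracee next runs.
 */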
int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values. */
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		if (ht[i] & ~0xff8)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & 7)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		/* Set the G bit. */
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}

/* regset get/set implementations */
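/*
 * The GPR set is exchanged as a raw struct pt_regs image, the FPR set
 * as the thread's saved FPU context (fcr31 handling is still marked
 * XXX below).
 */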

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   regs, 0, sizeof(*regs));
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &newregs,
				 0, sizeof(newregs));
	if (ret)
		return ret;

	*task_pt_regs(target) = newregs;

	return 0;
}

static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fpu,
				   0, sizeof(elf_fpregset_t));
	/* XXX fcr31  */
}

static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu,
				  0, sizeof(elf_fpregset_t));
	/* XXX fcr31  */
}

enum mips_regset {
	REGSET_GPR,
	REGSET_FPR,
};

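/*
 * The 32-bit and 64-bit views share the same get/set callbacks and
 * differ only in the element size advertised for the general
 * registers.
 */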
static const struct user_regset mips_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned int),
		.align		= sizeof(unsigned int),
		.get		= gpr_get,
		.set		= gpr_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.get		= fpr_get,
		.set		= fpr_set,
	},
};

static const struct user_regset_view user_mips_view = {
	.name		= "mips",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips_regsets,
	.n		= ARRAY_SIZE(mips_regsets),
};

static const struct user_regset mips64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned long),
		.align		= sizeof(unsigned long),
		.get		= gpr_get,
		.set		= gpr_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.get		= fpr_get,
		.set		= fpr_set,
	},
};

static const struct user_regset_view user_mips64_view = {
	.name		= "mips",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

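/*
 * Select which regset view describes a task: the 32-bit view on 32-bit
 * kernels or when the O32 register model is in use, the 64-bit view
 * otherwise.
 */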
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
	return &user_mips_view;
#endif

#ifdef CONFIG_MIPS32_O32
	if (test_thread_flag(TIF_32BIT_REGS))
		return &user_mips_view;
#endif

	return &user_mips64_view;
}

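/*
 * Handle the MIPS specific ptrace requests: USER area peeks and pokes,
 * whole register set transfers, the thread area pointer and the watch
 * registers.  Everything else is passed on to ptrace_request().
 */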
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
		case FPR_BASE ... FPR_BASE + 31:
			if (tsk_used_math(child)) {
				fpureg_t *fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				if (addr & 1)
					tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
				else
					tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
#endif
#ifdef CONFIG_64BIT
				tmp = fregs[addr - FPR_BASE];
#endif
			} else {
				tmp = -1;	/* FP not yet used  */
			}
			break;
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR: { /* implementation / version register */
			unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
			unsigned long irqflags;
			unsigned int mtflags;
#endif /* CONFIG_MIPS_MT_SMTC */

			preempt_disable();
			if (!cpu_has_fpu) {
				preempt_enable();
				break;
			}

#ifdef CONFIG_MIPS_MT_SMTC
			/* Read-modify-write of Status must be atomic */
			local_irq_save(irqflags);
			mtflags = dmt();
#endif /* CONFIG_MIPS_MT_SMTC */
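			/*
			 * Briefly enable the FPU (and pin a single VPE
			 * on MT cores) so cfc1 can read the FIR, then
			 * restore the previous Status value.
			 */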
			if (cpu_has_mipsmt) {
				unsigned int vpflags = dvpe();
				flags = read_c0_status();
				__enable_fpu();
				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
				write_c0_status(flags);
				evpe(vpflags);
			} else {
				flags = read_c0_status();
				__enable_fpu();
				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
				write_c0_status(flags);
			}
#ifdef CONFIG_MIPS_MT_SMTC
			emt(mtflags);
			local_irq_restore(irqflags);
#endif /* CONFIG_MIPS_MT_SMTC */
			preempt_enable();
			break;
		}
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, datalp);
		break;
	}


	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			break;
		case FPR_BASE ... FPR_BASE + 31: {
			fpureg_t *fregs = get_fpu_regs(child);

			if (!tsk_used_math(child)) {
				/* FP not yet used  */
				memset(&child->thread.fpu, ~0,
				       sizeof(child->thread.fpu));
				child->thread.fpu.fcr31 = 0;
			}
#ifdef CONFIG_32BIT
			/*
			 * The odd registers are actually the high order bits
			 * of the values stored in the even registers - unless
			 * we're using r2k_switch.S.
			 */
			if (addr & 1) {
				fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
				fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
			} else {
				fregs[addr - FPR_BASE] &= ~0xffffffffLL;
				fregs[addr - FPR_BASE] |= data;
			}
#endif
#ifdef CONFIG_64BIT
			fregs[addr - FPR_BASE] = data;
#endif
			break;
		}
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case FPC_CSR:
			child->thread.fpu.fcr31 = data;
			break;
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
 out:
	return ret;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;
	user_exit();

	/* do the secure computing check first */
	secure_computing_strict(regs->regs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	audit_syscall_entry(__syscall_get_arch(),
			    regs->regs[2],
			    regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	user_enter();
}