xref: /openbmc/linux/arch/mips/kernel/ptrace.c (revision 1d27a0be)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1992 Ross Biro
7  * Copyright (C) Linus Torvalds
8  * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
9  * Copyright (C) 1996 David S. Miller
10  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11  * Copyright (C) 1999 MIPS Technologies, Inc.
12  * Copyright (C) 2000 Ulf Carlsson
13  *
14  * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
15  * binaries.
16  */
17 #include <linux/compiler.h>
18 #include <linux/context_tracking.h>
19 #include <linux/elf.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/sched/task_stack.h>
23 #include <linux/mm.h>
24 #include <linux/errno.h>
25 #include <linux/ptrace.h>
26 #include <linux/regset.h>
27 #include <linux/smp.h>
28 #include <linux/security.h>
29 #include <linux/stddef.h>
30 #include <linux/tracehook.h>
31 #include <linux/audit.h>
32 #include <linux/seccomp.h>
33 #include <linux/ftrace.h>
34 
35 #include <asm/byteorder.h>
36 #include <asm/cpu.h>
37 #include <asm/cpu-info.h>
38 #include <asm/dsp.h>
39 #include <asm/fpu.h>
40 #include <asm/mipsregs.h>
41 #include <asm/mipsmtregs.h>
42 #include <asm/page.h>
43 #include <asm/processor.h>
44 #include <asm/syscall.h>
45 #include <linux/uaccess.h>
46 #include <asm/bootinfo.h>
47 #include <asm/reg.h>
48 
49 #define CREATE_TRACE_POINTS
50 #include <trace/events/syscalls.h>
51 
52 /*
53  * Called by kernel/ptrace.c when detaching.
54  *
55  * Make sure single step bits etc are not set.
56  */
57 void ptrace_disable(struct task_struct *child)
58 {
59 	/* Don't load the watchpoint registers for the ex-child. */
60 	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
61 }
62 
63 /*
64  * Read a general register set.  We always use the 64-bit format, even
65  * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
66  * Registers are sign extended to fill the available space.
67  */
68 int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
69 {
70 	struct pt_regs *regs;
71 	int i;
72 
73 	if (!access_ok(data, 38 * 8))
74 		return -EIO;
75 
76 	regs = task_pt_regs(child);
77 
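	/*
	 * The 38 slots are the 32 GPRs followed by lo, hi, cp0_epc,
	 * cp0_badvaddr, cp0_status and cp0_cause, each stored as a
	 * sign-extended 64-bit value.
	 */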
78 	for (i = 0; i < 32; i++)
79 		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
80 	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
81 	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
82 	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
83 	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
84 	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
85 	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
86 
87 	return 0;
88 }
89 
90 /*
91  * Write a general register set.  As for PTRACE_GETREGS, we always use
92  * the 64-bit format.  On a 32-bit kernel only the lower order half
93  * (according to endianness) will be used.
94  */
95 int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
96 {
97 	struct pt_regs *regs;
98 	int i;
99 
100 	if (!access_ok(data, 38 * 8))
101 		return -EIO;
102 
103 	regs = task_pt_regs(child);
104 
105 	for (i = 0; i < 32; i++)
106 		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
107 	__get_user(regs->lo, (__s64 __user *)&data->lo);
108 	__get_user(regs->hi, (__s64 __user *)&data->hi);
109 	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
110 
111 	/* badvaddr, status, and cause may not be written.  */
112 
113 	/* System call number may have been changed */
114 	mips_syscall_update_nr(child, regs);
115 
116 	return 0;
117 }
118 
119 int ptrace_get_watch_regs(struct task_struct *child,
120 			  struct pt_watch_regs __user *addr)
121 {
122 	enum pt_watch_style style;
123 	int i;
124 
125 	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
126 		return -EIO;
127 	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
128 		return -EIO;
129 
130 #ifdef CONFIG_32BIT
131 	style = pt_watch_style_mips32;
132 #define WATCH_STYLE mips32
133 #else
134 	style = pt_watch_style_mips64;
135 #define WATCH_STYLE mips64
136 #endif
137 
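	/*
	 * WATCH_STYLE selects the mips32 or mips64 member of the
	 * pt_watch_regs union, matching the style reported just above.
	 */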
138 	__put_user(style, &addr->style);
139 	__put_user(boot_cpu_data.watch_reg_use_cnt,
140 		   &addr->WATCH_STYLE.num_valid);
141 	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
142 		__put_user(child->thread.watch.mips3264.watchlo[i],
143 			   &addr->WATCH_STYLE.watchlo[i]);
144 		__put_user(child->thread.watch.mips3264.watchhi[i] &
145 				(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
146 			   &addr->WATCH_STYLE.watchhi[i]);
147 		__put_user(boot_cpu_data.watch_reg_masks[i],
148 			   &addr->WATCH_STYLE.watch_masks[i]);
149 	}
150 	for (; i < 8; i++) {
151 		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
152 		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
153 		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
154 	}
155 
156 	return 0;
157 }
158 
159 int ptrace_set_watch_regs(struct task_struct *child,
160 			  struct pt_watch_regs __user *addr)
161 {
162 	int i;
163 	int watch_active = 0;
164 	unsigned long lt[NUM_WATCH_REGS];
165 	u16 ht[NUM_WATCH_REGS];
166 
167 	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
168 		return -EIO;
169 	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
170 		return -EIO;
171 	/* Check the values. */
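	/*
	 * Each watch address must be a user-space address: below
	 * __UA_LIMIT, or below 2 GiB for a task using 32-bit addresses
	 * on a 64-bit kernel.  Only the address mask bits may be set in
	 * the watchhi words.
	 */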
172 	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
173 		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
174 #ifdef CONFIG_32BIT
175 		if (lt[i] & __UA_LIMIT)
176 			return -EINVAL;
177 #else
178 		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
179 			if (lt[i] & 0xffffffff80000000UL)
180 				return -EINVAL;
181 		} else {
182 			if (lt[i] & __UA_LIMIT)
183 				return -EINVAL;
184 		}
185 #endif
186 		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
187 		if (ht[i] & ~MIPS_WATCHHI_MASK)
188 			return -EINVAL;
189 	}
190 	/* Install them. */
191 	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
192 		if (lt[i] & MIPS_WATCHLO_IRW)
193 			watch_active = 1;
194 		child->thread.watch.mips3264.watchlo[i] = lt[i];
195 		/* Set the G bit. */
196 		child->thread.watch.mips3264.watchhi[i] = ht[i];
197 	}
198 
199 	if (watch_active)
200 		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
201 	else
202 		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
203 
204 	return 0;
205 }
206 
207 /* regset get/set implementations */
208 
209 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
210 
211 static int gpr32_get(struct task_struct *target,
212 		     const struct user_regset *regset,
213 		     unsigned int pos, unsigned int count,
214 		     void *kbuf, void __user *ubuf)
215 {
216 	struct pt_regs *regs = task_pt_regs(target);
217 	u32 uregs[ELF_NGREG] = {};
218 
219 	mips_dump_regs32(uregs, regs);
220 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
221 				   sizeof(uregs));
222 }
223 
224 static int gpr32_set(struct task_struct *target,
225 		     const struct user_regset *regset,
226 		     unsigned int pos, unsigned int count,
227 		     const void *kbuf, const void __user *ubuf)
228 {
229 	struct pt_regs *regs = task_pt_regs(target);
230 	u32 uregs[ELF_NGREG];
231 	unsigned start, num_regs, i;
232 	int err;
233 
234 	start = pos / sizeof(u32);
235 	num_regs = count / sizeof(u32);
236 
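	/*
	 * pos/count are byte offsets; reject a write that would run past
	 * the ELF_NGREG register block.
	 */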
237 	if (start + num_regs > ELF_NGREG)
238 		return -EIO;
239 
240 	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
241 				 sizeof(uregs));
242 	if (err)
243 		return err;
244 
245 	for (i = start; i < num_regs; i++) {
246 		/*
247 		 * Cast all values to signed here so that if this is a 64-bit
248 		 * kernel, the supplied 32-bit values will be sign extended.
249 		 */
250 		switch (i) {
251 		case MIPS32_EF_R1 ... MIPS32_EF_R25:
252 			/* k0/k1 are ignored. */
253 		case MIPS32_EF_R28 ... MIPS32_EF_R31:
254 			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
255 			break;
256 		case MIPS32_EF_LO:
257 			regs->lo = (s32)uregs[i];
258 			break;
259 		case MIPS32_EF_HI:
260 			regs->hi = (s32)uregs[i];
261 			break;
262 		case MIPS32_EF_CP0_EPC:
263 			regs->cp0_epc = (s32)uregs[i];
264 			break;
265 		}
266 	}
267 
268 	/* System call number may have been changed */
269 	mips_syscall_update_nr(target, regs);
270 
271 	return 0;
272 }
273 
274 #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
275 
276 #ifdef CONFIG_64BIT
277 
278 static int gpr64_get(struct task_struct *target,
279 		     const struct user_regset *regset,
280 		     unsigned int pos, unsigned int count,
281 		     void *kbuf, void __user *ubuf)
282 {
283 	struct pt_regs *regs = task_pt_regs(target);
284 	u64 uregs[ELF_NGREG] = {};
285 
286 	mips_dump_regs64(uregs, regs);
287 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
288 				   sizeof(uregs));
289 }
290 
291 static int gpr64_set(struct task_struct *target,
292 		     const struct user_regset *regset,
293 		     unsigned int pos, unsigned int count,
294 		     const void *kbuf, const void __user *ubuf)
295 {
296 	struct pt_regs *regs = task_pt_regs(target);
297 	u64 uregs[ELF_NGREG];
298 	unsigned start, num_regs, i;
299 	int err;
300 
301 	start = pos / sizeof(u64);
302 	num_regs = count / sizeof(u64);
303 
304 	if (start + num_regs > ELF_NGREG)
305 		return -EIO;
306 
307 	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
308 				 sizeof(uregs));
309 	if (err)
310 		return err;
311 
312 	for (i = start; i < num_regs; i++) {
313 		switch (i) {
314 		case MIPS64_EF_R1 ... MIPS64_EF_R25:
315 			/* k0/k1 are ignored. */
316 		case MIPS64_EF_R28 ... MIPS64_EF_R31:
317 			regs->regs[i - MIPS64_EF_R0] = uregs[i];
318 			break;
319 		case MIPS64_EF_LO:
320 			regs->lo = uregs[i];
321 			break;
322 		case MIPS64_EF_HI:
323 			regs->hi = uregs[i];
324 			break;
325 		case MIPS64_EF_CP0_EPC:
326 			regs->cp0_epc = uregs[i];
327 			break;
328 		}
329 	}
330 
331 	/* System call number may have been changed */
332 	mips_syscall_update_nr(target, regs);
333 
334 	return 0;
335 }
336 
337 #endif /* CONFIG_64BIT */
338 
339 
340 #ifdef CONFIG_MIPS_FP_SUPPORT
341 
342 /*
343  * Poke at FCSR according to its mask.  Set the Cause bits even
344  * if a corresponding Enable bit is set.  This will be noticed at
345  * the time the thread is switched to and SIGFPE thrown accordingly.
346  */
347 static void ptrace_setfcr31(struct task_struct *child, u32 value)
348 {
349 	u32 fcr31;
350 	u32 mask;
351 
352 	fcr31 = child->thread.fpu.fcr31;
353 	mask = boot_cpu_data.fpu_msk31;
354 	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
355 }
356 
357 int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
358 {
359 	int i;
360 
361 	if (!access_ok(data, 33 * 8))
362 		return -EIO;
363 
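	/*
	 * Layout: 32 64-bit FP registers followed by FCSR and FIR as
	 * 32-bit words (hence the __u32 indices 64 and 65 below).
	 */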
364 	if (tsk_used_math(child)) {
365 		union fpureg *fregs = get_fpu_regs(child);
366 		for (i = 0; i < 32; i++)
367 			__put_user(get_fpr64(&fregs[i], 0),
368 				   i + (__u64 __user *)data);
369 	} else {
370 		for (i = 0; i < 32; i++)
371 			__put_user((__u64) -1, i + (__u64 __user *) data);
372 	}
373 
374 	__put_user(child->thread.fpu.fcr31, data + 64);
375 	__put_user(boot_cpu_data.fpu_id, data + 65);
376 
377 	return 0;
378 }
379 
380 int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
381 {
382 	union fpureg *fregs;
383 	u64 fpr_val;
384 	u32 value;
385 	int i;
386 
387 	if (!access_ok(data, 33 * 8))
388 		return -EIO;
389 
390 	init_fp_ctx(child);
391 	fregs = get_fpu_regs(child);
392 
393 	for (i = 0; i < 32; i++) {
394 		__get_user(fpr_val, i + (__u64 __user *)data);
395 		set_fpr64(&fregs[i], 0, fpr_val);
396 	}
397 
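	/* FCSR sits at __u32 index 64, right after the 32 64-bit registers. */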
398 	__get_user(value, data + 64);
399 	ptrace_setfcr31(child, value);
400 
401 	/* FIR may not be written.  */
402 
403 	return 0;
404 }
405 
406 /*
407  * Copy the floating-point context to the supplied NT_PRFPREG buffer,
408  * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
409  * correspond 1:1 to buffer slots.  Only general registers are copied.
410  */
411 static int fpr_get_fpa(struct task_struct *target,
412 		       unsigned int *pos, unsigned int *count,
413 		       void **kbuf, void __user **ubuf)
414 {
415 	return user_regset_copyout(pos, count, kbuf, ubuf,
416 				   &target->thread.fpu,
417 				   0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
418 }
419 
420 /*
421  * Copy the floating-point context to the supplied NT_PRFPREG buffer,
422  * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
423  * general register slots are copied to buffer slots.  Only general
424  * registers are copied.
425  */
426 static int fpr_get_msa(struct task_struct *target,
427 		       unsigned int *pos, unsigned int *count,
428 		       void **kbuf, void __user **ubuf)
429 {
430 	unsigned int i;
431 	u64 fpr_val;
432 	int err;
433 
434 	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
435 	for (i = 0; i < NUM_FPU_REGS; i++) {
436 		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
437 		err = user_regset_copyout(pos, count, kbuf, ubuf,
438 					  &fpr_val, i * sizeof(elf_fpreg_t),
439 					  (i + 1) * sizeof(elf_fpreg_t));
440 		if (err)
441 			return err;
442 	}
443 
444 	return 0;
445 }
446 
447 /*
448  * Copy the floating-point context to the supplied NT_PRFPREG buffer.
449  * Choose the appropriate helper for general registers, and then copy
450  * the FCSR and FIR registers separately.
451  */
452 static int fpr_get(struct task_struct *target,
453 		   const struct user_regset *regset,
454 		   unsigned int pos, unsigned int count,
455 		   void *kbuf, void __user *ubuf)
456 {
457 	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
458 	const int fir_pos = fcr31_pos + sizeof(u32);
459 	int err;
460 
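	/*
	 * With MSA each FP register is a 128-bit vector, so only its low
	 * 64 bits match an elf_fpreg_t; without MSA the two have the same
	 * size and the context can be copied directly.
	 */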
461 	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
462 		err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
463 	else
464 		err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
465 	if (err)
466 		return err;
467 
468 	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
469 				  &target->thread.fpu.fcr31,
470 				  fcr31_pos, fcr31_pos + sizeof(u32));
471 	if (err)
472 		return err;
473 
474 	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
475 				  &boot_cpu_data.fpu_id,
476 				  fir_pos, fir_pos + sizeof(u32));
477 
478 	return err;
479 }
480 
481 /*
482  * Copy the supplied NT_PRFPREG buffer to the floating-point context,
483  * !CONFIG_CPU_HAS_MSA variant.  Buffer slots correspond 1:1 to FP
484  * context's general register slots.  Only general registers are copied.
485  */
486 static int fpr_set_fpa(struct task_struct *target,
487 		       unsigned int *pos, unsigned int *count,
488 		       const void **kbuf, const void __user **ubuf)
489 {
490 	return user_regset_copyin(pos, count, kbuf, ubuf,
491 				  &target->thread.fpu,
492 				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
493 }
494 
495 /*
496  * Copy the supplied NT_PRFPREG buffer to the floating-point context,
497  * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
498  * bits only of FP context's general register slots.  Only general
499  * registers are copied.
500  */
501 static int fpr_set_msa(struct task_struct *target,
502 		       unsigned int *pos, unsigned int *count,
503 		       const void **kbuf, const void __user **ubuf)
504 {
505 	unsigned int i;
506 	u64 fpr_val;
507 	int err;
508 
509 	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
510 	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
511 		err = user_regset_copyin(pos, count, kbuf, ubuf,
512 					 &fpr_val, i * sizeof(elf_fpreg_t),
513 					 (i + 1) * sizeof(elf_fpreg_t));
514 		if (err)
515 			return err;
516 		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
517 	}
518 
519 	return 0;
520 }
521 
522 /*
523  * Copy the supplied NT_PRFPREG buffer to the floating-point context.
524  * Choose the appropriate helper for general registers, and then copy
525  * the FCSR register separately.  Ignore the incoming FIR register
526  * contents though, as the register is read-only.
527  *
528  * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
529  * which is supposed to have been guaranteed by the kernel before
530  * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
531  * so that we can safely avoid preinitializing temporaries for
532  * partial register writes.
533  */
534 static int fpr_set(struct task_struct *target,
535 		   const struct user_regset *regset,
536 		   unsigned int pos, unsigned int count,
537 		   const void *kbuf, const void __user *ubuf)
538 {
539 	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
540 	const int fir_pos = fcr31_pos + sizeof(u32);
541 	u32 fcr31;
542 	int err;
543 
544 	BUG_ON(count % sizeof(elf_fpreg_t));
545 
546 	if (pos + count > sizeof(elf_fpregset_t))
547 		return -EIO;
548 
549 	init_fp_ctx(target);
550 
551 	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
552 		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
553 	else
554 		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
555 	if (err)
556 		return err;
557 
558 	if (count > 0) {
559 		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
560 					 &fcr31,
561 					 fcr31_pos, fcr31_pos + sizeof(u32));
562 		if (err)
563 			return err;
564 
565 		ptrace_setfcr31(target, fcr31);
566 	}
567 
568 	if (count > 0)
569 		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
570 						fir_pos,
571 						fir_pos + sizeof(u32));
572 
573 	return err;
574 }
575 
576 /* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer.  */
577 static int fp_mode_get(struct task_struct *target,
578 		       const struct user_regset *regset,
579 		       unsigned int pos, unsigned int count,
580 		       void *kbuf, void __user *ubuf)
581 {
582 	int fp_mode;
583 
584 	fp_mode = mips_get_process_fp_mode(target);
585 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
586 				   sizeof(fp_mode));
587 }
588 
589 /*
590  * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
591  *
592  * We optimize for the case where `count % sizeof(int) == 0', which
593  * is supposed to have been guaranteed by the kernel before calling
594  * us, e.g. in `ptrace_regset'.  We enforce that requirement, so
595  * that we can safely avoid preinitializing temporaries for partial
596  * mode writes.
597  */
598 static int fp_mode_set(struct task_struct *target,
599 		       const struct user_regset *regset,
600 		       unsigned int pos, unsigned int count,
601 		       const void *kbuf, const void __user *ubuf)
602 {
603 	int fp_mode;
604 	int err;
605 
606 	BUG_ON(count % sizeof(int));
607 
608 	if (pos + count > sizeof(fp_mode))
609 		return -EIO;
610 
611 	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
612 				 sizeof(fp_mode));
613 	if (err)
614 		return err;
615 
616 	if (count > 0)
617 		err = mips_set_process_fp_mode(target, fp_mode);
618 
619 	return err;
620 }
621 
622 #endif /* CONFIG_MIPS_FP_SUPPORT */
623 
624 #ifdef CONFIG_CPU_HAS_MSA
625 
626 struct msa_control_regs {
627 	unsigned int fir;
628 	unsigned int fcsr;
629 	unsigned int msair;
630 	unsigned int msacsr;
631 };
632 
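/*
 * For each FP register copy up to live_sz bytes of live context into the
 * regset buffer and fill the remainder of its regset-sized slot with
 * all-ones, so state the task has never written reads back as ~0.
 */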
633 static int copy_pad_fprs(struct task_struct *target,
634 			 const struct user_regset *regset,
635 			 unsigned int *ppos, unsigned int *pcount,
636 			 void **pkbuf, void __user **pubuf,
637 			 unsigned int live_sz)
638 {
639 	int i, j, start, start_pad, err;
640 	unsigned long long fill = ~0ull;
641 	unsigned int cp_sz, pad_sz;
642 
643 	cp_sz = min(regset->size, live_sz);
644 	pad_sz = regset->size - cp_sz;
645 	WARN_ON(pad_sz % sizeof(fill));
646 
647 	i = start = err = 0;
648 	for (; i < NUM_FPU_REGS; i++, start += regset->size) {
649 		err |= user_regset_copyout(ppos, pcount, pkbuf, pubuf,
650 					   &target->thread.fpu.fpr[i],
651 					   start, start + cp_sz);
652 
653 		start_pad = start + cp_sz;
654 		for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
655 			err |= user_regset_copyout(ppos, pcount, pkbuf, pubuf,
656 						   &fill, start_pad,
657 						   start_pad + sizeof(fill));
658 			start_pad += sizeof(fill);
659 		}
660 	}
661 
662 	return err;
663 }
664 
665 static int msa_get(struct task_struct *target,
666 		   const struct user_regset *regset,
667 		   unsigned int pos, unsigned int count,
668 		   void *kbuf, void __user *ubuf)
669 {
670 	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
671 	const struct msa_control_regs ctrl_regs = {
672 		.fir = boot_cpu_data.fpu_id,
673 		.fcsr = target->thread.fpu.fcr31,
674 		.msair = boot_cpu_data.msa_id,
675 		.msacsr = target->thread.fpu.msacsr,
676 	};
677 	int err;
678 
679 	if (!tsk_used_math(target)) {
680 		/* The task hasn't used FP or MSA, fill with 0xff */
681 		err = copy_pad_fprs(target, regset, &pos, &count,
682 				    &kbuf, &ubuf, 0);
683 	} else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
684 		/* Copy scalar FP context, fill the rest with 0xff */
685 		err = copy_pad_fprs(target, regset, &pos, &count,
686 				    &kbuf, &ubuf, 8);
687 	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
688 		/* Trivially copy the vector registers */
689 		err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
690 					  &target->thread.fpu.fpr,
691 					  0, wr_size);
692 	} else {
693 		/* Copy as much context as possible, fill the rest with 0xff */
694 		err = copy_pad_fprs(target, regset, &pos, &count,
695 				    &kbuf, &ubuf,
696 				    sizeof(target->thread.fpu.fpr[0]));
697 	}
698 
699 	err |= user_regset_copyout(&pos, &count, &kbuf, &ubuf,
700 				   &ctrl_regs, wr_size,
701 				   wr_size + sizeof(ctrl_regs));
702 	return err;
703 }
704 
705 static int msa_set(struct task_struct *target,
706 		   const struct user_regset *regset,
707 		   unsigned int pos, unsigned int count,
708 		   const void *kbuf, const void __user *ubuf)
709 {
710 	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
711 	struct msa_control_regs ctrl_regs;
712 	unsigned int cp_sz;
713 	int i, err, start;
714 
715 	init_fp_ctx(target);
716 
717 	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
718 		/* Trivially copy the vector registers */
719 		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
720 					 &target->thread.fpu.fpr,
721 					 0, wr_size);
722 	} else {
723 		/* Copy as much context as possible */
724 		cp_sz = min_t(unsigned int, regset->size,
725 			      sizeof(target->thread.fpu.fpr[0]));
726 
727 		i = start = err = 0;
728 		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
729 			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
730 						  &target->thread.fpu.fpr[i],
731 						  start, start + cp_sz);
732 		}
733 	}
734 
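	/*
	 * The control words follow the vector registers; the exception
	 * cause bits of FCSR and MSACSR are masked off so userspace
	 * cannot plant pending FP/MSA exceptions directly.
	 */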
735 	if (!err)
736 		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
737 					 wr_size, wr_size + sizeof(ctrl_regs));
738 	if (!err) {
739 		target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
740 		target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
741 	}
742 
743 	return err;
744 }
745 
746 #endif /* CONFIG_CPU_HAS_MSA */
747 
748 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
749 
750 /*
751  * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
752  */
753 static int dsp32_get(struct task_struct *target,
754 		     const struct user_regset *regset,
755 		     unsigned int pos, unsigned int count,
756 		     void *kbuf, void __user *ubuf)
757 {
758 	unsigned int start, num_regs, i;
759 	u32 dspregs[NUM_DSP_REGS + 1];
760 
761 	BUG_ON(count % sizeof(u32));
762 
763 	if (!cpu_has_dsp)
764 		return -EIO;
765 
766 	start = pos / sizeof(u32);
767 	num_regs = count / sizeof(u32);
768 
769 	if (start + num_regs > NUM_DSP_REGS + 1)
770 		return -EIO;
771 
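	/*
	 * Slots 0..NUM_DSP_REGS-1 are the DSP accumulators, the final
	 * slot is the DSPControl register.
	 */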
772 	for (i = start; i < num_regs; i++)
773 		switch (i) {
774 		case 0 ... NUM_DSP_REGS - 1:
775 			dspregs[i] = target->thread.dsp.dspr[i];
776 			break;
777 		case NUM_DSP_REGS:
778 			dspregs[i] = target->thread.dsp.dspcontrol;
779 			break;
780 		}
781 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
782 				   sizeof(dspregs));
783 }
784 
785 /*
786  * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
787  */
788 static int dsp32_set(struct task_struct *target,
789 		     const struct user_regset *regset,
790 		     unsigned int pos, unsigned int count,
791 		     const void *kbuf, const void __user *ubuf)
792 {
793 	unsigned int start, num_regs, i;
794 	u32 dspregs[NUM_DSP_REGS + 1];
795 	int err;
796 
797 	BUG_ON(count % sizeof(u32));
798 
799 	if (!cpu_has_dsp)
800 		return -EIO;
801 
802 	start = pos / sizeof(u32);
803 	num_regs = count / sizeof(u32);
804 
805 	if (start + num_regs > NUM_DSP_REGS + 1)
806 		return -EIO;
807 
808 	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
809 				 sizeof(dspregs));
810 	if (err)
811 		return err;
812 
813 	for (i = start; i < num_regs; i++)
814 		switch (i) {
815 		case 0 ... NUM_DSP_REGS - 1:
816 			target->thread.dsp.dspr[i] = (s32)dspregs[i];
817 			break;
818 		case NUM_DSP_REGS:
819 			target->thread.dsp.dspcontrol = (s32)dspregs[i];
820 			break;
821 		}
822 
823 	return 0;
824 }
825 
826 #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
827 
828 #ifdef CONFIG_64BIT
829 
830 /*
831  * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
832  */
833 static int dsp64_get(struct task_struct *target,
834 		     const struct user_regset *regset,
835 		     unsigned int pos, unsigned int count,
836 		     void *kbuf, void __user *ubuf)
837 {
838 	unsigned int start, num_regs, i;
839 	u64 dspregs[NUM_DSP_REGS + 1];
840 
841 	BUG_ON(count % sizeof(u64));
842 
843 	if (!cpu_has_dsp)
844 		return -EIO;
845 
846 	start = pos / sizeof(u64);
847 	num_regs = count / sizeof(u64);
848 
849 	if (start + num_regs > NUM_DSP_REGS + 1)
850 		return -EIO;
851 
852 	for (i = start; i < num_regs; i++)
853 		switch (i) {
854 		case 0 ... NUM_DSP_REGS - 1:
855 			dspregs[i] = target->thread.dsp.dspr[i];
856 			break;
857 		case NUM_DSP_REGS:
858 			dspregs[i] = target->thread.dsp.dspcontrol;
859 			break;
860 		}
861 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
862 				   sizeof(dspregs));
863 }
864 
865 /*
866  * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
867  */
868 static int dsp64_set(struct task_struct *target,
869 		     const struct user_regset *regset,
870 		     unsigned int pos, unsigned int count,
871 		     const void *kbuf, const void __user *ubuf)
872 {
873 	unsigned int start, num_regs, i;
874 	u64 dspregs[NUM_DSP_REGS + 1];
875 	int err;
876 
877 	BUG_ON(count % sizeof(u64));
878 
879 	if (!cpu_has_dsp)
880 		return -EIO;
881 
882 	start = pos / sizeof(u64);
883 	num_regs = count / sizeof(u64);
884 
885 	if (start + num_regs > NUM_DSP_REGS + 1)
886 		return -EIO;
887 
888 	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
889 				 sizeof(dspregs));
890 	if (err)
891 		return err;
892 
893 	for (i = start; i < num_regs; i++)
894 		switch (i) {
895 		case 0 ... NUM_DSP_REGS - 1:
896 			target->thread.dsp.dspr[i] = dspregs[i];
897 			break;
898 		case NUM_DSP_REGS:
899 			target->thread.dsp.dspcontrol = dspregs[i];
900 			break;
901 		}
902 
903 	return 0;
904 }
905 
906 #endif /* CONFIG_64BIT */
907 
908 /*
909  * Determine whether the DSP context is present.
910  */
911 static int dsp_active(struct task_struct *target,
912 		      const struct user_regset *regset)
913 {
914 	return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
915 }
916 
917 enum mips_regset {
918 	REGSET_GPR,
919 	REGSET_DSP,
920 #ifdef CONFIG_MIPS_FP_SUPPORT
921 	REGSET_FPR,
922 	REGSET_FP_MODE,
923 #endif
924 #ifdef CONFIG_CPU_HAS_MSA
925 	REGSET_MSA,
926 #endif
927 };
928 
929 struct pt_regs_offset {
930 	const char *name;
931 	int offset;
932 };
933 
934 #define REG_OFFSET_NAME(reg, r) {					\
935 	.name = #reg,							\
936 	.offset = offsetof(struct pt_regs, r)				\
937 }
938 
939 #define REG_OFFSET_END {						\
940 	.name = NULL,							\
941 	.offset = 0							\
942 }
943 
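/* Maps user-visible register names to their offsets within struct pt_regs. */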
944 static const struct pt_regs_offset regoffset_table[] = {
945 	REG_OFFSET_NAME(r0, regs[0]),
946 	REG_OFFSET_NAME(r1, regs[1]),
947 	REG_OFFSET_NAME(r2, regs[2]),
948 	REG_OFFSET_NAME(r3, regs[3]),
949 	REG_OFFSET_NAME(r4, regs[4]),
950 	REG_OFFSET_NAME(r5, regs[5]),
951 	REG_OFFSET_NAME(r6, regs[6]),
952 	REG_OFFSET_NAME(r7, regs[7]),
953 	REG_OFFSET_NAME(r8, regs[8]),
954 	REG_OFFSET_NAME(r9, regs[9]),
955 	REG_OFFSET_NAME(r10, regs[10]),
956 	REG_OFFSET_NAME(r11, regs[11]),
957 	REG_OFFSET_NAME(r12, regs[12]),
958 	REG_OFFSET_NAME(r13, regs[13]),
959 	REG_OFFSET_NAME(r14, regs[14]),
960 	REG_OFFSET_NAME(r15, regs[15]),
961 	REG_OFFSET_NAME(r16, regs[16]),
962 	REG_OFFSET_NAME(r17, regs[17]),
963 	REG_OFFSET_NAME(r18, regs[18]),
964 	REG_OFFSET_NAME(r19, regs[19]),
965 	REG_OFFSET_NAME(r20, regs[20]),
966 	REG_OFFSET_NAME(r21, regs[21]),
967 	REG_OFFSET_NAME(r22, regs[22]),
968 	REG_OFFSET_NAME(r23, regs[23]),
969 	REG_OFFSET_NAME(r24, regs[24]),
970 	REG_OFFSET_NAME(r25, regs[25]),
971 	REG_OFFSET_NAME(r26, regs[26]),
972 	REG_OFFSET_NAME(r27, regs[27]),
973 	REG_OFFSET_NAME(r28, regs[28]),
974 	REG_OFFSET_NAME(r29, regs[29]),
975 	REG_OFFSET_NAME(r30, regs[30]),
976 	REG_OFFSET_NAME(r31, regs[31]),
977 	REG_OFFSET_NAME(c0_status, cp0_status),
978 	REG_OFFSET_NAME(hi, hi),
979 	REG_OFFSET_NAME(lo, lo),
980 #ifdef CONFIG_CPU_HAS_SMARTMIPS
981 	REG_OFFSET_NAME(acx, acx),
982 #endif
983 	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
984 	REG_OFFSET_NAME(c0_cause, cp0_cause),
985 	REG_OFFSET_NAME(c0_epc, cp0_epc),
986 #ifdef CONFIG_CPU_CAVIUM_OCTEON
987 	REG_OFFSET_NAME(mpl0, mpl[0]),
988 	REG_OFFSET_NAME(mpl1, mpl[1]),
989 	REG_OFFSET_NAME(mpl2, mpl[2]),
990 	REG_OFFSET_NAME(mtp0, mtp[0]),
991 	REG_OFFSET_NAME(mtp1, mtp[1]),
992 	REG_OFFSET_NAME(mtp2, mtp[2]),
993 #endif
994 	REG_OFFSET_END,
995 };
996 
997 /**
998  * regs_query_register_offset() - query register offset from its name
999  * @name:       the name of a register
1000  *
1001  * regs_query_register_offset() returns the offset of a register in struct
1002  * pt_regs from its name. If the name is invalid, this returns -EINVAL.
1003  */
1004 int regs_query_register_offset(const char *name)
1005 {
1006 	const struct pt_regs_offset *roff;
1007 	for (roff = regoffset_table; roff->name != NULL; roff++)
1008 		if (!strcmp(roff->name, name))
1009 			return roff->offset;
1010 	return -EINVAL;
1011 }
1012 
1013 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
1014 
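/*
 * Regsets exposed for 32-bit kernels and o32 tasks on 64-bit kernels;
 * GPR and DSP state is presented in 32-bit slots.
 */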
1015 static const struct user_regset mips_regsets[] = {
1016 	[REGSET_GPR] = {
1017 		.core_note_type	= NT_PRSTATUS,
1018 		.n		= ELF_NGREG,
1019 		.size		= sizeof(unsigned int),
1020 		.align		= sizeof(unsigned int),
1021 		.get		= gpr32_get,
1022 		.set		= gpr32_set,
1023 	},
1024 	[REGSET_DSP] = {
1025 		.core_note_type	= NT_MIPS_DSP,
1026 		.n		= NUM_DSP_REGS + 1,
1027 		.size		= sizeof(u32),
1028 		.align		= sizeof(u32),
1029 		.get		= dsp32_get,
1030 		.set		= dsp32_set,
1031 		.active		= dsp_active,
1032 	},
1033 #ifdef CONFIG_MIPS_FP_SUPPORT
1034 	[REGSET_FPR] = {
1035 		.core_note_type	= NT_PRFPREG,
1036 		.n		= ELF_NFPREG,
1037 		.size		= sizeof(elf_fpreg_t),
1038 		.align		= sizeof(elf_fpreg_t),
1039 		.get		= fpr_get,
1040 		.set		= fpr_set,
1041 	},
1042 	[REGSET_FP_MODE] = {
1043 		.core_note_type	= NT_MIPS_FP_MODE,
1044 		.n		= 1,
1045 		.size		= sizeof(int),
1046 		.align		= sizeof(int),
1047 		.get		= fp_mode_get,
1048 		.set		= fp_mode_set,
1049 	},
1050 #endif
1051 #ifdef CONFIG_CPU_HAS_MSA
1052 	[REGSET_MSA] = {
1053 		.core_note_type	= NT_MIPS_MSA,
1054 		.n		= NUM_FPU_REGS + 1,
1055 		.size		= 16,
1056 		.align		= 16,
1057 		.get		= msa_get,
1058 		.set		= msa_set,
1059 	},
1060 #endif
1061 };
1062 
1063 static const struct user_regset_view user_mips_view = {
1064 	.name		= "mips",
1065 	.e_machine	= ELF_ARCH,
1066 	.ei_osabi	= ELF_OSABI,
1067 	.regsets	= mips_regsets,
1068 	.n		= ARRAY_SIZE(mips_regsets),
1069 };
1070 
1071 #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
1072 
1073 #ifdef CONFIG_64BIT
1074 
1075 static const struct user_regset mips64_regsets[] = {
1076 	[REGSET_GPR] = {
1077 		.core_note_type	= NT_PRSTATUS,
1078 		.n		= ELF_NGREG,
1079 		.size		= sizeof(unsigned long),
1080 		.align		= sizeof(unsigned long),
1081 		.get		= gpr64_get,
1082 		.set		= gpr64_set,
1083 	},
1084 	[REGSET_DSP] = {
1085 		.core_note_type	= NT_MIPS_DSP,
1086 		.n		= NUM_DSP_REGS + 1,
1087 		.size		= sizeof(u64),
1088 		.align		= sizeof(u64),
1089 		.get		= dsp64_get,
1090 		.set		= dsp64_set,
1091 		.active		= dsp_active,
1092 	},
1093 #ifdef CONFIG_MIPS_FP_SUPPORT
1094 	[REGSET_FP_MODE] = {
1095 		.core_note_type	= NT_MIPS_FP_MODE,
1096 		.n		= 1,
1097 		.size		= sizeof(int),
1098 		.align		= sizeof(int),
1099 		.get		= fp_mode_get,
1100 		.set		= fp_mode_set,
1101 	},
1102 	[REGSET_FPR] = {
1103 		.core_note_type	= NT_PRFPREG,
1104 		.n		= ELF_NFPREG,
1105 		.size		= sizeof(elf_fpreg_t),
1106 		.align		= sizeof(elf_fpreg_t),
1107 		.get		= fpr_get,
1108 		.set		= fpr_set,
1109 	},
1110 #endif
1111 #ifdef CONFIG_CPU_HAS_MSA
1112 	[REGSET_MSA] = {
1113 		.core_note_type	= NT_MIPS_MSA,
1114 		.n		= NUM_FPU_REGS + 1,
1115 		.size		= 16,
1116 		.align		= 16,
1117 		.get		= msa_get,
1118 		.set		= msa_set,
1119 	},
1120 #endif
1121 };
1122 
1123 static const struct user_regset_view user_mips64_view = {
1124 	.name		= "mips64",
1125 	.e_machine	= ELF_ARCH,
1126 	.ei_osabi	= ELF_OSABI,
1127 	.regsets	= mips64_regsets,
1128 	.n		= ARRAY_SIZE(mips64_regsets),
1129 };
1130 
1131 #ifdef CONFIG_MIPS32_N32
1132 
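/*
 * n32 tasks reuse the 64-bit regsets; EF_MIPS_ABI2 in e_flags lets
 * debuggers tell the n32 view apart from the n64 one.
 */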
1133 static const struct user_regset_view user_mipsn32_view = {
1134 	.name		= "mipsn32",
1135 	.e_flags	= EF_MIPS_ABI2,
1136 	.e_machine	= ELF_ARCH,
1137 	.ei_osabi	= ELF_OSABI,
1138 	.regsets	= mips64_regsets,
1139 	.n		= ARRAY_SIZE(mips64_regsets),
1140 };
1141 
1142 #endif /* CONFIG_MIPS32_N32 */
1143 
1144 #endif /* CONFIG_64BIT */
1145 
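/*
 * Pick the regset view matching the tracee's ABI: o32 tasks get the
 * 32-bit view, n32 tasks the n32 view and everything else the 64-bit
 * view; a 32-bit kernel only ever has the 32-bit view.
 */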
1146 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1147 {
1148 #ifdef CONFIG_32BIT
1149 	return &user_mips_view;
1150 #else
1151 #ifdef CONFIG_MIPS32_O32
1152 	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
1153 		return &user_mips_view;
1154 #endif
1155 #ifdef CONFIG_MIPS32_N32
1156 	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
1157 		return &user_mipsn32_view;
1158 #endif
1159 	return &user_mips64_view;
1160 #endif
1161 }
1162 
1163 long arch_ptrace(struct task_struct *child, long request,
1164 		 unsigned long addr, unsigned long data)
1165 {
1166 	int ret;
1167 	void __user *addrp = (void __user *) addr;
1168 	void __user *datavp = (void __user *) data;
1169 	unsigned long __user *datalp = (void __user *) data;
1170 
1171 	switch (request) {
1172 	/* when I and D space are separate, these will need to be fixed. */
1173 	case PTRACE_PEEKTEXT: /* read word at location addr. */
1174 	case PTRACE_PEEKDATA:
1175 		ret = generic_ptrace_peekdata(child, addr, data);
1176 		break;
1177 
1178 	/* Read the word at location addr in the USER area. */
1179 	case PTRACE_PEEKUSR: {
1180 		struct pt_regs *regs;
1181 		unsigned long tmp = 0;
1182 
1183 		regs = task_pt_regs(child);
1184 		ret = 0;  /* Default return value. */
1185 
1186 		switch (addr) {
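		/*
		 * addr names a pseudo register: 0..31 are the GPRs, the
		 * remaining values select FP, hi/lo, cp0 and DSP state.
		 */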
1187 		case 0 ... 31:
1188 			tmp = regs->regs[addr];
1189 			break;
1190 #ifdef CONFIG_MIPS_FP_SUPPORT
1191 		case FPR_BASE ... FPR_BASE + 31: {
1192 			union fpureg *fregs;
1193 
1194 			if (!tsk_used_math(child)) {
1195 				/* FP not yet used */
1196 				tmp = -1;
1197 				break;
1198 			}
1199 			fregs = get_fpu_regs(child);
1200 
1201 #ifdef CONFIG_32BIT
1202 			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1203 				/*
1204 				 * The odd registers are actually the high
1205 				 * order bits of the values stored in the even
1206 				 * registers.
1207 				 */
1208 				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
1209 						addr & 1);
1210 				break;
1211 			}
1212 #endif
1213 			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
1214 			break;
1215 		}
1216 		case FPC_CSR:
1217 			tmp = child->thread.fpu.fcr31;
1218 			break;
1219 		case FPC_EIR:
1220 			/* implementation / version register */
1221 			tmp = boot_cpu_data.fpu_id;
1222 			break;
1223 #endif
1224 		case PC:
1225 			tmp = regs->cp0_epc;
1226 			break;
1227 		case CAUSE:
1228 			tmp = regs->cp0_cause;
1229 			break;
1230 		case BADVADDR:
1231 			tmp = regs->cp0_badvaddr;
1232 			break;
1233 		case MMHI:
1234 			tmp = regs->hi;
1235 			break;
1236 		case MMLO:
1237 			tmp = regs->lo;
1238 			break;
1239 #ifdef CONFIG_CPU_HAS_SMARTMIPS
1240 		case ACX:
1241 			tmp = regs->acx;
1242 			break;
1243 #endif
1244 		case DSP_BASE ... DSP_BASE + 5: {
1245 			dspreg_t *dregs;
1246 
1247 			if (!cpu_has_dsp) {
1248 				tmp = 0;
1249 				ret = -EIO;
1250 				goto out;
1251 			}
1252 			dregs = __get_dsp_regs(child);
1253 			tmp = dregs[addr - DSP_BASE];
1254 			break;
1255 		}
1256 		case DSP_CONTROL:
1257 			if (!cpu_has_dsp) {
1258 				tmp = 0;
1259 				ret = -EIO;
1260 				goto out;
1261 			}
1262 			tmp = child->thread.dsp.dspcontrol;
1263 			break;
1264 		default:
1265 			tmp = 0;
1266 			ret = -EIO;
1267 			goto out;
1268 		}
1269 		ret = put_user(tmp, datalp);
1270 		break;
1271 	}
1272 
1273 	/* when I and D space are separate, this will have to be fixed. */
1274 	case PTRACE_POKETEXT: /* write the word at location addr. */
1275 	case PTRACE_POKEDATA:
1276 		ret = generic_ptrace_pokedata(child, addr, data);
1277 		break;
1278 
1279 	case PTRACE_POKEUSR: {
1280 		struct pt_regs *regs;
1281 		ret = 0;
1282 		regs = task_pt_regs(child);
1283 
1284 		switch (addr) {
1285 		case 0 ... 31:
1286 			regs->regs[addr] = data;
1287 			/* System call number may have been changed */
1288 			if (addr == 2)
1289 				mips_syscall_update_nr(child, regs);
1290 			else if (addr == 4 &&
1291 				 mips_syscall_is_indirect(child, regs))
1292 				mips_syscall_update_nr(child, regs);
1293 			break;
1294 #ifdef CONFIG_MIPS_FP_SUPPORT
1295 		case FPR_BASE ... FPR_BASE + 31: {
1296 			union fpureg *fregs = get_fpu_regs(child);
1297 
1298 			init_fp_ctx(child);
1299 #ifdef CONFIG_32BIT
1300 			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1301 				/*
1302 				 * The odd registers are actually the high
1303 				 * order bits of the values stored in the even
1304 				 * registers.
1305 				 */
1306 				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
1307 					  addr & 1, data);
1308 				break;
1309 			}
1310 #endif
1311 			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
1312 			break;
1313 		}
1314 		case FPC_CSR:
1315 			init_fp_ctx(child);
1316 			ptrace_setfcr31(child, data);
1317 			break;
1318 #endif
1319 		case PC:
1320 			regs->cp0_epc = data;
1321 			break;
1322 		case MMHI:
1323 			regs->hi = data;
1324 			break;
1325 		case MMLO:
1326 			regs->lo = data;
1327 			break;
1328 #ifdef CONFIG_CPU_HAS_SMARTMIPS
1329 		case ACX:
1330 			regs->acx = data;
1331 			break;
1332 #endif
1333 		case DSP_BASE ... DSP_BASE + 5: {
1334 			dspreg_t *dregs;
1335 
1336 			if (!cpu_has_dsp) {
1337 				ret = -EIO;
1338 				break;
1339 			}
1340 
1341 			dregs = __get_dsp_regs(child);
1342 			dregs[addr - DSP_BASE] = data;
1343 			break;
1344 		}
1345 		case DSP_CONTROL:
1346 			if (!cpu_has_dsp) {
1347 				ret = -EIO;
1348 				break;
1349 			}
1350 			child->thread.dsp.dspcontrol = data;
1351 			break;
1352 		default:
1353 			/* The rest are not allowed. */
1354 			ret = -EIO;
1355 			break;
1356 		}
1357 		break;
1358 	}
1359 
1360 	case PTRACE_GETREGS:
1361 		ret = ptrace_getregs(child, datavp);
1362 		break;
1363 
1364 	case PTRACE_SETREGS:
1365 		ret = ptrace_setregs(child, datavp);
1366 		break;
1367 
1368 #ifdef CONFIG_MIPS_FP_SUPPORT
1369 	case PTRACE_GETFPREGS:
1370 		ret = ptrace_getfpregs(child, datavp);
1371 		break;
1372 
1373 	case PTRACE_SETFPREGS:
1374 		ret = ptrace_setfpregs(child, datavp);
1375 		break;
1376 #endif
1377 	case PTRACE_GET_THREAD_AREA:
1378 		ret = put_user(task_thread_info(child)->tp_value, datalp);
1379 		break;
1380 
1381 	case PTRACE_GET_WATCH_REGS:
1382 		ret = ptrace_get_watch_regs(child, addrp);
1383 		break;
1384 
1385 	case PTRACE_SET_WATCH_REGS:
1386 		ret = ptrace_set_watch_regs(child, addrp);
1387 		break;
1388 
1389 	default:
1390 		ret = ptrace_request(child, request, addr, data);
1391 		break;
1392 	}
1393  out:
1394 	return ret;
1395 }
1396 
1397 /*
1398  * Notification of system call entry/exit
1399  * - triggered by current->work.syscall_trace
1400  */
1401 asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
1402 {
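	/*
	 * Return the (possibly rewritten) syscall number to execute, or
	 * -1 if the tracer or seccomp rejected the syscall.
	 */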
1403 	user_exit();
1404 
1405 	current_thread_info()->syscall = syscall;
1406 
1407 	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
1408 		if (tracehook_report_syscall_entry(regs))
1409 			return -1;
1410 		syscall = current_thread_info()->syscall;
1411 	}
1412 
1413 #ifdef CONFIG_SECCOMP
1414 	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
1415 		int ret, i;
1416 		struct seccomp_data sd;
1417 		unsigned long args[6];
1418 
1419 		sd.nr = syscall;
1420 		sd.arch = syscall_get_arch(current);
1421 		syscall_get_arguments(current, regs, args);
1422 		for (i = 0; i < 6; i++)
1423 			sd.args[i] = args[i];
1424 		sd.instruction_pointer = KSTK_EIP(current);
1425 
1426 		ret = __secure_computing(&sd);
1427 		if (ret == -1)
1428 			return ret;
1429 		syscall = current_thread_info()->syscall;
1430 	}
1431 #endif
1432 
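	/* The tracepoint reports the syscall number still held in $v0 (regs[2]). */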
1433 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1434 		trace_sys_enter(regs, regs->regs[2]);
1435 
1436 	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
1437 			    regs->regs[6], regs->regs[7]);
1438 
1439 	/*
1440 	 * Negative syscall numbers are mistaken for rejected syscalls, but
1441 	 * won't have had the return value set appropriately, so we do so now.
1442 	 */
1443 	if (syscall < 0)
1444 		syscall_set_return_value(current, regs, -ENOSYS, 0);
1445 	return syscall;
1446 }
1447 
1448 /*
1449  * Notification of system call entry/exit
1450  * - triggered by current->work.syscall_trace
1451  */
1452 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1453 {
1454 	/*
1455 	 * We may come here right after calling schedule_user()
1456 	 * or do_notify_resume(), in which case we can be in RCU
1457 	 * user mode.
1458 	 */
1459 	user_exit();
1460 
1461 	audit_syscall_exit(regs);
1462 
1463 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1464 		trace_sys_exit(regs, regs_return_value(regs));
1465 
1466 	if (test_thread_flag(TIF_SYSCALL_TRACE))
1467 		tracehook_report_syscall_exit(regs, 0);
1468 
1469 	user_enter();
1470 }
1471