xref: /openbmc/linux/arch/arm/kernel/ptrace.c (revision f20c7d91)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/arch/arm/kernel/ptrace.c
4  *
5  *  By Ross Biro 1/23/92
6  * edited by Linus Torvalds
7  * ARM modifications Copyright (C) 2000 Russell King
8  */
9 #include <linux/kernel.h>
10 #include <linux/sched/signal.h>
11 #include <linux/sched/task_stack.h>
12 #include <linux/mm.h>
13 #include <linux/elf.h>
14 #include <linux/smp.h>
15 #include <linux/ptrace.h>
16 #include <linux/user.h>
17 #include <linux/security.h>
18 #include <linux/init.h>
19 #include <linux/signal.h>
20 #include <linux/uaccess.h>
21 #include <linux/perf_event.h>
22 #include <linux/hw_breakpoint.h>
23 #include <linux/regset.h>
24 #include <linux/audit.h>
25 #include <linux/tracehook.h>
26 #include <linux/unistd.h>
27 
28 #include <asm/traps.h>
29 
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/syscalls.h>
32 
33 #define REG_PC	15
34 #define REG_PSR	16
/*
 * This code does not yet catch signals sent when the child dies;
 * that is handled in exit.c and signal.c.
 */
39 
40 #if 0
41 /*
42  * Breakpoint SWI instruction: SWI &9F0001
43  */
44 #define BREAKINST_ARM	0xef9f0001
45 #define BREAKINST_THUMB	0xdf00		/* fill this in later */
46 #else
47 /*
48  * New breakpoints - use an undefined instruction.  The ARM architecture
49  * reference manual guarantees that the following instruction space
50  * will produce an undefined instruction exception on all CPUs:
51  *
52  *  ARM:   xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
53  *  Thumb: 1101 1110 xxxx xxxx
54  */
55 #define BREAKINST_ARM	0xe7f001f0
56 #define BREAKINST_THUMB	0xde01
57 #endif
58 
59 struct pt_regs_offset {
60 	const char *name;
61 	int offset;
62 };
63 
64 #define REG_OFFSET_NAME(r) \
65 	{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
66 #define REG_OFFSET_END {.name = NULL, .offset = 0}
67 
68 static const struct pt_regs_offset regoffset_table[] = {
69 	REG_OFFSET_NAME(r0),
70 	REG_OFFSET_NAME(r1),
71 	REG_OFFSET_NAME(r2),
72 	REG_OFFSET_NAME(r3),
73 	REG_OFFSET_NAME(r4),
74 	REG_OFFSET_NAME(r5),
75 	REG_OFFSET_NAME(r6),
76 	REG_OFFSET_NAME(r7),
77 	REG_OFFSET_NAME(r8),
78 	REG_OFFSET_NAME(r9),
79 	REG_OFFSET_NAME(r10),
80 	REG_OFFSET_NAME(fp),
81 	REG_OFFSET_NAME(ip),
82 	REG_OFFSET_NAME(sp),
83 	REG_OFFSET_NAME(lr),
84 	REG_OFFSET_NAME(pc),
85 	REG_OFFSET_NAME(cpsr),
86 	REG_OFFSET_NAME(ORIG_r0),
87 	REG_OFFSET_END,
88 };
89 
90 /**
91  * regs_query_register_offset() - query register offset from its name
92  * @name:	the name of a register
93  *
94  * regs_query_register_offset() returns the offset of a register in struct
95  * pt_regs from its name. If the name is invalid, this returns -EINVAL;
96  */
97 int regs_query_register_offset(const char *name)
98 {
99 	const struct pt_regs_offset *roff;
100 	for (roff = regoffset_table; roff->name != NULL; roff++)
101 		if (!strcmp(roff->name, name))
102 			return roff->offset;
103 	return -EINVAL;
104 }
105 
106 /**
107  * regs_query_register_name() - query register name from its offset
108  * @offset:	the offset of a register in struct pt_regs.
109  *
110  * regs_query_register_name() returns the name of a register from its
111  * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
112  */
113 const char *regs_query_register_name(unsigned int offset)
114 {
115 	const struct pt_regs_offset *roff;
116 	for (roff = regoffset_table; roff->name != NULL; roff++)
117 		if (roff->offset == offset)
118 			return roff->name;
119 	return NULL;
120 }
121 
122 /**
123  * regs_within_kernel_stack() - check the address in the stack
124  * @regs:      pt_regs which contains kernel stack pointer.
125  * @addr:      address which is checked.
126  *
127  * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
128  * If @addr is within the kernel stack, it returns true. If not, returns false.
129  */
130 bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
131 {
132 	return ((addr & ~(THREAD_SIZE - 1))  ==
133 		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
134 }
135 
136 /**
137  * regs_get_kernel_stack_nth() - get Nth entry of the stack
138  * @regs:	pt_regs which contains kernel stack pointer.
139  * @n:		stack entry number.
140  *
141  * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
142  * is specified by @regs. If the @n th entry is NOT in the kernel stack,
143  * this returns 0.
144  */
/*
 * Return the @n'th word on the kernel stack described by @regs, or 0
 * when that slot would fall outside the stack.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *slot;

	slot = (unsigned long *)kernel_stack_pointer(regs) + n;
	if (!regs_within_kernel_stack(regs, (unsigned long)slot))
		return 0;
	return *slot;
}
154 
/*
 * Read one word of the task's saved user-mode register state.
 * @offset indexes pt_regs->uregs (r0..r15, cpsr, ORIG_r0), i.e. the
 * "struct user" byte offset divided by four.  The state lives in the
 * pt_regs frame on the task's kernel stack, so no user-space access
 * is involved.
 */
static inline long get_user_reg(struct task_struct *task, int offset)
{
	return task_pt_regs(task)->uregs[offset];
}
165 
166 /*
167  * this routine will put a word on the processes privileged stack.
168  * the offset is how far from the base addr as stored in the THREAD.
169  * this routine assumes that all the privileged stacks are in our
170  * data space.
171  */
172 static inline int
173 put_user_reg(struct task_struct *task, int offset, long data)
174 {
175 	struct pt_regs newregs, *regs = task_pt_regs(task);
176 	int ret = -EINVAL;
177 
178 	newregs = *regs;
179 	newregs.uregs[offset] = data;
180 
181 	if (valid_user_regs(&newregs)) {
182 		regs->uregs[offset] = data;
183 		ret = 0;
184 	}
185 
186 	return ret;
187 }
188 
/*
 * Called by kernel/ptrace.c when detaching.  ARM keeps no per-task
 * ptrace state outside pt_regs, so there is nothing to undo here.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do. */
}
196 
/*
 * Handle hitting a breakpoint: deliver SIGTRAP/TRAP_BRKPT to the
 * current task with si_addr set to the faulting instruction.
 */
void ptrace_break(struct pt_regs *regs)
{
	force_sig_fault(SIGTRAP, TRAP_BRKPT,
			(void __user *)instruction_pointer(regs));
}
205 
/* Undef-hook callback: any matched breakpoint encoding raises SIGTRAP. */
static int break_trap(struct pt_regs *regs, unsigned int instr)
{
	ptrace_break(regs);
	return 0;
}
211 
/*
 * Undefined-instruction hooks that route the BREAKINST_* encodings to
 * break_trap().  The ARM hook masks off the condition-code nibble so
 * the breakpoint fires under any condition; cpsr_mask/cpsr_val select
 * ARM state (T bit clear).
 */
static struct undef_hook arm_break_hook = {
	.instr_mask	= 0x0fffffff,
	.instr_val	= 0x07f001f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= break_trap,
};

/* 16-bit Thumb breakpoint encoding, Thumb state only (T bit set). */
static struct undef_hook thumb_break_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= 0x0000de01,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};

/* 32-bit Thumb-2 breakpoint encoding, Thumb state only. */
static struct undef_hook thumb2_break_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= 0xf7f0a000,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};
235 
/*
 * Register all three breakpoint undef hooks at core_initcall time so
 * inserted BREAKINST_* instructions raise SIGTRAP from early boot on.
 */
static int __init ptrace_break_init(void)
{
	register_undef_hook(&arm_break_hook);
	register_undef_hook(&thumb_break_hook);
	register_undef_hook(&thumb2_break_hook);
	return 0;
}

core_initcall(ptrace_break_init);
245 
246 /*
247  * Read the word at offset "off" into the "struct user".  We
248  * actually access the pt_regs stored on the kernel stack.
249  */
250 static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
251 			    unsigned long __user *ret)
252 {
253 	unsigned long tmp;
254 
255 	if (off & 3)
256 		return -EIO;
257 
258 	tmp = 0;
259 	if (off == PT_TEXT_ADDR)
260 		tmp = tsk->mm->start_code;
261 	else if (off == PT_DATA_ADDR)
262 		tmp = tsk->mm->start_data;
263 	else if (off == PT_TEXT_END_ADDR)
264 		tmp = tsk->mm->end_code;
265 	else if (off < sizeof(struct pt_regs))
266 		tmp = get_user_reg(tsk, off >> 2);
267 	else if (off >= sizeof(struct user))
268 		return -EIO;
269 
270 	return put_user(tmp, ret);
271 }
272 
273 /*
274  * Write the word at offset "off" into "struct user".  We
275  * actually access the pt_regs stored on the kernel stack.
276  */
277 static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
278 			     unsigned long val)
279 {
280 	if (off & 3 || off >= sizeof(struct user))
281 		return -EIO;
282 
283 	if (off >= sizeof(struct pt_regs))
284 		return 0;
285 
286 	return put_user_reg(tsk, off >> 2, val);
287 }
288 
289 #ifdef CONFIG_IWMMXT
290 
291 /*
292  * Get the child iWMMXt state.
293  */
294 static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
295 {
296 	struct thread_info *thread = task_thread_info(tsk);
297 
298 	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
299 		return -ENODATA;
300 	iwmmxt_task_disable(thread);  /* force it to ram */
301 	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
302 		? -EFAULT : 0;
303 }
304 
305 /*
306  * Set the child iWMMXt state.
307  */
308 static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
309 {
310 	struct thread_info *thread = task_thread_info(tsk);
311 
312 	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
313 		return -EACCES;
314 	iwmmxt_task_release(thread);  /* force a reload */
315 	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
316 		? -EFAULT : 0;
317 }
318 
319 #endif
320 
321 #ifdef CONFIG_CRUNCH
322 /*
323  * Get the child Crunch state.
324  */
325 static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
326 {
327 	struct thread_info *thread = task_thread_info(tsk);
328 
329 	crunch_task_disable(thread);  /* force it to ram */
330 	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
331 		? -EFAULT : 0;
332 }
333 
334 /*
335  * Set the child Crunch state.
336  */
337 static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
338 {
339 	struct thread_info *thread = task_thread_info(tsk);
340 
341 	crunch_task_release(thread);  /* force a reload */
342 	return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
343 		? -EFAULT : 0;
344 }
345 #endif
346 
347 #ifdef CONFIG_HAVE_HW_BREAKPOINT
348 /*
349  * Convert a virtual register number into an index for a thread_info
350  * breakpoint array. Breakpoints are identified using positive numbers
351  * whilst watchpoints are negative. The registers are laid out as pairs
352  * of (address, control), each pair mapping to a unique hw_breakpoint struct.
353  * Register 0 is reserved for describing resource information.
354  */
355 static int ptrace_hbp_num_to_idx(long num)
356 {
357 	if (num < 0)
358 		num = (ARM_MAX_BRP << 1) - num;
359 	return (num - 1) >> 1;
360 }
361 
362 /*
363  * Returns the virtual register number for the address of the
364  * breakpoint at index idx.
365  */
366 static long ptrace_hbp_idx_to_num(int idx)
367 {
368 	long mid = ARM_MAX_BRP << 1;
369 	long num = (idx << 1) + 1;
370 	return num > mid ? mid - num : num;
371 }
372 
373 /*
374  * Handle hitting a HW-breakpoint.
375  */
376 static void ptrace_hbptriggered(struct perf_event *bp,
377 				     struct perf_sample_data *data,
378 				     struct pt_regs *regs)
379 {
380 	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
381 	long num;
382 	int i;
383 
384 	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
385 		if (current->thread.debug.hbp[i] == bp)
386 			break;
387 
388 	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);
389 
390 	force_sig_ptrace_errno_trap((int)num, (void __user *)(bkpt->trigger));
391 }
392 
/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent: the child gets fresh (empty) slots
 * instead of copies of the parent's perf_event pointers.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}
402 
403 /*
404  * Unregister breakpoints from this task and reset the pointers in
405  * the thread_struct.
406  */
407 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
408 {
409 	int i;
410 	struct thread_struct *t = &tsk->thread;
411 
412 	for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
413 		if (t->debug.hbp[i]) {
414 			unregister_hw_breakpoint(t->debug.hbp[i]);
415 			t->debug.hbp[i] = NULL;
416 		}
417 	}
418 }
419 
420 static u32 ptrace_get_hbp_resource_info(void)
421 {
422 	u8 num_brps, num_wrps, debug_arch, wp_len;
423 	u32 reg = 0;
424 
425 	num_brps	= hw_breakpoint_slots(TYPE_INST);
426 	num_wrps	= hw_breakpoint_slots(TYPE_DATA);
427 	debug_arch	= arch_get_debug_arch();
428 	wp_len		= arch_get_max_wp_len();
429 
430 	reg		|= debug_arch;
431 	reg		<<= 8;
432 	reg		|= wp_len;
433 	reg		<<= 8;
434 	reg		|= num_wrps;
435 	reg		<<= 8;
436 	reg		|= num_brps;
437 
438 	return reg;
439 }
440 
/*
 * Allocate a new, initially disabled user hw_breakpoint of @type
 * (HW_BREAKPOINT_X or HW_BREAKPOINT_RW) attached to @tsk.  Returns
 * the perf_event or an ERR_PTR() on failure.
 */
static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}
456 
/*
 * PTRACE_GETHBPREGS: read one virtual hw-breakpoint register.
 * num == 0 returns the packed resource-info word; otherwise odd
 * numbers read a slot's address register and even numbers its control
 * register (positive numbers address breakpoints, negative ones
 * watchpoints).
 */
static int ptrace_gethbpregs(struct task_struct *tsk, long num,
			     unsigned long  __user *data)
{
	u32 reg;
	int idx, ret = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl arch_ctrl;

	if (num == 0) {
		reg = ptrace_get_hbp_resource_info();
	} else {
		idx = ptrace_hbp_num_to_idx(num);
		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
			ret = -EINVAL;
			goto out;
		}

		/* An unused slot reads back as zero. */
		bp = tsk->thread.debug.hbp[idx];
		if (!bp) {
			reg = 0;
			goto put;
		}

		arch_ctrl = counter_arch_bp(bp)->ctrl;

		/*
		 * Fix up the len because we may have adjusted it
		 * to compensate for an unaligned address.
		 */
		while (!(arch_ctrl.len & 0x1))
			arch_ctrl.len >>= 1;

		if (num & 0x1)
			reg = bp->attr.bp_addr;
		else
			reg = encode_ctrl_reg(arch_ctrl);
	}

put:
	if (put_user(reg, data))
		ret = -EFAULT;

out:
	return ret;
}
502 
/*
 * PTRACE_SETHBPREGS: write one virtual hw-breakpoint register.
 * Writes to register 0 (resource info) are silently ignored.  The
 * sign of @num fixes the breakpoint type: negative numbers may only
 * configure watchpoints (HW_BREAKPOINT_RW), positive numbers only
 * execution breakpoints (HW_BREAKPOINT_X).  A perf_event is created
 * lazily the first time a slot is written.
 */
static int ptrace_sethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	int idx, gen_len, gen_type, implied_type, ret = 0;
	u32 user_val;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct perf_event_attr attr;

	if (num == 0)
		goto out;
	else if (num < 0)
		implied_type = HW_BREAKPOINT_RW;
	else
		implied_type = HW_BREAKPOINT_X;

	idx = ptrace_hbp_num_to_idx(num);
	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
		ret = -EINVAL;
		goto out;
	}

	if (get_user(user_val, data)) {
		ret = -EFAULT;
		goto out;
	}

	/* Create the breakpoint event on first use of this slot. */
	bp = tsk->thread.debug.hbp[idx];
	if (!bp) {
		bp = ptrace_hbp_create(tsk, implied_type);
		if (IS_ERR(bp)) {
			ret = PTR_ERR(bp);
			goto out;
		}
		tsk->thread.debug.hbp[idx] = bp;
	}

	attr = bp->attr;

	if (num & 0x1) {
		/* Address */
		attr.bp_addr	= user_val;
	} else {
		/* Control */
		decode_ctrl_reg(user_val, &ctrl);
		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
		if (ret)
			goto out;

		/* Reject a type that contradicts the register's sign. */
		if ((gen_type & implied_type) != gen_type) {
			ret = -EINVAL;
			goto out;
		}

		attr.bp_len	= gen_len;
		attr.bp_type	= gen_type;
		attr.disabled	= !ctrl.enabled;
	}

	ret = modify_user_hw_breakpoint(bp, &attr);
out:
	return ret;
}
566 #endif
567 
568 /* regset get/set implementations */
569 
/*
 * NT_PRSTATUS regset read: copy the task's saved pt_regs into the
 * requested slice of the user regset buffer.
 */
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   regs,
				   0, sizeof(*regs));
}
581 
582 static int gpr_set(struct task_struct *target,
583 		   const struct user_regset *regset,
584 		   unsigned int pos, unsigned int count,
585 		   const void *kbuf, const void __user *ubuf)
586 {
587 	int ret;
588 	struct pt_regs newregs = *task_pt_regs(target);
589 
590 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
591 				 &newregs,
592 				 0, sizeof(newregs));
593 	if (ret)
594 		return ret;
595 
596 	if (!valid_user_regs(&newregs))
597 		return -EINVAL;
598 
599 	*task_pt_regs(target) = newregs;
600 	return 0;
601 }
602 
/*
 * NT_PRFPREG regset read: export the FPA emulation state held in
 * thread_info->fpstate; only the user_fp-sized prefix is visible.
 */
static int fpa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &task_thread_info(target)->fpstate,
				   0, sizeof(struct user_fp));
}
612 
/*
 * NT_PRFPREG regset write: import FPA state into
 * thread_info->fpstate.
 */
static int fpa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct thread_info *thread = task_thread_info(target);

	/* Mark FPA coprocessors (cp1/cp2) in use so the new state is
	   kept across context switches. */
	thread->used_cp[1] = thread->used_cp[2] = 1;

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
		&thread->fpstate,
		0, sizeof(struct user_fp));
}
626 
627 #ifdef CONFIG_VFP
628 /*
629  * VFP register get/set implementations.
630  *
 * With respect to the kernel, struct user_vfp is divided into three chunks:
632  * 16 or 32 real VFP registers (d0-d15 or d0-31)
633  *	These are transferred to/from the real registers in the task's
634  *	vfp_hard_struct.  The number of registers depends on the kernel
635  *	configuration.
636  *
637  * 16 or 0 fake VFP registers (d16-d31 or empty)
638  *	i.e., the user_vfp structure has space for 32 registers even if
639  *	the kernel doesn't have them all.
640  *
641  *	vfp_get() reads this chunk as zero where applicable
642  *	vfp_set() ignores this chunk
643  *
644  * 1 word for the FPSCR
645  *
646  * The bounds-checking logic built into user_regset_copyout and friends
647  * means that we can make a simple sequence of calls to map the relevant data
648  * to/from the specified slice of the user regset structure.
649  */
/*
 * NT_ARM_VFP regset read.  The three copyout calls must stay in this
 * order: real d-registers first, then a zero-filled hole for the
 * registers this kernel doesn't have, then the FPSCR word — the
 * pos/count cursors advance through the user_vfp layout as we go.
 */
static int vfp_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	/* Make sure the memory copy of the VFP state is up to date. */
	vfp_sync_hwstate(thread);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &vfp->fpregs,
				  user_fpregs_offset,
				  user_fpregs_offset + sizeof(vfp->fpregs));
	if (ret)
		return ret;

	/* Registers the kernel doesn't implement read as zero. */
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       user_fpregs_offset + sizeof(vfp->fpregs),
				       user_fpscr_offset);
	if (ret)
		return ret;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &vfp->fpscr,
				   user_fpscr_offset,
				   user_fpscr_offset + sizeof(vfp->fpscr));
}
681 
/*
 * For vfp_set() a read-modify-write is done on the VFP registers,
 * in order to avoid writing back a half-modified set of registers on
 * failure.  The three copyin calls mirror vfp_get(): real registers,
 * an ignored hole for registers this kernel doesn't have, then FPSCR.
 */
static int vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct new_vfp;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	/* Snapshot current state so untouched fields are preserved. */
	vfp_sync_hwstate(thread);
	new_vfp = thread->vfpstate.hard;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &new_vfp.fpregs,
				  user_fpregs_offset,
				  user_fpregs_offset + sizeof(new_vfp.fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
				user_fpregs_offset + sizeof(new_vfp.fpregs),
				user_fpscr_offset);
	if (ret)
		return ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpscr,
				 user_fpscr_offset,
				 user_fpscr_offset + sizeof(new_vfp.fpscr));
	if (ret)
		return ret;

	/* Commit only after every copyin succeeded. */
	thread->vfpstate.hard = new_vfp;
	vfp_flush_hwstate(thread);

	return 0;
}
726 #endif /* CONFIG_VFP */
727 
/* Indices into arm_regsets[] below (and user_arm_view). */
enum arm_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_VFP
	REGSET_VFP,
#endif
};
735 
/* Regsets exported in core dumps and via PTRACE_GETREGSET. */
static const struct user_regset arm_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		/*
		 * For the FPA regs in fpstate, the real fields are a mixture
		 * of sizes, so pretend that the registers are word-sized:
		 */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fp) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpa_get,
		.set = fpa_set
	},
#ifdef CONFIG_VFP
	[REGSET_VFP] = {
		/*
		 * Pretend that the VFP regs are word-sized, since the FPSCR is
		 * a single word dangling at the end of struct user_vfp:
		 */
		.core_note_type = NT_ARM_VFP,
		.n = ARM_VFPREGS_SIZE / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = vfp_get,
		.set = vfp_set
	},
#endif /* CONFIG_VFP */
};
772 
/* The single native regset view exposed for all ARM tasks. */
static const struct user_regset_view user_arm_view = {
	.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};
777 
/* ARM has no compat views: every task gets user_arm_view. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_arm_view;
}
782 
/*
 * ARM-specific ptrace request dispatcher.  Architecture requests are
 * handled here (register peeks/pokes, whole-set transfers via the
 * regset machinery, coprocessor state, TLS, syscall rewriting and
 * hardware breakpoints); everything else falls through to the
 * generic ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
		case PTRACE_PEEKUSR:
			ret = ptrace_read_user(child, addr, datap);
			break;

		case PTRACE_POKEUSR:
			ret = ptrace_write_user(child, addr, data);
			break;

		case PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_GPR,
						  0, sizeof(struct pt_regs),
						  datap);
			break;

		case PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_GPR,
						    0, sizeof(struct pt_regs),
						    datap);
			break;

		case PTRACE_GETFPREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_FPR,
						  0, sizeof(union fp_state),
						  datap);
			break;

		case PTRACE_SETFPREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_FPR,
						    0, sizeof(union fp_state),
						    datap);
			break;

#ifdef CONFIG_IWMMXT
		case PTRACE_GETWMMXREGS:
			ret = ptrace_getwmmxregs(child, datap);
			break;

		case PTRACE_SETWMMXREGS:
			ret = ptrace_setwmmxregs(child, datap);
			break;
#endif

		case PTRACE_GET_THREAD_AREA:
			/* TLS value lives in tp_value[0] of thread_info. */
			ret = put_user(task_thread_info(child)->tp_value[0],
				       datap);
			break;

		case PTRACE_SET_SYSCALL:
			/* Let the tracer rewrite the syscall number. */
			task_thread_info(child)->syscall = data;
			ret = 0;
			break;

#ifdef CONFIG_CRUNCH
		case PTRACE_GETCRUNCHREGS:
			ret = ptrace_getcrunchregs(child, datap);
			break;

		case PTRACE_SETCRUNCHREGS:
			ret = ptrace_setcrunchregs(child, datap);
			break;
#endif

#ifdef CONFIG_VFP
		case PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_VFP,
						  0, ARM_VFPREGS_SIZE,
						  datap);
			break;

		case PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_VFP,
						    0, ARM_VFPREGS_SIZE,
						    datap);
			break;
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
		case PTRACE_GETHBPREGS:
			ret = ptrace_gethbpregs(child, addr,
						(unsigned long __user *)data);
			break;
		case PTRACE_SETHBPREGS:
			ret = ptrace_sethbpregs(child, addr,
						(unsigned long __user *)data);
			break;
#endif

		default:
			ret = ptrace_request(child, request, addr, data);
			break;
	}

	return ret;
}
890 
/* Direction flag passed (via ARM_ip) to the syscall tracer. */
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};
895 
896 static void tracehook_report_syscall(struct pt_regs *regs,
897 				    enum ptrace_syscall_dir dir)
898 {
899 	unsigned long ip;
900 
901 	/*
902 	 * IP is used to denote syscall entry/exit:
903 	 * IP = 0 -> entry, =1 -> exit
904 	 */
905 	ip = regs->ARM_ip;
906 	regs->ARM_ip = dir;
907 
908 	if (dir == PTRACE_SYSCALL_EXIT)
909 		tracehook_report_syscall_exit(regs, 0);
910 	else if (tracehook_report_syscall_entry(regs))
911 		current_thread_info()->syscall = -1;
912 
913 	regs->ARM_ip = ip;
914 }
915 
/*
 * Syscall entry work: run the ptrace tracer, then seccomp, then the
 * trace_sys_enter tracepoint and audit.  The ordering matters — the
 * tracer and seccomp may both rewrite the syscall number, so the
 * final number is re-read from thread_info before tracing/audit.
 * Returns the (possibly rewritten) syscall number, or -1 to skip the
 * syscall.
 */
asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
{
	current_thread_info()->syscall = scno;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do seccomp after ptrace; syscall may have changed. */
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
	if (secure_computing() == -1)
		return -1;
#else
	/* XXX: remove this once OABI gets fixed */
	secure_computing_strict(current_thread_info()->syscall);
#endif

	/* Tracer or seccomp may have changed syscall. */
	scno = current_thread_info()->syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, scno);

	audit_syscall_entry(scno, regs->ARM_r0, regs->ARM_r1, regs->ARM_r2,
			    regs->ARM_r3);

	return scno;
}
943 
/*
 * Syscall exit work: audit first, then the trace_sys_exit tracepoint,
 * then the ptrace exit report.
 */
asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	/*
	 * Audit the syscall before anything else, as a debugger may
	 * come in and change the current registers.
	 */
	audit_syscall_exit(regs);

	/*
	 * Note that we haven't updated the ->syscall field for the
	 * current thread. This isn't a problem because it will have
	 * been set on syscall entry and there hasn't been an opportunity
	 * for a PTRACE_SET_SYSCALL since then.
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}
964