/*
 *  linux/arch/arm/kernel/ptrace.c
 *
 *  By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>

#include <asm/pgtable.h>
#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#define REG_PC	15
#define REG_PSR	16
/*
 * Does not yet catch signals sent when the child dies;
 * that happens in exit.c or in signal.c.
 */

#if 0
/*
 * Breakpoint SWI instruction: SWI &9F0001
 */
#define BREAKINST_ARM	0xef9f0001
#define BREAKINST_THUMB	0xdf00		/* fill this in later */
#else
/*
 * New breakpoints - use an undefined instruction.  The ARM architecture
 * reference manual guarantees that the following instruction space
 * will produce an undefined instruction exception on all CPUs:
 *
 *  ARM:   xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
 *  Thumb: 1101 1110 xxxx xxxx
 */
#define BREAKINST_ARM	0xe7f001f0
#define BREAKINST_THUMB	0xde01
#endif

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) \
	{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0),
	REG_OFFSET_NAME(r1),
	REG_OFFSET_NAME(r2),
	REG_OFFSET_NAME(r3),
	REG_OFFSET_NAME(r4),
	REG_OFFSET_NAME(r5),
	REG_OFFSET_NAME(r6),
	REG_OFFSET_NAME(r7),
	REG_OFFSET_NAME(r8),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(fp),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(lr),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(cpsr),
	REG_OFFSET_NAME(ORIG_r0),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
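
/*
 * Example: regs_query_register_offset("r0") returns
 * offsetof(struct pt_regs, ARM_r0), i.e. 0, the first entry of uregs[];
 * regs_query_register_name() below performs the reverse lookup.
 */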

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:      pt_regs which contains kernel stack pointer.
 * @addr:      address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel
 * stack page(s). If @addr is within the kernel stack, it returns true.
 * If not, returns false.
 */
bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
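
/*
 * Example: with n == 0 this returns the word at the kernel stack pointer
 * itself, with n == 1 the next word up, and so on; the
 * regs_within_kernel_stack() check keeps the access inside the
 * THREAD_SIZE-aligned stack area.  These helpers back the generic
 * register/stack access API used by, for example, kprobe event
 * argument fetching.
 */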

/*
 * This routine gets a word off of the process's privileged stack.
 * The offset is how far from the base addr as stored in the THREAD.
 * This routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline long get_user_reg(struct task_struct *task, int offset)
{
	return task_pt_regs(task)->uregs[offset];
}

/*
 * This routine puts a word on the process's privileged stack.
 * The offset is how far from the base addr as stored in the THREAD.
 * This routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
	struct pt_regs newregs, *regs = task_pt_regs(task);
	int ret = -EINVAL;

	newregs = *regs;
	newregs.uregs[offset] = data;

	if (valid_user_regs(&newregs)) {
		regs->uregs[offset] = data;
		ret = 0;
	}

	return ret;
}

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do. */
}

/*
 * Handle hitting a breakpoint.
 */
void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
	force_sig_fault(SIGTRAP, TRAP_BRKPT,
			(void __user *)instruction_pointer(regs), tsk);
}

static int break_trap(struct pt_regs *regs, unsigned int instr)
{
	ptrace_break(current, regs);
	return 0;
}

static struct undef_hook arm_break_hook = {
	.instr_mask	= 0x0fffffff,
	.instr_val	= 0x07f001f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= break_trap,
};

static struct undef_hook thumb_break_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= 0xde01,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};

static struct undef_hook thumb2_break_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= 0xf7f0a000,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};

static int __init ptrace_break_init(void)
{
	register_undef_hook(&arm_break_hook);
	register_undef_hook(&thumb_break_hook);
	register_undef_hook(&thumb2_break_hook);
	return 0;
}

core_initcall(ptrace_break_init);
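
/*
 * Usage note (illustrative): a debugger typically plants BREAKINST_ARM or
 * BREAKINST_THUMB in the tracee's text via PTRACE_POKETEXT.  When the
 * tracee executes it, the CPU raises an undefined instruction exception,
 * the hooks above match it (the ARM hook masks off the condition field,
 * so any condition code is accepted), and break_trap() delivers
 * SIGTRAP/TRAP_BRKPT to the task through ptrace_break().
 */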

/*
 * Read the word at offset "off" in the "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long __user *ret)
{
	unsigned long tmp;

	if (off & 3)
		return -EIO;

	tmp = 0;
	if (off == PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(struct pt_regs))
		tmp = get_user_reg(tsk, off >> 2);
	else if (off >= sizeof(struct user))
		return -EIO;

	return put_user(tmp, ret);
}

/*
 * Write the word at offset "off" into "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long val)
{
	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	if (off >= sizeof(struct pt_regs))
		return 0;

	return put_user_reg(tsk, off >> 2, val);
}
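
/*
 * Example (illustrative): "off" is a byte offset into struct user, so
 * register N lives at offset N * 4.  Reading the tracee's PC is therefore
 * a PTRACE_PEEKUSR at offset REG_PC * 4 (60), and the CPSR sits at
 * REG_PSR * 4 (64); writes through PTRACE_POKEUSR are filtered by
 * put_user_reg()/valid_user_regs() above.
 */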

#ifdef CONFIG_IWMMXT

/*
 * Get the child iWMMXt state.
 */
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -ENODATA;
	iwmmxt_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child iWMMXt state.
 */
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -EACCES;
	iwmmxt_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
		? -EFAULT : 0;
}

#endif

#ifdef CONFIG_CRUNCH
/*
 * Get the child Crunch state.
 */
static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child Crunch state.
 */
static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
		? -EFAULT : 0;
}
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int ptrace_hbp_num_to_idx(long num)
{
	if (num < 0)
		num = (ARM_MAX_BRP << 1) - num;
	return (num - 1) >> 1;
}

/*
 * Returns the virtual register number for the address of the
 * breakpoint at index idx.
 */
static long ptrace_hbp_idx_to_num(int idx)
{
	long mid = ARM_MAX_BRP << 1;
	long num = (idx << 1) + 1;
	return num > mid ? mid - num : num;
}
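
/*
 * Worked example of the numbering: num 1 and 2 are the address and
 * control registers of breakpoint slot 0, num 3 and 4 map to slot 1,
 * and so on.  Negative numbers select watchpoints: num -1 and -2 are
 * the address and control registers of watchpoint slot 0, which lives
 * at index ARM_MAX_BRP in the thread's hbp[] array.
 */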

/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	long num;
	int i;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
		if (current->thread.debug.hbp[i] == bp)
			break;

	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);

	force_sig_ptrace_errno_trap((int)num, (void __user *)(bkpt->trigger));
}

/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
		if (t->debug.hbp[i]) {
			unregister_hw_breakpoint(t->debug.hbp[i]);
			t->debug.hbp[i] = NULL;
		}
	}
}

static u32 ptrace_get_hbp_resource_info(void)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);
	debug_arch	= arch_get_debug_arch();
	wp_len		= arch_get_max_wp_len();

	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	return reg;
}
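
/*
 * The resulting resource word, read back through virtual register 0, is:
 *
 *   [31:24] debug architecture version
 *   [23:16] maximum watchpoint length
 *   [15: 8] number of watchpoint slots
 *   [ 7: 0] number of breakpoint slots
 */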

static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}

static int ptrace_gethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	u32 reg;
	int idx, ret = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl arch_ctrl;

	if (num == 0) {
		reg = ptrace_get_hbp_resource_info();
	} else {
		idx = ptrace_hbp_num_to_idx(num);
		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
			ret = -EINVAL;
			goto out;
		}

		bp = tsk->thread.debug.hbp[idx];
		if (!bp) {
			reg = 0;
			goto put;
		}

		arch_ctrl = counter_arch_bp(bp)->ctrl;

		/*
		 * Fix up the len because we may have adjusted it
		 * to compensate for an unaligned address.
		 */
		while (!(arch_ctrl.len & 0x1))
			arch_ctrl.len >>= 1;

		if (num & 0x1)
			reg = bp->attr.bp_addr;
		else
			reg = encode_ctrl_reg(arch_ctrl);
	}

put:
	if (put_user(reg, data))
		ret = -EFAULT;

out:
	return ret;
}

static int ptrace_sethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	int idx, gen_len, gen_type, implied_type, ret = 0;
	u32 user_val;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct perf_event_attr attr;

	if (num == 0)
		goto out;
	else if (num < 0)
		implied_type = HW_BREAKPOINT_RW;
	else
		implied_type = HW_BREAKPOINT_X;

	idx = ptrace_hbp_num_to_idx(num);
	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
		ret = -EINVAL;
		goto out;
	}

	if (get_user(user_val, data)) {
		ret = -EFAULT;
		goto out;
	}

	bp = tsk->thread.debug.hbp[idx];
	if (!bp) {
		bp = ptrace_hbp_create(tsk, implied_type);
		if (IS_ERR(bp)) {
			ret = PTR_ERR(bp);
			goto out;
		}
		tsk->thread.debug.hbp[idx] = bp;
	}

	attr = bp->attr;

	if (num & 0x1) {
		/* Address */
		attr.bp_addr	= user_val;
	} else {
		/* Control */
		decode_ctrl_reg(user_val, &ctrl);
		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
		if (ret)
			goto out;

		if ((gen_type & implied_type) != gen_type) {
			ret = -EINVAL;
			goto out;
		}

		attr.bp_len	= gen_len;
		attr.bp_type	= gen_type;
		attr.disabled	= !ctrl.enabled;
	}

	ret = modify_user_hw_breakpoint(bp, &attr);
out:
	return ret;
}
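
/*
 * A minimal tracer-side sketch (illustrative): to program breakpoint
 * slot 0, a tracer would write its address and control registers, e.g.
 *
 *	unsigned long bp_addr = ..., bp_ctrl = ...;  // encoded control word
 *	ptrace(PTRACE_SETHBPREGS, pid, (void *)1, &bp_addr); // reg 1: address
 *	ptrace(PTRACE_SETHBPREGS, pid, (void *)2, &bp_ctrl); // reg 2: control
 *
 * The control write is decoded and validated above before
 * modify_user_hw_breakpoint() installs the final attributes.
 */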
#endif

/* regset get/set implementations */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   regs,
				   0, sizeof(*regs));
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs newregs = *task_pt_regs(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &newregs,
				 0, sizeof(newregs));
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	*task_pt_regs(target) = newregs;
	return 0;
}

static int fpa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &task_thread_info(target)->fpstate,
				   0, sizeof(struct user_fp));
}

static int fpa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct thread_info *thread = task_thread_info(target);

	thread->used_cp[1] = thread->used_cp[2] = 1;

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
		&thread->fpstate,
		0, sizeof(struct user_fp));
}

#ifdef CONFIG_VFP
/*
 * VFP register get/set implementations.
 *
 * With respect to the kernel, struct user_vfp is divided into three chunks:
 * 16 or 32 real VFP registers (d0-d15 or d0-d31)
 *	These are transferred to/from the real registers in the task's
 *	vfp_hard_struct.  The number of registers depends on the kernel
 *	configuration.
 *
 * 16 or 0 fake VFP registers (d16-d31 or empty)
 *	i.e., the user_vfp structure has space for 32 registers even if
 *	the kernel doesn't have them all.
 *
 *	vfp_get() reads this chunk as zero where applicable
 *	vfp_set() ignores this chunk
 *
 * 1 word for the FPSCR
 *
 * The bounds-checking logic built into user_regset_copyout and friends
 * means that we can make a simple sequence of calls to map the relevant data
 * to/from the specified slice of the user regset structure.
 */
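
/*
 * For reference (assuming the usual struct user_vfp layout of 32
 * double-width fpregs followed by the fpscr word), the regset slice is:
 *
 *   bytes   0..255  d0-d31 (the upper 16 read as zero when the kernel
 *                   only carries 16 registers)
 *   bytes 256..259  FPSCR
 */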
static int vfp_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	vfp_sync_hwstate(thread);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &vfp->fpregs,
				  user_fpregs_offset,
				  user_fpregs_offset + sizeof(vfp->fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       user_fpregs_offset + sizeof(vfp->fpregs),
				       user_fpscr_offset);
	if (ret)
		return ret;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &vfp->fpscr,
				   user_fpscr_offset,
				   user_fpscr_offset + sizeof(vfp->fpscr));
}

/*
 * For vfp_set() a read-modify-write is done on the VFP registers,
 * in order to avoid writing back a half-modified set of registers on
 * failure.
 */
static int vfp_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct new_vfp;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	vfp_sync_hwstate(thread);
	new_vfp = thread->vfpstate.hard;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpregs,
				 user_fpregs_offset,
				 user_fpregs_offset + sizeof(new_vfp.fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
				user_fpregs_offset + sizeof(new_vfp.fpregs),
				user_fpscr_offset);
	if (ret)
		return ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpscr,
				 user_fpscr_offset,
				 user_fpscr_offset + sizeof(new_vfp.fpscr));
	if (ret)
		return ret;

	thread->vfpstate.hard = new_vfp;
	vfp_flush_hwstate(thread);

	return 0;
}
#endif /* CONFIG_VFP */

enum arm_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_VFP
	REGSET_VFP,
#endif
};

static const struct user_regset arm_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		/*
		 * For the FPA regs in fpstate, the real fields are a mixture
		 * of sizes, so pretend that the registers are word-sized:
		 */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fp) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpa_get,
		.set = fpa_set
	},
#ifdef CONFIG_VFP
	[REGSET_VFP] = {
		/*
		 * Pretend that the VFP regs are word-sized, since the FPSCR is
		 * a single word dangling at the end of struct user_vfp:
		 */
		.core_note_type = NT_ARM_VFP,
		.n = ARM_VFPREGS_SIZE / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = vfp_get,
		.set = vfp_set
	},
#endif /* CONFIG_VFP */
};

static const struct user_regset_view user_arm_view = {
	.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_arm_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
		case PTRACE_PEEKUSR:
			ret = ptrace_read_user(child, addr, datap);
			break;

		case PTRACE_POKEUSR:
			ret = ptrace_write_user(child, addr, data);
			break;

		case PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_GPR,
						  0, sizeof(struct pt_regs),
						  datap);
			break;

		case PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_GPR,
						    0, sizeof(struct pt_regs),
						    datap);
			break;

		case PTRACE_GETFPREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_FPR,
						  0, sizeof(union fp_state),
						  datap);
			break;

		case PTRACE_SETFPREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_FPR,
						    0, sizeof(union fp_state),
						    datap);
			break;

#ifdef CONFIG_IWMMXT
		case PTRACE_GETWMMXREGS:
			ret = ptrace_getwmmxregs(child, datap);
			break;

		case PTRACE_SETWMMXREGS:
			ret = ptrace_setwmmxregs(child, datap);
			break;
#endif

		case PTRACE_GET_THREAD_AREA:
			ret = put_user(task_thread_info(child)->tp_value[0],
				       datap);
			break;

		case PTRACE_SET_SYSCALL:
			task_thread_info(child)->syscall = data;
			ret = 0;
			break;

#ifdef CONFIG_CRUNCH
		case PTRACE_GETCRUNCHREGS:
			ret = ptrace_getcrunchregs(child, datap);
			break;

		case PTRACE_SETCRUNCHREGS:
			ret = ptrace_setcrunchregs(child, datap);
			break;
#endif

#ifdef CONFIG_VFP
		case PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_VFP,
						  0, ARM_VFPREGS_SIZE,
						  datap);
			break;

		case PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_VFP,
						    0, ARM_VFPREGS_SIZE,
						    datap);
			break;
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
		case PTRACE_GETHBPREGS:
			ret = ptrace_gethbpregs(child, addr,
						(unsigned long __user *)data);
			break;
		case PTRACE_SETHBPREGS:
			ret = ptrace_sethbpregs(child, addr,
						(unsigned long __user *)data);
			break;
#endif

		default:
			ret = ptrace_request(child, request, addr, data);
			break;
	}

	return ret;
}
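
/*
 * Tracer-side sketch (illustrative, assuming the userspace pt_regs layout
 * from <asm/ptrace.h>): the regset-backed requests above are what a
 * native debugger typically uses, e.g.
 *
 *	struct pt_regs regs;
 *	ptrace(PTRACE_GETREGS, pid, NULL, &regs);   // REGSET_GPR copy-out
 *	regs.uregs[0] = 0;                          // modify r0
 *	ptrace(PTRACE_SETREGS, pid, NULL, &regs);   // checked by valid_user_regs()
 */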

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	unsigned long ip;

	/*
	 * IP is used to denote syscall entry/exit:
	 * IP = 0 -> entry, =1 -> exit
	 */
	ip = regs->ARM_ip;
	regs->ARM_ip = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		current_thread_info()->syscall = -1;

	regs->ARM_ip = ip;
}

asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
{
	current_thread_info()->syscall = scno;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do seccomp after ptrace; syscall may have changed. */
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
	if (secure_computing(NULL) == -1)
		return -1;
#else
	/* XXX: remove this once OABI gets fixed */
	secure_computing_strict(current_thread_info()->syscall);
#endif

	/* Tracer or seccomp may have changed syscall. */
	scno = current_thread_info()->syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, scno);

	audit_syscall_entry(scno, regs->ARM_r0, regs->ARM_r1, regs->ARM_r2,
			    regs->ARM_r3);

	return scno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	/*
	 * Audit the syscall before anything else, as a debugger may
	 * come in and change the current registers.
	 */
	audit_syscall_exit(regs);

	/*
	 * Note that we haven't updated the ->syscall field for the
	 * current thread. This isn't a problem because it will have
	 * been set on syscall entry and there hasn't been an opportunity
	 * for a PTRACE_SET_SYSCALL since then.
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}
968