/*
 *  linux/arch/arm/kernel/ptrace.c
 *
 *  By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>

#include <asm/pgtable.h>
#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#define REG_PC	15
#define REG_PSR	16
/*
 * Does not yet catch signals sent when the child dies; that would need
 * handling in exit.c or in signal.c.
 */

#if 0
/*
 * Breakpoint SWI instruction: SWI &9F0001
 */
#define BREAKINST_ARM	0xef9f0001
#define BREAKINST_THUMB	0xdf00		/* fill this in later */
#else
/*
 * New breakpoints - use an undefined instruction.  The ARM architecture
 * reference manual guarantees that the following instruction space
 * will produce an undefined instruction exception on all CPUs:
 *
 *  ARM:   xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
 *  Thumb: 1101 1110 xxxx xxxx
 */
#define BREAKINST_ARM	0xe7f001f0
#define BREAKINST_THUMB	0xde01
#endif
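
/*
 * Both of the new encodings fall inside the guaranteed-undefined space
 * quoted above: 0xe7f001f0 has bits [27:20] = 0111 1111 and bits [7:4]
 * = 1111, and 0xde01 has 1101 1110 in its top byte.
 */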

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) \
	{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0),
	REG_OFFSET_NAME(r1),
	REG_OFFSET_NAME(r2),
	REG_OFFSET_NAME(r3),
	REG_OFFSET_NAME(r4),
	REG_OFFSET_NAME(r5),
	REG_OFFSET_NAME(r6),
	REG_OFFSET_NAME(r7),
	REG_OFFSET_NAME(r8),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(fp),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(lr),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(cpsr),
	REG_OFFSET_NAME(ORIG_r0),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:      pt_regs which contains kernel stack pointer.
 * @addr:      address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel
 * stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1))  ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 * which is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
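
/*
 * The four helpers above let callers (such as the kprobes event tracer)
 * translate between register names and pt_regs offsets and peek at words
 * on the kernel stack; for example, regs_get_kernel_stack_nth(regs, 0)
 * reads the word at the current kernel stack pointer and yields 0 if that
 * address lies outside the task's stack.
 */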

/*
 * This routine gets a word from the process's privileged stack.
 * The offset is how far from the base address, as stored in the THREAD.
 * It assumes that all the privileged stacks are in our data space.
 */
static inline long get_user_reg(struct task_struct *task, int offset)
{
	return task_pt_regs(task)->uregs[offset];
}

/*
 * This routine puts a word onto the process's privileged stack.
 * The offset is how far from the base address, as stored in the THREAD.
 * It assumes that all the privileged stacks are in our data space.
 */
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
	struct pt_regs newregs, *regs = task_pt_regs(task);
	int ret = -EINVAL;

	newregs = *regs;
	newregs.uregs[offset] = data;

	if (valid_user_regs(&newregs)) {
		regs->uregs[offset] = data;
		ret = 0;
	}

	return ret;
}

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do. */
}

/*
 * Handle hitting a breakpoint.
 */
void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
	siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code  = TRAP_BRKPT;
	info.si_addr  = (void __user *)instruction_pointer(regs);

	force_sig_info(SIGTRAP, &info, tsk);
}

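/*
 * Undefined-instruction hook handler: a hit on one of the breakpoint
 * encodings is reported by raising SIGTRAP against the current task
 * via ptrace_break().
 */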
static int break_trap(struct pt_regs *regs, unsigned int instr)
{
	ptrace_break(current, regs);
	return 0;
}

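/*
 * One hook per breakpoint encoding: ARM state, 16-bit Thumb and the
 * 32-bit Thumb-2 form.  The ARM hook masks off the condition field so
 * that the instruction matches under any condition code.
 */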
static struct undef_hook arm_break_hook = {
	.instr_mask	= 0x0fffffff,
	.instr_val	= 0x07f001f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= break_trap,
};

static struct undef_hook thumb_break_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= 0xde01,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};

static struct undef_hook thumb2_break_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= 0xf7f0a000,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};

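/*
 * Install all three undefined-instruction hooks; this runs once, early
 * in boot (core_initcall).
 */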
static int __init ptrace_break_init(void)
{
	register_undef_hook(&arm_break_hook);
	register_undef_hook(&thumb_break_hook);
	register_undef_hook(&thumb2_break_hook);
	return 0;
}

core_initcall(ptrace_break_init);

/*
 * Read the word at offset "off" in the "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long __user *ret)
{
	unsigned long tmp;

	if (off & 3)
		return -EIO;

	tmp = 0;
	if (off == PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(struct pt_regs))
		tmp = get_user_reg(tsk, off >> 2);
	else if (off >= sizeof(struct user))
		return -EIO;

	return put_user(tmp, ret);
}

/*
 * Write the word at offset "off" into "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long val)
{
	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	if (off >= sizeof(struct pt_regs))
		return 0;

	return put_user_reg(tsk, off >> 2, val);
}

#ifdef CONFIG_IWMMXT

/*
 * Get the child iWMMXt state.
 */
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -ENODATA;
	iwmmxt_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child iWMMXt state.
 */
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -EACCES;
	iwmmxt_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
		? -EFAULT : 0;
}

#endif

#ifdef CONFIG_CRUNCH
/*
 * Get the child Crunch state.
 */
static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child Crunch state.
 */
static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
		? -EFAULT : 0;
}
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int ptrace_hbp_num_to_idx(long num)
{
	if (num < 0)
		num = (ARM_MAX_BRP << 1) - num;
	return (num - 1) >> 1;
}

/*
 * Returns the virtual register number for the address of the
 * breakpoint at index idx.
 */
static long ptrace_hbp_idx_to_num(int idx)
{
	long mid = ARM_MAX_BRP << 1;
	long num = (idx << 1) + 1;
	return num > mid ? mid - num : num;
}
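
/*
 * Worked example of the numbering scheme: virtual register 1 is the
 * address and 2 the control word of breakpoint slot 0, 3/4 map to
 * breakpoint slot 1, and so on, while -1/-2 are the address/control
 * pair of watchpoint slot 0.
 */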

/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				     struct perf_sample_data *data,
				     struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	long num;
	int i;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
		if (current->thread.debug.hbp[i] == bp)
			break;

	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);

	force_sig_ptrace_errno_trap((int)num, (void __user *)(bkpt->trigger));
}

/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
		if (t->debug.hbp[i]) {
			unregister_hw_breakpoint(t->debug.hbp[i]);
			t->debug.hbp[i] = NULL;
		}
	}
}

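/*
 * Build the packed word reported for virtual register 0:
 * bits [31:24] debug architecture, [23:16] maximum watchpoint length,
 * [15:8] number of watchpoints, [7:0] number of breakpoints.
 */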
static u32 ptrace_get_hbp_resource_info(void)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);
	debug_arch	= arch_get_debug_arch();
	wp_len		= arch_get_max_wp_len();

	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	return reg;
}

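/*
 * Allocate a new, initially disabled perf-backed breakpoint of the given
 * type for tsk; PTRACE_SETHBPREGS fills in the address and control
 * values later.
 */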
static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}

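/*
 * Read one virtual register: register 0 returns the resource word built
 * above, odd numbers return a breakpoint/watchpoint address and even
 * numbers the encoded control word (zero if the slot is unused).
 */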
static int ptrace_gethbpregs(struct task_struct *tsk, long num,
			     unsigned long  __user *data)
{
	u32 reg;
	int idx, ret = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl arch_ctrl;

	if (num == 0) {
		reg = ptrace_get_hbp_resource_info();
	} else {
		idx = ptrace_hbp_num_to_idx(num);
		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
			ret = -EINVAL;
			goto out;
		}

		bp = tsk->thread.debug.hbp[idx];
		if (!bp) {
			reg = 0;
			goto put;
		}

		arch_ctrl = counter_arch_bp(bp)->ctrl;

		/*
		 * Fix up the len because we may have adjusted it
		 * to compensate for an unaligned address.
		 */
		while (!(arch_ctrl.len & 0x1))
			arch_ctrl.len >>= 1;

		if (num & 0x1)
			reg = bp->attr.bp_addr;
		else
			reg = encode_ctrl_reg(arch_ctrl);
	}

put:
	if (put_user(reg, data))
		ret = -EFAULT;

out:
	return ret;
}

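/*
 * Write one virtual register.  The perf event backing a slot is created
 * lazily on first use; address writes update bp_addr directly, while
 * control writes are decoded and checked against the type implied by the
 * sign of the register number before the breakpoint is modified.
 */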
static int ptrace_sethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	int idx, gen_len, gen_type, implied_type, ret = 0;
	u32 user_val;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct perf_event_attr attr;

	if (num == 0)
		goto out;
	else if (num < 0)
		implied_type = HW_BREAKPOINT_RW;
	else
		implied_type = HW_BREAKPOINT_X;

	idx = ptrace_hbp_num_to_idx(num);
	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
		ret = -EINVAL;
		goto out;
	}

	if (get_user(user_val, data)) {
		ret = -EFAULT;
		goto out;
	}

	bp = tsk->thread.debug.hbp[idx];
	if (!bp) {
		bp = ptrace_hbp_create(tsk, implied_type);
		if (IS_ERR(bp)) {
			ret = PTR_ERR(bp);
			goto out;
		}
		tsk->thread.debug.hbp[idx] = bp;
	}

	attr = bp->attr;

	if (num & 0x1) {
		/* Address */
		attr.bp_addr	= user_val;
	} else {
		/* Control */
		decode_ctrl_reg(user_val, &ctrl);
		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
		if (ret)
			goto out;

		if ((gen_type & implied_type) != gen_type) {
			ret = -EINVAL;
			goto out;
		}

		attr.bp_len	= gen_len;
		attr.bp_type	= gen_type;
		attr.disabled	= !ctrl.enabled;
	}

	ret = modify_user_hw_breakpoint(bp, &attr);
out:
	return ret;
}
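
/*
 * Roughly, a debugger drives the interface above as follows (an
 * illustrative userspace sketch only, not part of the kernel build):
 *
 *	u32 info, addr, ctrl;
 *	ptrace(PTRACE_GETHBPREGS, pid, 0, &info);	resource word
 *	ptrace(PTRACE_SETHBPREGS, pid, 1, &addr);	breakpoint 0 address
 *	ptrace(PTRACE_SETHBPREGS, pid, 2, &ctrl);	breakpoint 0 control
 */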
#endif

/* regset get/set implementations */

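/*
 * The GPR regset transfers the whole of pt_regs; gpr_set() validates the
 * new register image with valid_user_regs() before committing it, just
 * like put_user_reg() above.
 */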
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   regs,
				   0, sizeof(*regs));
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs newregs = *task_pt_regs(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &newregs,
				 0, sizeof(newregs));
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	*task_pt_regs(target) = newregs;
	return 0;
}

static int fpa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &task_thread_info(target)->fpstate,
				   0, sizeof(struct user_fp));
}

static int fpa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct thread_info *thread = task_thread_info(target);

	thread->used_cp[1] = thread->used_cp[2] = 1;

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
		&thread->fpstate,
		0, sizeof(struct user_fp));
}

#ifdef CONFIG_VFP
/*
 * VFP register get/set implementations.
 *
 * With respect to the kernel, struct user_vfp is divided into three chunks:
 * 16 or 32 real VFP registers (d0-d15 or d0-d31)
 *	These are transferred to/from the real registers in the task's
 *	vfp_hard_struct.  The number of registers depends on the kernel
 *	configuration.
 *
 * 16 or 0 fake VFP registers (d16-d31 or empty)
 *	i.e., the user_vfp structure has space for 32 registers even if
 *	the kernel doesn't have them all.
 *
 *	vfp_get() reads this chunk as zero where applicable
 *	vfp_set() ignores this chunk
 *
 * 1 word for the FPSCR
 *
 * The bounds-checking logic built into user_regset_copyout and friends
 * means that we can make a simple sequence of calls to map the relevant data
 * to/from the specified slice of the user regset structure.
 */
static int vfp_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	vfp_sync_hwstate(thread);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &vfp->fpregs,
				  user_fpregs_offset,
				  user_fpregs_offset + sizeof(vfp->fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       user_fpregs_offset + sizeof(vfp->fpregs),
				       user_fpscr_offset);
	if (ret)
		return ret;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &vfp->fpscr,
				   user_fpscr_offset,
				   user_fpscr_offset + sizeof(vfp->fpscr));
}

/*
 * For vfp_set() a read-modify-write is done on the VFP registers,
 * in order to avoid writing back a half-modified set of registers on
 * failure.
 */
static int vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct new_vfp;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	vfp_sync_hwstate(thread);
	new_vfp = thread->vfpstate.hard;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &new_vfp.fpregs,
				  user_fpregs_offset,
				  user_fpregs_offset + sizeof(new_vfp.fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
				user_fpregs_offset + sizeof(new_vfp.fpregs),
				user_fpscr_offset);
	if (ret)
		return ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpscr,
				 user_fpscr_offset,
				 user_fpscr_offset + sizeof(new_vfp.fpscr));
	if (ret)
		return ret;

	thread->vfpstate.hard = new_vfp;
	vfp_flush_hwstate(thread);

	return 0;
}
#endif /* CONFIG_VFP */

enum arm_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_VFP
	REGSET_VFP,
#endif
};

static const struct user_regset arm_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		/*
		 * For the FPA regs in fpstate, the real fields are a mixture
		 * of sizes, so pretend that the registers are word-sized:
		 */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fp) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpa_get,
		.set = fpa_set
	},
#ifdef CONFIG_VFP
	[REGSET_VFP] = {
		/*
		 * Pretend that the VFP regs are word-sized, since the FPSCR is
		 * a single word dangling at the end of struct user_vfp:
		 */
		.core_note_type = NT_ARM_VFP,
		.n = ARM_VFPREGS_SIZE / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = vfp_get,
		.set = vfp_set
	},
#endif /* CONFIG_VFP */
};

static const struct user_regset_view user_arm_view = {
	.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};

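/*
 * Only one regset view exists on ARM, so it is returned for every task.
 */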
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_arm_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
		case PTRACE_PEEKUSR:
			ret = ptrace_read_user(child, addr, datap);
			break;

		case PTRACE_POKEUSR:
			ret = ptrace_write_user(child, addr, data);
			break;

		case PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_GPR,
						  0, sizeof(struct pt_regs),
						  datap);
			break;

		case PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_GPR,
						    0, sizeof(struct pt_regs),
						    datap);
			break;

		case PTRACE_GETFPREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_FPR,
						  0, sizeof(union fp_state),
						  datap);
			break;

		case PTRACE_SETFPREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_FPR,
						    0, sizeof(union fp_state),
						    datap);
			break;

#ifdef CONFIG_IWMMXT
		case PTRACE_GETWMMXREGS:
			ret = ptrace_getwmmxregs(child, datap);
			break;

		case PTRACE_SETWMMXREGS:
			ret = ptrace_setwmmxregs(child, datap);
			break;
#endif

		case PTRACE_GET_THREAD_AREA:
			ret = put_user(task_thread_info(child)->tp_value[0],
				       datap);
			break;

		case PTRACE_SET_SYSCALL:
			task_thread_info(child)->syscall = data;
			ret = 0;
			break;

#ifdef CONFIG_CRUNCH
		case PTRACE_GETCRUNCHREGS:
			ret = ptrace_getcrunchregs(child, datap);
			break;

		case PTRACE_SETCRUNCHREGS:
			ret = ptrace_setcrunchregs(child, datap);
			break;
#endif

#ifdef CONFIG_VFP
		case PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_VFP,
						  0, ARM_VFPREGS_SIZE,
						  datap);
			break;

		case PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_VFP,
						    0, ARM_VFPREGS_SIZE,
						    datap);
			break;
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
		case PTRACE_GETHBPREGS:
			ret = ptrace_gethbpregs(child, addr,
						(unsigned long __user *)data);
			break;
		case PTRACE_SETHBPREGS:
			ret = ptrace_sethbpregs(child, addr,
						(unsigned long __user *)data);
			break;
#endif

		default:
			ret = ptrace_request(child, request, addr, data);
			break;
	}

	return ret;
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				    enum ptrace_syscall_dir dir)
{
	unsigned long ip;

	/*
	 * IP is used to denote syscall entry/exit:
	 * IP = 0 -> entry, =1 -> exit
	 */
	ip = regs->ARM_ip;
	regs->ARM_ip = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		current_thread_info()->syscall = -1;

	regs->ARM_ip = ip;
}

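/*
 * Called on syscall entry; lets ptrace, seccomp, tracepoints and audit
 * see (and possibly rewrite) the syscall before it runs, and returns the
 * syscall number that the entry code should finally use.
 */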
asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
{
	current_thread_info()->syscall = scno;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do seccomp after ptrace; syscall may have changed. */
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
	if (secure_computing(NULL) == -1)
		return -1;
#else
	/* XXX: remove this once OABI gets fixed */
	secure_computing_strict(current_thread_info()->syscall);
#endif

	/* Tracer or seccomp may have changed syscall. */
	scno = current_thread_info()->syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, scno);

	audit_syscall_entry(scno, regs->ARM_r0, regs->ARM_r1, regs->ARM_r2,
			    regs->ARM_r3);

	return scno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	/*
	 * Audit the syscall before anything else, as a debugger may
	 * come in and change the current registers.
	 */
	audit_syscall_exit(regs);

	/*
	 * Note that we haven't updated the ->syscall field for the
	 * current thread. This isn't a problem because it will have
	 * been set on syscall entry and there hasn't been an opportunity
	 * for a PTRACE_SET_SYSCALL since then.
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}
975