xref: /openbmc/linux/arch/arm/kernel/ptrace.c (revision df2634f43f5106947f3735a0b61a6527a4b278cd)
1 /*
2  *  linux/arch/arm/kernel/ptrace.c
3  *
4  *  By Ross Biro 1/23/92
5  * edited by Linus Torvalds
6  * ARM modifications Copyright (C) 2000 Russell King
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
14 #include <linux/mm.h>
15 #include <linux/smp.h>
16 #include <linux/ptrace.h>
17 #include <linux/user.h>
18 #include <linux/security.h>
19 #include <linux/init.h>
20 #include <linux/signal.h>
21 #include <linux/uaccess.h>
22 #include <linux/perf_event.h>
23 #include <linux/hw_breakpoint.h>
24 
25 #include <asm/pgtable.h>
26 #include <asm/system.h>
27 #include <asm/traps.h>
28 
29 #include "ptrace.h"
30 
31 #define REG_PC	15
32 #define REG_PSR	16
33 /*
34  * does not yet catch signals sent when the child dies;
35  * that would need to be handled in exit.c or in signal.c.
36  */
37 
38 #if 0
39 /*
40  * Breakpoint SWI instruction: SWI &9F0001
41  */
42 #define BREAKINST_ARM	0xef9f0001
43 #define BREAKINST_THUMB	0xdf00		/* fill this in later */
44 #else
45 /*
46  * New breakpoints - use an undefined instruction.  The ARM architecture
47  * reference manual guarantees that the following instruction space
48  * will produce an undefined instruction exception on all CPUs:
49  *
50  *  ARM:   xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
51  *  Thumb: 1101 1110 xxxx xxxx
52  */
53 #define BREAKINST_ARM	0xe7f001f0
54 #define BREAKINST_THUMB	0xde01
55 #endif
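/*
 * Quick check that the encodings chosen above fit those patterns:
 *
 *   BREAKINST_ARM   0xe7f001f0 = 1110 0111 1111 0000 0000 0001 1111 0000
 *   BREAKINST_THUMB 0xde01     = 1101 1110 0000 0001
 */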
56 
57 struct pt_regs_offset {
58 	const char *name;
59 	int offset;
60 };
61 
62 #define REG_OFFSET_NAME(r) \
63 	{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
64 #define REG_OFFSET_END {.name = NULL, .offset = 0}
65 
66 static const struct pt_regs_offset regoffset_table[] = {
67 	REG_OFFSET_NAME(r0),
68 	REG_OFFSET_NAME(r1),
69 	REG_OFFSET_NAME(r2),
70 	REG_OFFSET_NAME(r3),
71 	REG_OFFSET_NAME(r4),
72 	REG_OFFSET_NAME(r5),
73 	REG_OFFSET_NAME(r6),
74 	REG_OFFSET_NAME(r7),
75 	REG_OFFSET_NAME(r8),
76 	REG_OFFSET_NAME(r9),
77 	REG_OFFSET_NAME(r10),
78 	REG_OFFSET_NAME(fp),
79 	REG_OFFSET_NAME(ip),
80 	REG_OFFSET_NAME(sp),
81 	REG_OFFSET_NAME(lr),
82 	REG_OFFSET_NAME(pc),
83 	REG_OFFSET_NAME(cpsr),
84 	REG_OFFSET_NAME(ORIG_r0),
85 	REG_OFFSET_END,
86 };
87 
88 /**
89  * regs_query_register_offset() - query register offset from its name
90  * @name:	the name of a register
91  *
92  * regs_query_register_offset() returns the offset of a register in struct
93  * pt_regs from its name. If the name is invalid, this returns -EINVAL.
94  */
95 int regs_query_register_offset(const char *name)
96 {
97 	const struct pt_regs_offset *roff;
98 	for (roff = regoffset_table; roff->name != NULL; roff++)
99 		if (!strcmp(roff->name, name))
100 			return roff->offset;
101 	return -EINVAL;
102 }
103 
104 /**
105  * regs_query_register_name() - query register name from its offset
106  * @offset:	the offset of a register in struct pt_regs.
107  *
108  * regs_query_register_name() returns the name of a register from its
109  * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
110  */
111 const char *regs_query_register_name(unsigned int offset)
112 {
113 	const struct pt_regs_offset *roff;
114 	for (roff = regoffset_table; roff->name != NULL; roff++)
115 		if (roff->offset == offset)
116 			return roff->name;
117 	return NULL;
118 }
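/*
 * Usage sketch (illustrative): regs_query_register_offset("pc") returns
 * offsetof(struct pt_regs, ARM_pc), i.e. 60, and regs_query_register_name(60)
 * maps that offset back to "pc".  Unknown names yield -EINVAL and unknown
 * offsets yield NULL.
 */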
119 
120 /**
121  * regs_within_kernel_stack() - check the address in the stack
122  * @regs:      pt_regs which contains kernel stack pointer.
123  * @addr:      address which is checked.
124  *
125  * regs_within_kernel_stack() checks whether @addr is within the kernel stack page(s).
126  * If @addr is within the kernel stack, it returns true. If not, returns false.
127  */
128 bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
129 {
130 	return ((addr & ~(THREAD_SIZE - 1))  ==
131 		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
132 }
133 
134 /**
135  * regs_get_kernel_stack_nth() - get Nth entry of the stack
136  * @regs:	pt_regs which contains kernel stack pointer.
137  * @n:		stack entry number.
138  *
139  * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
140  * specified by @regs. If the @n-th entry is not within the kernel stack,
141  * this returns 0.
142  */
143 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
144 {
145 	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
146 	addr += n;
147 	if (regs_within_kernel_stack(regs, (unsigned long)addr))
148 		return *addr;
149 	else
150 		return 0;
151 }
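/*
 * Illustration: with a kernel stack pointer of, say, 0xc7821f00,
 * regs_get_kernel_stack_nth(regs, 2) reads the word at 0xc7821f08, provided
 * that address still lies in the same THREAD_SIZE-aligned area as the stack
 * pointer; otherwise it returns 0.
 */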
152 
153 /*
154  * this routine will get a word off of the process's privileged stack.
155  * the offset is how far from the base addr as stored in the THREAD.
156  * this routine assumes that all the privileged stacks are in our
157  * data space.
158  */
159 static inline long get_user_reg(struct task_struct *task, int offset)
160 {
161 	return task_pt_regs(task)->uregs[offset];
162 }
163 
164 /*
165  * this routine will put a word on the process's privileged stack.
166  * the offset is how far from the base addr as stored in the THREAD.
167  * this routine assumes that all the privileged stacks are in our
168  * data space.
169  */
170 static inline int
171 put_user_reg(struct task_struct *task, int offset, long data)
172 {
173 	struct pt_regs newregs, *regs = task_pt_regs(task);
174 	int ret = -EINVAL;
175 
176 	newregs = *regs;
177 	newregs.uregs[offset] = data;
178 
179 	if (valid_user_regs(&newregs)) {
180 		regs->uregs[offset] = data;
181 		ret = 0;
182 	}
183 
184 	return ret;
185 }
186 
187 static inline int
188 read_u32(struct task_struct *task, unsigned long addr, u32 *res)
189 {
190 	int ret;
191 
192 	ret = access_process_vm(task, addr, res, sizeof(*res), 0);
193 
194 	return ret == sizeof(*res) ? 0 : -EIO;
195 }
196 
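/*
 * Fetch the instruction at @addr from the traced task.  As in
 * clear_breakpoint() below, bit 0 of the address flags a Thumb location:
 * a 16-bit halfword is read from addr & ~1, otherwise a 32-bit word is
 * read from addr & ~3.
 */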
197 static inline int
198 read_instr(struct task_struct *task, unsigned long addr, u32 *res)
199 {
200 	int ret;
201 
202 	if (addr & 1) {
203 		u16 val;
204 		ret = access_process_vm(task, addr & ~1, &val, sizeof(val), 0);
205 		ret = ret == sizeof(val) ? 0 : -EIO;
206 		*res = val;
207 	} else {
208 		u32 val;
209 		ret = access_process_vm(task, addr & ~3, &val, sizeof(val), 0);
210 		ret = ret == sizeof(val) ? 0 : -EIO;
211 		*res = val;
212 	}
213 	return ret;
214 }
215 
216 /*
217  * Get value of register `rn' (in the instruction)
218  */
219 static unsigned long
220 ptrace_getrn(struct task_struct *child, unsigned long insn)
221 {
222 	unsigned int reg = (insn >> 16) & 15;
223 	unsigned long val;
224 
225 	val = get_user_reg(child, reg);
226 	if (reg == 15)
227 		val += 8;
228 
229 	return val;
230 }
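/*
 * The "val += 8" above accounts for the ARM pipeline: an instruction that
 * reads r15 sees its own address plus 8.
 */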
231 
232 /*
233  * Get value of operand 2 (in an ALU instruction)
234  */
235 static unsigned long
236 ptrace_getaluop2(struct task_struct *child, unsigned long insn)
237 {
238 	unsigned long val;
239 	int shift;
240 	int type;
241 
242 	if (insn & 1 << 25) {
243 		val = insn & 255;
244 		shift = (insn >> 8) & 15;
245 		type = 3;
246 	} else {
247 		val = get_user_reg (child, insn & 15);
248 
249 		if (insn & (1 << 4))
250 			shift = (int)get_user_reg (child, (insn >> 8) & 15);
251 		else
252 			shift = (insn >> 7) & 31;
253 
254 		type = (insn >> 5) & 3;
255 	}
256 
257 	switch (type) {
258 	case 0:	val <<= shift;	break;
259 	case 1:	val >>= shift;	break;
260 	case 2:
261 		val = (((signed long)val) >> shift);
262 		break;
263 	case 3:
264  		val = (val >> shift) | (val << (32 - shift));
265 		break;
266 	}
267 	return val;
268 }
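/*
 * In the shift handling above (and in ptrace_getldrop2() below), "type" is
 * the barrel-shifter operation from bits 6:5 of the instruction:
 * 0 = LSL, 1 = LSR, 2 = ASR, 3 = ROR.
 */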
269 
270 /*
271  * Get value of operand 2 (in a LDR instruction)
272  */
273 static unsigned long
274 ptrace_getldrop2(struct task_struct *child, unsigned long insn)
275 {
276 	unsigned long val;
277 	int shift;
278 	int type;
279 
280 	val = get_user_reg(child, insn & 15);
281 	shift = (insn >> 7) & 31;
282 	type = (insn >> 5) & 3;
283 
284 	switch (type) {
285 	case 0:	val <<= shift;	break;
286 	case 1:	val >>= shift;	break;
287 	case 2:
288 		val = (((signed long)val) >> shift);
289 		break;
290 	case 3:
291  		val = (val >> shift) | (val << (32 - shift));
292 		break;
293 	}
294 	return val;
295 }
296 
297 #define OP_MASK	0x01e00000
298 #define OP_AND	0x00000000
299 #define OP_EOR	0x00200000
300 #define OP_SUB	0x00400000
301 #define OP_RSB	0x00600000
302 #define OP_ADD	0x00800000
303 #define OP_ADC	0x00a00000
304 #define OP_SBC	0x00c00000
305 #define OP_RSC	0x00e00000
306 #define OP_ORR	0x01800000
307 #define OP_MOV	0x01a00000
308 #define OP_BIC	0x01c00000
309 #define OP_MVN	0x01e00000
310 
311 static unsigned long
312 get_branch_address(struct task_struct *child, unsigned long pc, unsigned long insn)
313 {
314 	u32 alt = 0;
315 
316 	switch (insn & 0x0e000000) {
317 	case 0x00000000:
318 	case 0x02000000: {
319 		/*
320 		 * data processing
321 		 */
322 		long aluop1, aluop2, ccbit;
323 
324 	        if ((insn & 0x0fffffd0) == 0x012fff10) {
325 		        /*
326 			 * bx or blx
327 			 */
328 			alt = get_user_reg(child, insn & 15);
329 			break;
330 		}
331 
332 
333 		if ((insn & 0xf000) != 0xf000)
334 			break;
335 
336 		aluop1 = ptrace_getrn(child, insn);
337 		aluop2 = ptrace_getaluop2(child, insn);
338 		ccbit  = get_user_reg(child, REG_PSR) & PSR_C_BIT ? 1 : 0;
339 
340 		switch (insn & OP_MASK) {
341 		case OP_AND: alt = aluop1 & aluop2;		break;
342 		case OP_EOR: alt = aluop1 ^ aluop2;		break;
343 		case OP_SUB: alt = aluop1 - aluop2;		break;
344 		case OP_RSB: alt = aluop2 - aluop1;		break;
345 		case OP_ADD: alt = aluop1 + aluop2;		break;
346 		case OP_ADC: alt = aluop1 + aluop2 + ccbit;	break;
347 		case OP_SBC: alt = aluop1 - aluop2 + ccbit;	break;
348 		case OP_RSC: alt = aluop2 - aluop1 + ccbit;	break;
349 		case OP_ORR: alt = aluop1 | aluop2;		break;
350 		case OP_MOV: alt = aluop2;			break;
351 		case OP_BIC: alt = aluop1 & ~aluop2;		break;
352 		case OP_MVN: alt = ~aluop2;			break;
353 		}
354 		break;
355 	}
356 
357 	case 0x04000000:
358 	case 0x06000000:
359 		/*
360 		 * ldr
361 		 */
362 		if ((insn & 0x0010f000) == 0x0010f000) {
363 			unsigned long base;
364 
365 			base = ptrace_getrn(child, insn);
366 			if (insn & 1 << 24) {
367 				long aluop2;
368 
369 				if (insn & 0x02000000)
370 					aluop2 = ptrace_getldrop2(child, insn);
371 				else
372 					aluop2 = insn & 0xfff;
373 
374 				if (insn & 1 << 23)
375 					base += aluop2;
376 				else
377 					base -= aluop2;
378 			}
379 			read_u32(child, base, &alt);
380 		}
381 		break;
382 
383 	case 0x08000000:
384 		/*
385 		 * ldm
386 		 */
387 		if ((insn & 0x00108000) == 0x00108000) {
388 			unsigned long base;
389 			unsigned int nr_regs;
390 
391 			if (insn & (1 << 23)) {
392 				nr_regs = hweight16(insn & 65535) << 2;
393 
394 				if (!(insn & (1 << 24)))
395 					nr_regs -= 4;
396 			} else {
397 				if (insn & (1 << 24))
398 					nr_regs = -4;
399 				else
400 					nr_regs = 0;
401 			}
402 
403 			base = ptrace_getrn(child, insn);
404 
405 			read_u32(child, base + nr_regs, &alt);
406 			break;
407 		}
408 		break;
409 
410 	case 0x0a000000: {
411 		/*
412 		 * bl or b
413 		 */
414 		signed long displ;
415 		/* It's a branch/branch link: instead of trying to
416 		 * figure out whether the branch will be taken or not,
417 		 * we'll put a breakpoint at both locations.  This is
418 		 * simpler, more reliable, and probably not a whole lot
419 		 * slower than the alternative approach of emulating the
420 		 * branch.
421 		 */
422 		displ = (insn & 0x00ffffff) << 8;
423 		displ = (displ >> 6) + 8;
424 		if (displ != 0 && displ != 4)
425 			alt = pc + displ;
426 	    }
427 	    break;
428 	}
429 
430 	return alt;
431 }
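/*
 * Worked example for the b/bl case above (illustrative): shifting the
 * 24-bit offset field to the top of a signed 32-bit value and arithmetically
 * shifting it back down by 6 sign-extends it and multiplies it by 4; adding
 * 8 accounts for the pipeline.  For "b .+0x20" the offset field is 6, so
 * displ = 6 * 4 + 8 = 0x20 and the breakpoint is placed at pc + 0x20.
 * A displ of 0 (a branch to itself) or 4 (a branch to the next instruction)
 * is ignored; ptrace_set_bpt() then falls back to the plain breakpoint at
 * pc + 4.
 */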
432 
433 static int
434 swap_insn(struct task_struct *task, unsigned long addr,
435 	  void *old_insn, void *new_insn, int size)
436 {
437 	int ret;
438 
439 	ret = access_process_vm(task, addr, old_insn, size, 0);
440 	if (ret == size)
441 		ret = access_process_vm(task, addr, new_insn, size, 1);
442 	return ret;
443 }
444 
445 static void
446 add_breakpoint(struct task_struct *task, struct debug_info *dbg, unsigned long addr)
447 {
448 	int nr = dbg->nsaved;
449 
450 	if (nr < 2) {
451 		u32 new_insn = BREAKINST_ARM;
452 		int res;
453 
454 		res = swap_insn(task, addr, &dbg->bp[nr].insn, &new_insn, 4);
455 
456 		if (res == 4) {
457 			dbg->bp[nr].address = addr;
458 			dbg->nsaved += 1;
459 		}
460 	} else
461 		printk(KERN_ERR "ptrace: too many breakpoints\n");
462 }
463 
464 /*
465  * Clear one breakpoint in the user program.  We copy what the hardware
466  * does and use bit 0 of the address to indicate whether this is a Thumb
467  * breakpoint or an ARM breakpoint.
468  */
469 static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp)
470 {
471 	unsigned long addr = bp->address;
472 	union debug_insn old_insn;
473 	int ret;
474 
475 	if (addr & 1) {
476 		ret = swap_insn(task, addr & ~1, &old_insn.thumb,
477 				&bp->insn.thumb, 2);
478 
479 		if (ret != 2 || old_insn.thumb != BREAKINST_THUMB)
480 			printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at "
481 				"0x%08lx (0x%04x)\n", task->comm,
482 				task_pid_nr(task), addr, old_insn.thumb);
483 	} else {
484 		ret = swap_insn(task, addr & ~3, &old_insn.arm,
485 				&bp->insn.arm, 4);
486 
487 		if (ret != 4 || old_insn.arm != BREAKINST_ARM)
488 			printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at "
489 				"0x%08lx (0x%08x)\n", task->comm,
490 				task_pid_nr(task), addr, old_insn.arm);
491 	}
492 }
493 
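/*
 * Software single-step: there is no hardware single-step mode here, so
 * stepping is emulated by planting BREAKINST_ARM at the possible successors
 * of the instruction about to execute - the branch target computed by
 * get_branch_address() and/or the next instruction at pc + 4.
 * ptrace_cancel_bpt() restores the saved instructions afterwards.
 */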
494 void ptrace_set_bpt(struct task_struct *child)
495 {
496 	struct pt_regs *regs;
497 	unsigned long pc;
498 	u32 insn;
499 	int res;
500 
501 	regs = task_pt_regs(child);
502 	pc = instruction_pointer(regs);
503 
504 	if (thumb_mode(regs)) {
505 		printk(KERN_WARNING "ptrace: can't handle thumb mode\n");
506 		return;
507 	}
508 
509 	res = read_instr(child, pc, &insn);
510 	if (!res) {
511 		struct debug_info *dbg = &child->thread.debug;
512 		unsigned long alt;
513 
514 		dbg->nsaved = 0;
515 
516 		alt = get_branch_address(child, pc, insn);
517 		if (alt)
518 			add_breakpoint(child, dbg, alt);
519 
520 		/*
521 		 * Note that we ignore the result of setting the above
522 		 * breakpoint since it may fail.  When it does, this is
523 		 * not so much an error, but a forewarning that we may
524 		 * be receiving a prefetch abort shortly.
525 		 *
526 		 * If we don't set this breakpoint here, then we can
527 		 * lose control of the thread during single stepping.
528 		 */
529 		if (!alt || predicate(insn) != PREDICATE_ALWAYS)
530 			add_breakpoint(child, dbg, pc + 4);
531 	}
532 }
533 
534 /*
535  * Ensure no single-step breakpoints are pending, restoring the original
536  * instructions that were saved when the breakpoints were planted.
537  */
538 void ptrace_cancel_bpt(struct task_struct *child)
539 {
540 	int i, nsaved = child->thread.debug.nsaved;
541 
542 	child->thread.debug.nsaved = 0;
543 
544 	if (nsaved > 2) {
545 		printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
546 		nsaved = 2;
547 	}
548 
549 	for (i = 0; i < nsaved; i++)
550 		clear_breakpoint(child, &child->thread.debug.bp[i]);
551 }
552 
553 void user_disable_single_step(struct task_struct *task)
554 {
555 	task->ptrace &= ~PT_SINGLESTEP;
556 	ptrace_cancel_bpt(task);
557 }
558 
559 void user_enable_single_step(struct task_struct *task)
560 {
561 	task->ptrace |= PT_SINGLESTEP;
562 }
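/*
 * Note that PT_SINGLESTEP only marks the task; the breakpoints themselves
 * are planted by ptrace_set_bpt() and removed again by ptrace_cancel_bpt(),
 * as in user_disable_single_step() above.
 */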
563 
564 /*
565  * Called by kernel/ptrace.c when detaching..
566  */
567 void ptrace_disable(struct task_struct *child)
568 {
569 	user_disable_single_step(child);
570 }
571 
572 /*
573  * Handle hitting a breakpoint.
574  */
575 void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
576 {
577 	siginfo_t info;
578 
579 	ptrace_cancel_bpt(tsk);
580 
581 	info.si_signo = SIGTRAP;
582 	info.si_errno = 0;
583 	info.si_code  = TRAP_BRKPT;
584 	info.si_addr  = (void __user *)instruction_pointer(regs);
585 
586 	force_sig_info(SIGTRAP, &info, tsk);
587 }
588 
589 static int break_trap(struct pt_regs *regs, unsigned int instr)
590 {
591 	ptrace_break(current, regs);
592 	return 0;
593 }
594 
595 static struct undef_hook arm_break_hook = {
596 	.instr_mask	= 0x0fffffff,
597 	.instr_val	= 0x07f001f0,
598 	.cpsr_mask	= PSR_T_BIT,
599 	.cpsr_val	= 0,
600 	.fn		= break_trap,
601 };
602 
603 static struct undef_hook thumb_break_hook = {
604 	.instr_mask	= 0xffff,
605 	.instr_val	= 0xde01,
606 	.cpsr_mask	= PSR_T_BIT,
607 	.cpsr_val	= PSR_T_BIT,
608 	.fn		= break_trap,
609 };
610 
611 static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr)
612 {
613 	unsigned int instr2;
614 	void __user *pc;
615 
616 	/* Check the second half of the instruction.  */
617 	pc = (void __user *)(instruction_pointer(regs) + 2);
618 
619 	if (processor_mode(regs) == SVC_MODE) {
620 		instr2 = *(u16 *) pc;
621 	} else {
622 		get_user(instr2, (u16 __user *)pc);
623 	}
624 
625 	if (instr2 == 0xa000) {
626 		ptrace_break(current, regs);
627 		return 0;
628 	} else {
629 		return 1;
630 	}
631 }
632 
633 static struct undef_hook thumb2_break_hook = {
634 	.instr_mask	= 0xffff,
635 	.instr_val	= 0xf7f0,
636 	.cpsr_mask	= PSR_T_BIT,
637 	.cpsr_val	= PSR_T_BIT,
638 	.fn		= thumb2_break_trap,
639 };
640 
641 static int __init ptrace_break_init(void)
642 {
643 	register_undef_hook(&arm_break_hook);
644 	register_undef_hook(&thumb_break_hook);
645 	register_undef_hook(&thumb2_break_hook);
646 	return 0;
647 }
648 
649 core_initcall(ptrace_break_init);
650 
651 /*
652  * Read the word at offset "off" into the "struct user".  We
653  * actually access the pt_regs stored on the kernel stack.
654  */
655 static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
656 			    unsigned long __user *ret)
657 {
658 	unsigned long tmp;
659 
660 	if (off & 3 || off >= sizeof(struct user))
661 		return -EIO;
662 
663 	tmp = 0;
664 	if (off == PT_TEXT_ADDR)
665 		tmp = tsk->mm->start_code;
666 	else if (off == PT_DATA_ADDR)
667 		tmp = tsk->mm->start_data;
668 	else if (off == PT_TEXT_END_ADDR)
669 		tmp = tsk->mm->end_code;
670 	else if (off < sizeof(struct pt_regs))
671 		tmp = get_user_reg(tsk, off >> 2);
672 
673 	return put_user(tmp, ret);
674 }
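/*
 * Offsets are byte offsets into struct user and must be word aligned.
 * Offsets that fall inside pt_regs select uregs[off >> 2], so offset 0
 * reads r0 and offset 60 (REG_PC << 2) reads the pc; other in-range,
 * non-register offsets read back as 0.
 */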
675 
676 /*
677  * Write the word at offset "off" into "struct user".  We
678  * actually access the pt_regs stored on the kernel stack.
679  */
680 static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
681 			     unsigned long val)
682 {
683 	if (off & 3 || off >= sizeof(struct user))
684 		return -EIO;
685 
686 	if (off >= sizeof(struct pt_regs))
687 		return 0;
688 
689 	return put_user_reg(tsk, off >> 2, val);
690 }
691 
692 /*
693  * Get all user integer registers.
694  */
695 static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
696 {
697 	struct pt_regs *regs = task_pt_regs(tsk);
698 
699 	return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
700 }
701 
702 /*
703  * Set all user integer registers.
704  */
705 static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
706 {
707 	struct pt_regs newregs;
708 	int ret;
709 
710 	ret = -EFAULT;
711 	if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
712 		struct pt_regs *regs = task_pt_regs(tsk);
713 
714 		ret = -EINVAL;
715 		if (valid_user_regs(&newregs)) {
716 			*regs = newregs;
717 			ret = 0;
718 		}
719 	}
720 
721 	return ret;
722 }
723 
724 /*
725  * Get the child FPU state.
726  */
727 static int ptrace_getfpregs(struct task_struct *tsk, void __user *ufp)
728 {
729 	return copy_to_user(ufp, &task_thread_info(tsk)->fpstate,
730 			    sizeof(struct user_fp)) ? -EFAULT : 0;
731 }
732 
733 /*
734  * Set the child FPU state.
735  */
736 static int ptrace_setfpregs(struct task_struct *tsk, void __user *ufp)
737 {
738 	struct thread_info *thread = task_thread_info(tsk);
739 	thread->used_cp[1] = thread->used_cp[2] = 1;
740 	return copy_from_user(&thread->fpstate, ufp,
741 			      sizeof(struct user_fp)) ? -EFAULT : 0;
742 }
743 
744 #ifdef CONFIG_IWMMXT
745 
746 /*
747  * Get the child iWMMXt state.
748  */
749 static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
750 {
751 	struct thread_info *thread = task_thread_info(tsk);
752 
753 	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
754 		return -ENODATA;
755 	iwmmxt_task_disable(thread);  /* force it to ram */
756 	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
757 		? -EFAULT : 0;
758 }
759 
760 /*
761  * Set the child iWMMXt state.
762  */
763 static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
764 {
765 	struct thread_info *thread = task_thread_info(tsk);
766 
767 	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
768 		return -EACCES;
769 	iwmmxt_task_release(thread);  /* force a reload */
770 	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
771 		? -EFAULT : 0;
772 }
773 
774 #endif
775 
776 #ifdef CONFIG_CRUNCH
777 /*
778  * Get the child Crunch state.
779  */
780 static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
781 {
782 	struct thread_info *thread = task_thread_info(tsk);
783 
784 	crunch_task_disable(thread);  /* force it to ram */
785 	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
786 		? -EFAULT : 0;
787 }
788 
789 /*
790  * Set the child Crunch state.
791  */
792 static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
793 {
794 	struct thread_info *thread = task_thread_info(tsk);
795 
796 	crunch_task_release(thread);  /* force a reload */
797 	return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
798 		? -EFAULT : 0;
799 }
800 #endif
801 
802 #ifdef CONFIG_VFP
803 /*
804  * Get the child VFP state.
805  */
806 static int ptrace_getvfpregs(struct task_struct *tsk, void __user *data)
807 {
808 	struct thread_info *thread = task_thread_info(tsk);
809 	union vfp_state *vfp = &thread->vfpstate;
810 	struct user_vfp __user *ufp = data;
811 
812 	vfp_sync_hwstate(thread);
813 
814 	/* copy the floating point registers */
815 	if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs,
816 			 sizeof(vfp->hard.fpregs)))
817 		return -EFAULT;
818 
819 	/* copy the status and control register */
820 	if (put_user(vfp->hard.fpscr, &ufp->fpscr))
821 		return -EFAULT;
822 
823 	return 0;
824 }
825 
826 /*
827  * Set the child VFP state.
828  */
829 static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data)
830 {
831 	struct thread_info *thread = task_thread_info(tsk);
832 	union vfp_state *vfp = &thread->vfpstate;
833 	struct user_vfp __user *ufp = data;
834 
835 	vfp_sync_hwstate(thread);
836 
837 	/* copy the floating point registers */
838 	if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs,
839 			   sizeof(vfp->hard.fpregs)))
840 		return -EFAULT;
841 
842 	/* copy the status and control register */
843 	if (get_user(vfp->hard.fpscr, &ufp->fpscr))
844 		return -EFAULT;
845 
846 	vfp_flush_hwstate(thread);
847 
848 	return 0;
849 }
850 #endif
851 
852 #ifdef CONFIG_HAVE_HW_BREAKPOINT
853 /*
854  * Convert a virtual register number into an index for a thread_info
855  * breakpoint array. Breakpoints are identified using positive numbers
856  * whilst watchpoints are negative. The registers are laid out as pairs
857  * of (address, control), each pair mapping to a unique hw_breakpoint struct.
858  * Register 0 is reserved for describing resource information.
859  */
860 static int ptrace_hbp_num_to_idx(long num)
861 {
862 	if (num < 0)
863 		num = (ARM_MAX_BRP << 1) - num;
864 	return (num - 1) >> 1;
865 }
866 
867 /*
868  * Returns the virtual register number for the address of the
869  * breakpoint at index idx.
870  */
871 static long ptrace_hbp_idx_to_num(int idx)
872 {
873 	long mid = ARM_MAX_BRP << 1;
874 	long num = (idx << 1) + 1;
875 	return num > mid ? mid - num : num;
876 }
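/*
 * Example of the numbering (with ARM_MAX_BRP breakpoint slots): registers
 * 1/2 are the (address, control) pair for breakpoint slot 0, 3/4 for slot 1,
 * and so on; registers -1/-2 are the pair for watchpoint slot ARM_MAX_BRP,
 * -3/-4 for slot ARM_MAX_BRP + 1.  ptrace_hbp_idx_to_num() returns the
 * address register of a slot, e.g. slot 0 -> 1 and slot ARM_MAX_BRP -> -1.
 */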
877 
878 /*
879  * Handle hitting a HW-breakpoint.
880  */
881 static void ptrace_hbptriggered(struct perf_event *bp, int unused,
882 				     struct perf_sample_data *data,
883 				     struct pt_regs *regs)
884 {
885 	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
886 	long num;
887 	int i;
888 	siginfo_t info;
889 
890 	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
891 		if (current->thread.debug.hbp[i] == bp)
892 			break;
893 
894 	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);
895 
896 	info.si_signo	= SIGTRAP;
897 	info.si_errno	= (int)num;
898 	info.si_code	= TRAP_HWBKPT;
899 	info.si_addr	= (void __user *)(bkpt->trigger);
900 
901 	force_sig_info(SIGTRAP, &info, current);
902 }
903 
904 /*
905  * Set ptrace breakpoint pointers to zero for this task.
906  * This is required in order to prevent child processes from unregistering
907  * breakpoints held by their parent.
908  */
909 void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
910 {
911 	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
912 }
913 
914 /*
915  * Unregister breakpoints from this task and reset the pointers in
916  * the thread_struct.
917  */
918 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
919 {
920 	int i;
921 	struct thread_struct *t = &tsk->thread;
922 
923 	for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
924 		if (t->debug.hbp[i]) {
925 			unregister_hw_breakpoint(t->debug.hbp[i]);
926 			t->debug.hbp[i] = NULL;
927 		}
928 	}
929 }
930 
931 static u32 ptrace_get_hbp_resource_info(void)
932 {
933 	u8 num_brps, num_wrps, debug_arch, wp_len;
934 	u32 reg = 0;
935 
936 	num_brps	= hw_breakpoint_slots(TYPE_INST);
937 	num_wrps	= hw_breakpoint_slots(TYPE_DATA);
938 	debug_arch	= arch_get_debug_arch();
939 	wp_len		= arch_get_max_wp_len();
940 
941 	reg		|= debug_arch;
942 	reg		<<= 8;
943 	reg		|= wp_len;
944 	reg		<<= 8;
945 	reg		|= num_wrps;
946 	reg		<<= 8;
947 	reg		|= num_brps;
948 
949 	return reg;
950 }
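/*
 * Layout of the resource word returned for virtual register 0:
 *
 *	bits 31..24	debug architecture version
 *	bits 23..16	maximum watchpoint length
 *	bits 15..8	number of watchpoint slots
 *	bits  7..0	number of breakpoint slots
 */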
951 
952 static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
953 {
954 	struct perf_event_attr attr;
955 
956 	ptrace_breakpoint_init(&attr);
957 
958 	/* Initialise fields to sane defaults. */
959 	attr.bp_addr	= 0;
960 	attr.bp_len	= HW_BREAKPOINT_LEN_4;
961 	attr.bp_type	= type;
962 	attr.disabled	= 1;
963 
964 	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk);
965 }
966 
967 static int ptrace_gethbpregs(struct task_struct *tsk, long num,
968 			     unsigned long  __user *data)
969 {
970 	u32 reg;
971 	int idx, ret = 0;
972 	struct perf_event *bp;
973 	struct arch_hw_breakpoint_ctrl arch_ctrl;
974 
975 	if (num == 0) {
976 		reg = ptrace_get_hbp_resource_info();
977 	} else {
978 		idx = ptrace_hbp_num_to_idx(num);
979 		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
980 			ret = -EINVAL;
981 			goto out;
982 		}
983 
984 		bp = tsk->thread.debug.hbp[idx];
985 		if (!bp) {
986 			reg = 0;
987 			goto put;
988 		}
989 
990 		arch_ctrl = counter_arch_bp(bp)->ctrl;
991 
992 		/*
993 		 * Fix up the len because we may have adjusted it
994 		 * to compensate for an unaligned address.
995 		 */
996 		while (!(arch_ctrl.len & 0x1))
997 			arch_ctrl.len >>= 1;
998 
999 		if (num & 0x1)
1000 			reg = bp->attr.bp_addr;
1001 		else
1002 			reg = encode_ctrl_reg(arch_ctrl);
1003 	}
1004 
1005 put:
1006 	if (put_user(reg, data))
1007 		ret = -EFAULT;
1008 
1009 out:
1010 	return ret;
1011 }
1012 
1013 static int ptrace_sethbpregs(struct task_struct *tsk, long num,
1014 			     unsigned long __user *data)
1015 {
1016 	int idx, gen_len, gen_type, implied_type, ret = 0;
1017 	u32 user_val;
1018 	struct perf_event *bp;
1019 	struct arch_hw_breakpoint_ctrl ctrl;
1020 	struct perf_event_attr attr;
1021 
1022 	if (num == 0)
1023 		goto out;
1024 	else if (num < 0)
1025 		implied_type = HW_BREAKPOINT_RW;
1026 	else
1027 		implied_type = HW_BREAKPOINT_X;
1028 
1029 	idx = ptrace_hbp_num_to_idx(num);
1030 	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
1031 		ret = -EINVAL;
1032 		goto out;
1033 	}
1034 
1035 	if (get_user(user_val, data)) {
1036 		ret = -EFAULT;
1037 		goto out;
1038 	}
1039 
1040 	bp = tsk->thread.debug.hbp[idx];
1041 	if (!bp) {
1042 		bp = ptrace_hbp_create(tsk, implied_type);
1043 		if (IS_ERR(bp)) {
1044 			ret = PTR_ERR(bp);
1045 			goto out;
1046 		}
1047 		tsk->thread.debug.hbp[idx] = bp;
1048 	}
1049 
1050 	attr = bp->attr;
1051 
1052 	if (num & 0x1) {
1053 		/* Address */
1054 		attr.bp_addr	= user_val;
1055 	} else {
1056 		/* Control */
1057 		decode_ctrl_reg(user_val, &ctrl);
1058 		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
1059 		if (ret)
1060 			goto out;
1061 
1062 		if ((gen_type & implied_type) != gen_type) {
1063 			ret = -EINVAL;
1064 			goto out;
1065 		}
1066 
1067 		attr.bp_len	= gen_len;
1068 		attr.bp_type	= gen_type;
1069 		attr.disabled	= !ctrl.enabled;
1070 	}
1071 
1072 	ret = modify_user_hw_breakpoint(bp, &attr);
1073 out:
1074 	return ret;
1075 }
1076 #endif
1077 
1078 long arch_ptrace(struct task_struct *child, long request,
1079 		 unsigned long addr, unsigned long data)
1080 {
1081 	int ret;
1082 	unsigned long __user *datap = (unsigned long __user *) data;
1083 
1084 	switch (request) {
1085 		case PTRACE_PEEKUSR:
1086 			ret = ptrace_read_user(child, addr, datap);
1087 			break;
1088 
1089 		case PTRACE_POKEUSR:
1090 			ret = ptrace_write_user(child, addr, data);
1091 			break;
1092 
1093 		case PTRACE_GETREGS:
1094 			ret = ptrace_getregs(child, datap);
1095 			break;
1096 
1097 		case PTRACE_SETREGS:
1098 			ret = ptrace_setregs(child, datap);
1099 			break;
1100 
1101 		case PTRACE_GETFPREGS:
1102 			ret = ptrace_getfpregs(child, datap);
1103 			break;
1104 
1105 		case PTRACE_SETFPREGS:
1106 			ret = ptrace_setfpregs(child, datap);
1107 			break;
1108 
1109 #ifdef CONFIG_IWMMXT
1110 		case PTRACE_GETWMMXREGS:
1111 			ret = ptrace_getwmmxregs(child, datap);
1112 			break;
1113 
1114 		case PTRACE_SETWMMXREGS:
1115 			ret = ptrace_setwmmxregs(child, datap);
1116 			break;
1117 #endif
1118 
1119 		case PTRACE_GET_THREAD_AREA:
1120 			ret = put_user(task_thread_info(child)->tp_value,
1121 				       datap);
1122 			break;
1123 
1124 		case PTRACE_SET_SYSCALL:
1125 			task_thread_info(child)->syscall = data;
1126 			ret = 0;
1127 			break;
1128 
1129 #ifdef CONFIG_CRUNCH
1130 		case PTRACE_GETCRUNCHREGS:
1131 			ret = ptrace_getcrunchregs(child, datap);
1132 			break;
1133 
1134 		case PTRACE_SETCRUNCHREGS:
1135 			ret = ptrace_setcrunchregs(child, datap);
1136 			break;
1137 #endif
1138 
1139 #ifdef CONFIG_VFP
1140 		case PTRACE_GETVFPREGS:
1141 			ret = ptrace_getvfpregs(child, datap);
1142 			break;
1143 
1144 		case PTRACE_SETVFPREGS:
1145 			ret = ptrace_setvfpregs(child, datap);
1146 			break;
1147 #endif
1148 
1149 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1150 		case PTRACE_GETHBPREGS:
1151 			ret = ptrace_gethbpregs(child, addr,
1152 						(unsigned long __user *)data);
1153 			break;
1154 		case PTRACE_SETHBPREGS:
1155 			ret = ptrace_sethbpregs(child, addr,
1156 						(unsigned long __user *)data);
1157 			break;
1158 #endif
1159 
1160 		default:
1161 			ret = ptrace_request(child, request, addr, data);
1162 			break;
1163 	}
1164 
1165 	return ret;
1166 }
1167 
1168 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1169 {
1170 	unsigned long ip;
1171 
1172 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
1173 		return scno;
1174 	if (!(current->ptrace & PT_PTRACED))
1175 		return scno;
1176 
1177 	/*
1178 	 * Save IP.  IP is used to denote syscall entry/exit:
1179 	 *  IP = 0 -> entry, = 1 -> exit
1180 	 */
1181 	ip = regs->ARM_ip;
1182 	regs->ARM_ip = why;
1183 
1184 	current_thread_info()->syscall = scno;
1185 
1186 	/* the 0x80 provides a way for the tracing parent to distinguish
1187 	   between a syscall stop and SIGTRAP delivery */
1188 	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
1189 				 ? 0x80 : 0));
1190 	/*
1191 	 * this isn't the same as continuing with a signal, but it will do
1192 	 * for normal use.  strace only continues with a signal if the
1193 	 * stopping signal is not SIGTRAP.  -brl
1194 	 */
1195 	if (current->exit_code) {
1196 		send_sig(current->exit_code, current, 1);
1197 		current->exit_code = 0;
1198 	}
1199 	regs->ARM_ip = ip;
1200 
1201 	return current_thread_info()->syscall;
1202 }
1203