xref: /openbmc/linux/arch/xtensa/kernel/ptrace.c (revision 7ae5c03a)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2007  Tensilica Inc.
 *
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel <chris@zankel.net>
 * Scott Foehner<sfoehner@yahoo.com>,
 * Kevin Chea
 * Marc Gauthier<marc@tensilica.com> <marc@alumni.uwaterloo.ca>
 */

#include <linux/audit.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/uaccess.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#include <asm/coprocessor.h>
#include <asm/elf.h>
#include <asm/page.h>
#include <asm/ptrace.h>

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);
	struct user_pt_regs newregs = {
		.pc = regs->pc,
		.ps = regs->ps & ~(1 << PS_EXCM_BIT),
		.lbeg = regs->lbeg,
		.lend = regs->lend,
		.lcount = regs->lcount,
		.sar = regs->sar,
		.threadptr = regs->threadptr,
		.windowbase = regs->windowbase,
		.windowstart = regs->windowstart,
		.syscall = regs->syscall,
	};

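	/*
	 * pt_regs->areg[] is stored relative to the current window
	 * (areg[0] is the task's a0).  Rotating it by windowbase * 4
	 * entries yields the physical AR register file that user space
	 * expects in user_pt_regs, alongside windowbase and windowstart.
	 */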
	memcpy(newregs.a,
	       regs->areg + XCHAL_NUM_AREGS - regs->windowbase * 4,
	       regs->windowbase * 16);
	memcpy(newregs.a + regs->windowbase * 4,
	       regs->areg,
	       (WSBITS - regs->windowbase) * 16);

	return membuf_write(&to, &newregs, sizeof(newregs));
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = {0};
	struct pt_regs *regs;
	const u32 ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (newregs.windowbase >= XCHAL_NUM_AREGS / 4)
		return -EINVAL;

	regs = task_pt_regs(target);
	regs->pc = newregs.pc;
	regs->ps = (regs->ps & ~ps_mask) | (newregs.ps & ps_mask);
	regs->lbeg = newregs.lbeg;
	regs->lend = newregs.lend;
	regs->lcount = newregs.lcount;
	regs->sar = newregs.sar;
	regs->threadptr = newregs.threadptr;

	if (newregs.syscall)
		regs->syscall = newregs.syscall;

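	/*
	 * If the tracer changed WINDOWBASE or WINDOWSTART, recompute wmask,
	 * which records which caller frames in areg[] are live, from the
	 * new WINDOWSTART rotated down to the new WINDOWBASE.
	 */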
	if (newregs.windowbase != regs->windowbase ||
	    newregs.windowstart != regs->windowstart) {
		u32 rotws, wmask;

		rotws = (((newregs.windowstart |
			   (newregs.windowstart << WSBITS)) >>
			  newregs.windowbase) &
			 ((1 << WSBITS) - 1)) & ~1;
		wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
			(rotws & 0xF) | 1;
		regs->windowbase = newregs.windowbase;
		regs->windowstart = newregs.windowstart;
		regs->wmask = wmask;
	}

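	/*
	 * Rotate the physical AR register image supplied by user space
	 * back into the window-relative layout used by pt_regs->areg[].
	 */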
	memcpy(regs->areg + XCHAL_NUM_AREGS - newregs.windowbase * 4,
	       newregs.a, newregs.windowbase * 16);
	memcpy(regs->areg, newregs.a + newregs.windowbase * 4,
	       (WSBITS - newregs.windowbase) * 16);

	return 0;
}

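/*
 * The TIE regset carries the optional ("opt") and user TIE state and, when
 * coprocessors are configured, the cp0..cp7 register files, all in the
 * elf_xtregs_t layout.  elf_xtregs_t may be large, so it is allocated with
 * kzalloc() rather than placed on the kernel stack.
 */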
static int tie_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int ret;
	struct pt_regs *regs = task_pt_regs(target);
	struct thread_info *ti = task_thread_info(target);
	elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);

	if (!newregs)
		return -ENOMEM;

	newregs->opt = regs->xtregs_opt;
	newregs->user = ti->xtregs_user;

#if XTENSA_HAVE_COPROCESSORS
	/* Flush all coprocessor registers to memory. */
	coprocessor_flush_all(ti);
	newregs->cp0 = ti->xtregs_cp.cp0;
	newregs->cp1 = ti->xtregs_cp.cp1;
	newregs->cp2 = ti->xtregs_cp.cp2;
	newregs->cp3 = ti->xtregs_cp.cp3;
	newregs->cp4 = ti->xtregs_cp.cp4;
	newregs->cp5 = ti->xtregs_cp.cp5;
	newregs->cp6 = ti->xtregs_cp.cp6;
	newregs->cp7 = ti->xtregs_cp.cp7;
#endif
	ret = membuf_write(&to, newregs, sizeof(*newregs));
	kfree(newregs);
	return ret;
}

static int tie_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs *regs = task_pt_regs(target);
	struct thread_info *ti = task_thread_info(target);
	elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);

	if (!newregs)
		return -ENOMEM;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 newregs, 0, -1);

	if (ret)
		goto exit;
	regs->xtregs_opt = newregs->opt;
	ti->xtregs_user = newregs->user;

#if XTENSA_HAVE_COPROCESSORS
	/* Flush all coprocessors before we overwrite them. */
	coprocessor_flush_release_all(ti);
	ti->xtregs_cp.cp0 = newregs->cp0;
	ti->xtregs_cp.cp1 = newregs->cp1;
	ti->xtregs_cp.cp2 = newregs->cp2;
	ti->xtregs_cp.cp3 = newregs->cp3;
	ti->xtregs_cp.cp4 = newregs->cp4;
	ti->xtregs_cp.cp5 = newregs->cp5;
	ti->xtregs_cp.cp6 = newregs->cp6;
	ti->xtregs_cp.cp7 = newregs->cp7;
#endif
exit:
	kfree(newregs);
	return ret;
}

enum xtensa_regset {
	REGSET_GPR,
	REGSET_TIE,
};

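/*
 * Register sets exported through the generic regset interface: the GPR set
 * is dumped as NT_PRSTATUS core notes and the TIE set as NT_PRFPREG.
 */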
static const struct user_regset xtensa_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = gpr_get,
		.set = gpr_set,
	},
	[REGSET_TIE] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(elf_xtregs_t) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = tie_get,
		.set = tie_set,
	},
};

static const struct user_regset_view user_xtensa_view = {
	.name = "xtensa",
	.e_machine = EM_XTENSA,
	.regsets = xtensa_regsets,
	.n = ARRAY_SIZE(xtensa_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_xtensa_view;
}

void user_enable_single_step(struct task_struct *child)
{
	set_tsk_thread_flag(child, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/*
 * Called by kernel/ptrace.c when detaching to disable single stepping.
 */

void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do.. */
}

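/*
 * PTRACE_GETREGS/SETREGS and PTRACE_GETXTREGS/SETXTREGS are thin wrappers
 * around the regsets defined above.
 */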
static int ptrace_getregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_to_user(child, &user_xtensa_view, REGSET_GPR,
				   0, sizeof(xtensa_gregset_t), uregs);
}

static int ptrace_setregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_from_user(child, &user_xtensa_view, REGSET_GPR,
				     0, sizeof(xtensa_gregset_t), uregs);
}

static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_to_user(child, &user_xtensa_view, REGSET_TIE,
				   0, sizeof(elf_xtregs_t), uregs);
}

static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
{
	return copy_regset_from_user(child, &user_xtensa_view, REGSET_TIE,
				     0, sizeof(elf_xtregs_t), uregs);
}

static int ptrace_peekusr(struct task_struct *child, long regno,
			  long __user *ret)
{
	struct pt_regs *regs;
	unsigned long tmp;

	regs = task_pt_regs(child);
	tmp = 0;  /* Default return value. */

	switch(regno) {
	case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
		tmp = regs->areg[regno - REG_AR_BASE];
		break;

	case REG_A_BASE ... REG_A_BASE + 15:
		tmp = regs->areg[regno - REG_A_BASE];
		break;

	case REG_PC:
		tmp = regs->pc;
		break;

	case REG_PS:
		/* Note: PS.EXCM is not set while user task is running;
		 * its being set in regs is for exception handling
		 * convenience.
		 */
		tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
		break;

	case REG_WB:
		break;		/* tmp = 0 */

	case REG_WS:
		{
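			/*
			 * Report WINDOWSTART rotated so that bit 0
			 * corresponds to the current WINDOWBASE.
			 */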
			unsigned long wb = regs->windowbase;
			unsigned long ws = regs->windowstart;
			tmp = ((ws >> wb) | (ws << (WSBITS - wb))) &
				((1 << WSBITS) - 1);
			break;
		}
	case REG_LBEG:
		tmp = regs->lbeg;
		break;

	case REG_LEND:
		tmp = regs->lend;
		break;

	case REG_LCOUNT:
		tmp = regs->lcount;
		break;

	case REG_SAR:
		tmp = regs->sar;
		break;

	case SYSCALL_NR:
		tmp = regs->syscall;
		break;

	default:
		return -EIO;
	}
	return put_user(tmp, ret);
}

static int ptrace_pokeusr(struct task_struct *child, long regno, long val)
{
	struct pt_regs *regs;
	regs = task_pt_regs(child);

	switch (regno) {
	case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
		regs->areg[regno - REG_AR_BASE] = val;
		break;

	case REG_A_BASE ... REG_A_BASE + 15:
		regs->areg[regno - REG_A_BASE] = val;
		break;

	case REG_PC:
		regs->pc = val;
		break;

	case SYSCALL_NR:
		regs->syscall = val;
		break;

	default:
		return -EIO;
	}
	return 0;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	int i;
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);

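	/*
	 * Tell the tracer which break register fired via si_errno: bit 0
	 * distinguishes data (1) from instruction (0) breakpoints and the
	 * remaining bits hold the register index, matching the encoding
	 * used by PTRACE_GETHBPREGS/SETHBPREGS below.
	 */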
	if (bp->attr.bp_type & HW_BREAKPOINT_X) {
		for (i = 0; i < XCHAL_NUM_IBREAK; ++i)
			if (current->thread.ptrace_bp[i] == bp)
				break;
		i <<= 1;
	} else {
		for (i = 0; i < XCHAL_NUM_DBREAK; ++i)
			if (current->thread.ptrace_wp[i] == bp)
				break;
		i = (i << 1) | 1;
	}

	force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
}

static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr	= 0;
	attr.bp_len	= 1;
	attr.bp_type	= type;
	attr.disabled	= 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}

/*
 * Address bit 0 chooses the instruction (0) or data (1) break register;
 * bits 31..1 are the register number.
 * Both PTRACE_GETHBPREGS and PTRACE_SETHBPREGS transfer two 32-bit words:
 * address (0) and control (1).
 * The instruction breakpoint control word is 0 to clear the breakpoint and
 * 1 to set it.
 * Data breakpoint control word bit 31 is 'trigger on store', bit 30 is
 * 'trigger on load', and bits 29..0 are the length. Length 0 is used to
 * clear a breakpoint. To set a breakpoint, the length must be a power of 2
 * in the range 1..64 and the address must be length-aligned.
 */
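/*
 * Illustrative example (tracer side, not part of this file): setting a
 * 4-byte store watchpoint in data break register 0 at 'addr' could look
 * like
 *
 *	uint32_t wp[2] = { addr, (1u << 31) | 4 };	(bit 31 = store, length 4)
 *	ptrace(PTRACE_SETHBPREGS, pid, (0 << 1) | 1, wp);
 *
 * and clearing it again would pass a control word of 0.
 */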

static long ptrace_gethbpregs(struct task_struct *child, long addr,
			      long __user *datap)
{
	struct perf_event *bp;
	u32 user_data[2] = {0};
	bool dbreak = addr & 1;
	unsigned idx = addr >> 1;

	if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
	    (dbreak && idx >= XCHAL_NUM_DBREAK))
		return -EINVAL;

	if (dbreak)
		bp = child->thread.ptrace_wp[idx];
	else
		bp = child->thread.ptrace_bp[idx];

	if (bp) {
		user_data[0] = bp->attr.bp_addr;
		user_data[1] = bp->attr.disabled ? 0 : bp->attr.bp_len;
		if (dbreak) {
			if (bp->attr.bp_type & HW_BREAKPOINT_R)
				user_data[1] |= DBREAKC_LOAD_MASK;
			if (bp->attr.bp_type & HW_BREAKPOINT_W)
				user_data[1] |= DBREAKC_STOR_MASK;
		}
	}

	if (copy_to_user(datap, user_data, sizeof(user_data)))
		return -EFAULT;

	return 0;
}

static long ptrace_sethbpregs(struct task_struct *child, long addr,
			      long __user *datap)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	u32 user_data[2];
	bool dbreak = addr & 1;
	unsigned idx = addr >> 1;
	int bp_type = 0;

	if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
	    (dbreak && idx >= XCHAL_NUM_DBREAK))
		return -EINVAL;

	if (copy_from_user(user_data, datap, sizeof(user_data)))
		return -EFAULT;

	if (dbreak) {
		bp = child->thread.ptrace_wp[idx];
		if (user_data[1] & DBREAKC_LOAD_MASK)
			bp_type |= HW_BREAKPOINT_R;
		if (user_data[1] & DBREAKC_STOR_MASK)
			bp_type |= HW_BREAKPOINT_W;
	} else {
		bp = child->thread.ptrace_bp[idx];
		bp_type = HW_BREAKPOINT_X;
	}

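	/*
	 * Lazily create the perf event backing this break register the
	 * first time it is written.
	 */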
	if (!bp) {
		bp = ptrace_hbp_create(child,
				       bp_type ? bp_type : HW_BREAKPOINT_RW);
		if (IS_ERR(bp))
			return PTR_ERR(bp);
		if (dbreak)
			child->thread.ptrace_wp[idx] = bp;
		else
			child->thread.ptrace_bp[idx] = bp;
	}

	attr = bp->attr;
	attr.bp_addr = user_data[0];
	attr.bp_len = user_data[1] & ~(DBREAKC_LOAD_MASK | DBREAKC_STOR_MASK);
	attr.bp_type = bp_type;
	attr.disabled = !attr.bp_len;

	return modify_user_hw_breakpoint(bp, &attr);
}
#endif

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret = -EPERM;
	void __user *datap = (void __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:	/* read register specified by addr. */
		ret = ptrace_peekusr(child, addr, datap);
		break;

	case PTRACE_POKEUSR:	/* write register specified by addr. */
		ret = ptrace_pokeusr(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datap);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datap);
		break;

	case PTRACE_GETXTREGS:
		ret = ptrace_getxregs(child, datap);
		break;

	case PTRACE_SETXTREGS:
		ret = ptrace_setxregs(child, datap);
		break;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case PTRACE_GETHBPREGS:
		ret = ptrace_gethbpregs(child, addr, datap);
		break;

	case PTRACE_SETHBPREGS:
		ret = ptrace_sethbpregs(child, addr, datap);
		break;
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

void do_syscall_trace_leave(struct pt_regs *regs);
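/*
 * Called on syscall entry.  Returns 1 if the syscall should proceed and 0
 * if it must be skipped, in which case regs->areg[2] already holds the
 * value to be returned to user space.
 */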
int do_syscall_trace_enter(struct pt_regs *regs)
{
	if (regs->syscall == NO_SYSCALL)
		regs->areg[2] = -ENOSYS;

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    ptrace_report_syscall_entry(regs)) {
		regs->areg[2] = -ENOSYS;
		regs->syscall = NO_SYSCALL;
		return 0;
	}

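	/*
	 * The tracer may have cancelled the syscall (NO_SYSCALL) or seccomp
	 * may deny it; in either case run the exit hooks now and skip the
	 * actual system call.
	 */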
	if (regs->syscall == NO_SYSCALL ||
	    secure_computing() == -1) {
		do_syscall_trace_leave(regs);
		return 0;
	}

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall_get_nr(current, regs));

	audit_syscall_entry(regs->syscall, regs->areg[6],
			    regs->areg[3], regs->areg[4],
			    regs->areg[5]);
	return 1;
}

void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	step = test_thread_flag(TIF_SINGLESTEP);

	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		ptrace_report_syscall_exit(regs, step);
}
586