xref: /openbmc/linux/arch/x86/kernel/ptrace.c (revision 53809828)
1 /* By Ross Biro 1/23/92 */
2 /*
3  * Pentium III FXSR, SSE support
4  *	Gareth Hughes <gareth@valinux.com>, May 2000
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/sched.h>
9 #include <linux/sched/task_stack.h>
10 #include <linux/mm.h>
11 #include <linux/smp.h>
12 #include <linux/errno.h>
13 #include <linux/slab.h>
14 #include <linux/ptrace.h>
15 #include <linux/tracehook.h>
16 #include <linux/user.h>
17 #include <linux/elf.h>
18 #include <linux/security.h>
19 #include <linux/audit.h>
20 #include <linux/seccomp.h>
21 #include <linux/signal.h>
22 #include <linux/perf_event.h>
23 #include <linux/hw_breakpoint.h>
24 #include <linux/rcupdate.h>
25 #include <linux/export.h>
26 #include <linux/context_tracking.h>
27 
28 #include <linux/uaccess.h>
29 #include <asm/pgtable.h>
30 #include <asm/processor.h>
31 #include <asm/fpu/internal.h>
32 #include <asm/fpu/signal.h>
33 #include <asm/fpu/regset.h>
34 #include <asm/debugreg.h>
35 #include <asm/ldt.h>
36 #include <asm/desc.h>
37 #include <asm/prctl.h>
38 #include <asm/proto.h>
39 #include <asm/hw_breakpoint.h>
40 #include <asm/traps.h>
41 #include <asm/syscall.h>
42 #include <asm/fsgsbase.h>
43 
44 #include "tls.h"
45 
46 enum x86_regset {
47 	REGSET_GENERAL,
48 	REGSET_FP,
49 	REGSET_XFP,
50 	REGSET_IOPERM64 = REGSET_XFP,
51 	REGSET_XSTATE,
52 	REGSET_TLS,
53 	REGSET_IOPERM32,
54 };
55 
56 struct pt_regs_offset {
57 	const char *name;
58 	int offset;
59 };
60 
61 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
62 #define REG_OFFSET_END {.name = NULL, .offset = 0}
63 
64 static const struct pt_regs_offset regoffset_table[] = {
65 #ifdef CONFIG_X86_64
66 	REG_OFFSET_NAME(r15),
67 	REG_OFFSET_NAME(r14),
68 	REG_OFFSET_NAME(r13),
69 	REG_OFFSET_NAME(r12),
70 	REG_OFFSET_NAME(r11),
71 	REG_OFFSET_NAME(r10),
72 	REG_OFFSET_NAME(r9),
73 	REG_OFFSET_NAME(r8),
74 #endif
75 	REG_OFFSET_NAME(bx),
76 	REG_OFFSET_NAME(cx),
77 	REG_OFFSET_NAME(dx),
78 	REG_OFFSET_NAME(si),
79 	REG_OFFSET_NAME(di),
80 	REG_OFFSET_NAME(bp),
81 	REG_OFFSET_NAME(ax),
82 #ifdef CONFIG_X86_32
83 	REG_OFFSET_NAME(ds),
84 	REG_OFFSET_NAME(es),
85 	REG_OFFSET_NAME(fs),
86 	REG_OFFSET_NAME(gs),
87 #endif
88 	REG_OFFSET_NAME(orig_ax),
89 	REG_OFFSET_NAME(ip),
90 	REG_OFFSET_NAME(cs),
91 	REG_OFFSET_NAME(flags),
92 	REG_OFFSET_NAME(sp),
93 	REG_OFFSET_NAME(ss),
94 	REG_OFFSET_END,
95 };
96 
97 /**
98  * regs_query_register_offset() - query register offset from its name
99  * @name:	the name of a register
100  *
101  * regs_query_register_offset() returns the offset of a register in struct
102  * pt_regs from its name. If the name is invalid, this returns -EINVAL;
103  */
104 int regs_query_register_offset(const char *name)
105 {
106 	const struct pt_regs_offset *roff;
107 	for (roff = regoffset_table; roff->name != NULL; roff++)
108 		if (!strcmp(roff->name, name))
109 			return roff->offset;
110 	return -EINVAL;
111 }
112 
113 /**
114  * regs_query_register_name() - query register name from its offset
115  * @offset:	the offset of a register in struct pt_regs.
116  *
117  * regs_query_register_name() returns the name of a register from its
118  * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
119  */
120 const char *regs_query_register_name(unsigned int offset)
121 {
122 	const struct pt_regs_offset *roff;
123 	for (roff = regoffset_table; roff->name != NULL; roff++)
124 		if (roff->offset == offset)
125 			return roff->name;
126 	return NULL;
127 }
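
/*
 * Example (illustrative only; regs_get_register() lives in <asm/ptrace.h>):
 * a consumer such as the kprobe event parser maps a user-supplied register
 * name to its pt_regs slot once, then reads the register later:
 *
 *	int off = regs_query_register_offset("ax");
 *	unsigned long val;
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */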
128 
129 /*
130  * This does not yet catch signals sent when the child dies; see
131  * exit.c and signal.c.
132  */
133 
134 /*
135  * Determines which flags the user has access to [1 = access, 0 = no access].
136  */
137 #define FLAG_MASK_32		((unsigned long)			\
138 				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
139 				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
140 				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
141 				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
142 				  X86_EFLAGS_RF | X86_EFLAGS_AC))
143 
144 /*
145  * Determines whether a value may be installed in a segment register.
146  */
147 static inline bool invalid_selector(u16 value)
148 {
149 	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
150 }
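
/*
 * For example (illustrative only): the null selector (0) and any selector
 * with RPL 3, such as __USER_DS, are accepted; a selector carrying a kernel
 * RPL of 0, such as __KERNEL_DS, is rejected.
 */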
151 
152 #ifdef CONFIG_X86_32
153 
154 #define FLAG_MASK		FLAG_MASK_32
155 
156 /*
157  * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
158  * when it traps.  The previous stack will be directly underneath the saved
159  * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
160  *
161  * Now, if the stack is empty, '&regs->sp' is out of range. In this
162  * case we try to take the previous stack. To always return a non-null
163  * stack pointer we fall back to regs as stack if no previous stack
164  * exists.
165  *
166  * This is valid only for kernel mode traps.
167  */
168 unsigned long kernel_stack_pointer(struct pt_regs *regs)
169 {
170 	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
171 	unsigned long sp = (unsigned long)&regs->sp;
172 	u32 *prev_esp;
173 
174 	if (context == (sp & ~(THREAD_SIZE - 1)))
175 		return sp;
176 
177 	prev_esp = (u32 *)(context);
178 	if (*prev_esp)
179 		return (unsigned long)*prev_esp;
180 
181 	return (unsigned long)regs;
182 }
183 EXPORT_SYMBOL_GPL(kernel_stack_pointer);
184 
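/*
 * Map a byte offset into struct user_regs_struct (the PTRACE_PEEKUSR/
 * PTRACE_POKEUSR view) to the matching slot in struct pt_regs; on 32-bit
 * the two layouts agree for the general registers, starting with bx at
 * offset 0.
 */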
185 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
186 {
187 	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
188 	return &regs->bx + (regno >> 2);
189 }
190 
191 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
192 {
193 	/*
194 	 * Returning the value truncates it to 16 bits.
195 	 */
196 	unsigned int retval;
197 	if (offset != offsetof(struct user_regs_struct, gs))
198 		retval = *pt_regs_access(task_pt_regs(task), offset);
199 	else {
200 		if (task == current)
201 			retval = get_user_gs(task_pt_regs(task));
202 		else
203 			retval = task_user_gs(task);
204 	}
205 	return retval;
206 }
207 
208 static int set_segment_reg(struct task_struct *task,
209 			   unsigned long offset, u16 value)
210 {
211 	/*
212 	 * The value argument was already truncated to 16 bits.
213 	 */
214 	if (invalid_selector(value))
215 		return -EIO;
216 
217 	/*
218 	 * For %cs and %ss we cannot permit a null selector.
219 	 * We can permit a bogus selector as long as it has USER_RPL.
220 	 * Null selectors are fine for other segment registers, but
221 	 * we will never get back to user mode with invalid %cs or %ss
222 	 * and will take the trap in iret instead.  Much code relies
223 	 * on user_mode() to distinguish a user trap frame (which can
224 	 * safely use invalid selectors) from a kernel trap frame.
225 	 */
226 	switch (offset) {
227 	case offsetof(struct user_regs_struct, cs):
228 	case offsetof(struct user_regs_struct, ss):
229 		if (unlikely(value == 0))
230 			return -EIO;
231 
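		/* A valid non-null %cs/%ss falls through to the common write below. */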
232 	default:
233 		*pt_regs_access(task_pt_regs(task), offset) = value;
234 		break;
235 
236 	case offsetof(struct user_regs_struct, gs):
237 		if (task == current)
238 			set_user_gs(task_pt_regs(task), value);
239 		else
240 			task_user_gs(task) = value;
241 	}
242 
243 	return 0;
244 }
245 
246 #else  /* CONFIG_X86_64 */
247 
248 #define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)
249 
250 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
251 {
252 	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
253 	return &regs->r15 + (offset / sizeof(regs->r15));
254 }
255 
256 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
257 {
258 	/*
259 	 * Returning the value truncates it to 16 bits.
260 	 */
261 	unsigned int seg;
262 
263 	switch (offset) {
264 	case offsetof(struct user_regs_struct, fs):
265 		if (task == current) {
266 			/* Older gas can't assemble movq %?s,%r?? */
267 			asm("movl %%fs,%0" : "=r" (seg));
268 			return seg;
269 		}
270 		return task->thread.fsindex;
271 	case offsetof(struct user_regs_struct, gs):
272 		if (task == current) {
273 			asm("movl %%gs,%0" : "=r" (seg));
274 			return seg;
275 		}
276 		return task->thread.gsindex;
277 	case offsetof(struct user_regs_struct, ds):
278 		if (task == current) {
279 			asm("movl %%ds,%0" : "=r" (seg));
280 			return seg;
281 		}
282 		return task->thread.ds;
283 	case offsetof(struct user_regs_struct, es):
284 		if (task == current) {
285 			asm("movl %%es,%0" : "=r" (seg));
286 			return seg;
287 		}
288 		return task->thread.es;
289 
290 	case offsetof(struct user_regs_struct, cs):
291 	case offsetof(struct user_regs_struct, ss):
292 		break;
293 	}
294 	return *pt_regs_access(task_pt_regs(task), offset);
295 }
296 
297 static int set_segment_reg(struct task_struct *task,
298 			   unsigned long offset, u16 value)
299 {
300 	/*
301 	 * The value argument was already truncated to 16 bits.
302 	 */
303 	if (invalid_selector(value))
304 		return -EIO;
305 
306 	switch (offset) {
307 	case offsetof(struct user_regs_struct, fs):
308 		task->thread.fsindex = value;
309 		if (task == current)
310 			loadsegment(fs, task->thread.fsindex);
311 		break;
312 	case offsetof(struct user_regs_struct, gs):
313 		task->thread.gsindex = value;
314 		if (task == current)
315 			load_gs_index(task->thread.gsindex);
316 		break;
317 	case offsetof(struct user_regs_struct, ds):
318 		task->thread.ds = value;
319 		if (task == current)
320 			loadsegment(ds, task->thread.ds);
321 		break;
322 	case offsetof(struct user_regs_struct, es):
323 		task->thread.es = value;
324 		if (task == current)
325 			loadsegment(es, task->thread.es);
326 		break;
327 
328 		/*
329 		 * Can't actually change these in 64-bit mode.
330 		 */
331 	case offsetof(struct user_regs_struct, cs):
332 		if (unlikely(value == 0))
333 			return -EIO;
334 		task_pt_regs(task)->cs = value;
335 		break;
336 	case offsetof(struct user_regs_struct, ss):
337 		if (unlikely(value == 0))
338 			return -EIO;
339 		task_pt_regs(task)->ss = value;
340 		break;
341 	}
342 
343 	return 0;
344 }
345 
346 #endif	/* CONFIG_X86_32 */
347 
348 static unsigned long get_flags(struct task_struct *task)
349 {
350 	unsigned long retval = task_pt_regs(task)->flags;
351 
352 	/*
353 	 * If the debugger set TF, hide it from the readout.
354 	 */
355 	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
356 		retval &= ~X86_EFLAGS_TF;
357 
358 	return retval;
359 }
360 
361 static int set_flags(struct task_struct *task, unsigned long value)
362 {
363 	struct pt_regs *regs = task_pt_regs(task);
364 
365 	/*
366 	 * If the user value contains TF, mark that
367 	 * it was not "us" (the debugger) that set it.
368 	 * If not, make sure it stays set if we had.
369 	 */
370 	if (value & X86_EFLAGS_TF)
371 		clear_tsk_thread_flag(task, TIF_FORCED_TF);
372 	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
373 		value |= X86_EFLAGS_TF;
374 
375 	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);
376 
377 	return 0;
378 }
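
/*
 * Worked example (for illustration): when single-stepping was armed by the
 * kernel via user_enable_single_step() (TIF_FORCED_TF set), a debugger
 * writing an eflags value without TF leaves TF set so the pending step still
 * traps, while writing a value with TF clears TIF_FORCED_TF and hands
 * ownership of TF to the debugger; get_flags() above only hides a forced TF.
 */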
379 
380 static int putreg(struct task_struct *child,
381 		  unsigned long offset, unsigned long value)
382 {
383 	switch (offset) {
384 	case offsetof(struct user_regs_struct, cs):
385 	case offsetof(struct user_regs_struct, ds):
386 	case offsetof(struct user_regs_struct, es):
387 	case offsetof(struct user_regs_struct, fs):
388 	case offsetof(struct user_regs_struct, gs):
389 	case offsetof(struct user_regs_struct, ss):
390 		return set_segment_reg(child, offset, value);
391 
392 	case offsetof(struct user_regs_struct, flags):
393 		return set_flags(child, value);
394 
395 #ifdef CONFIG_X86_64
396 	case offsetof(struct user_regs_struct, fs_base):
397 		if (value >= TASK_SIZE_MAX)
398 			return -EIO;
399 		/*
400 		 * When changing the FS base, use the same
401 		 * mechanism as for do_arch_prctl_64().
402 		 */
403 		if (child->thread.fsbase != value)
404 			return x86_fsbase_write_task(child, value);
405 		return 0;
406 	case offsetof(struct user_regs_struct, gs_base):
407 		/*
408 		 * Exactly the same here as the %fs handling above.
409 		 */
410 		if (value >= TASK_SIZE_MAX)
411 			return -EIO;
412 		if (child->thread.gsbase != value)
413 			return x86_gsbase_write_task(child, value);
414 		return 0;
415 #endif
416 	}
417 
418 	*pt_regs_access(task_pt_regs(child), offset) = value;
419 	return 0;
420 }
421 
422 static unsigned long getreg(struct task_struct *task, unsigned long offset)
423 {
424 	switch (offset) {
425 	case offsetof(struct user_regs_struct, cs):
426 	case offsetof(struct user_regs_struct, ds):
427 	case offsetof(struct user_regs_struct, es):
428 	case offsetof(struct user_regs_struct, fs):
429 	case offsetof(struct user_regs_struct, gs):
430 	case offsetof(struct user_regs_struct, ss):
431 		return get_segment_reg(task, offset);
432 
433 	case offsetof(struct user_regs_struct, flags):
434 		return get_flags(task);
435 
436 #ifdef CONFIG_X86_64
437 	case offsetof(struct user_regs_struct, fs_base):
438 		return x86_fsbase_read_task(task);
439 	case offsetof(struct user_regs_struct, gs_base):
440 		return x86_gsbase_read_task(task);
441 #endif
442 	}
443 
444 	return *pt_regs_access(task_pt_regs(task), offset);
445 }
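
/*
 * Example (illustrative only): a PTRACE_PEEKUSR request for the saved
 * instruction pointer ends up, via arch_ptrace() below, as
 *
 *	getreg(child, offsetof(struct user_regs_struct, ip));
 */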
446 
447 static int genregs_get(struct task_struct *target,
448 		       const struct user_regset *regset,
449 		       unsigned int pos, unsigned int count,
450 		       void *kbuf, void __user *ubuf)
451 {
452 	if (kbuf) {
453 		unsigned long *k = kbuf;
454 		while (count >= sizeof(*k)) {
455 			*k++ = getreg(target, pos);
456 			count -= sizeof(*k);
457 			pos += sizeof(*k);
458 		}
459 	} else {
460 		unsigned long __user *u = ubuf;
461 		while (count >= sizeof(*u)) {
462 			if (__put_user(getreg(target, pos), u++))
463 				return -EFAULT;
464 			count -= sizeof(*u);
465 			pos += sizeof(*u);
466 		}
467 	}
468 
469 	return 0;
470 }
471 
472 static int genregs_set(struct task_struct *target,
473 		       const struct user_regset *regset,
474 		       unsigned int pos, unsigned int count,
475 		       const void *kbuf, const void __user *ubuf)
476 {
477 	int ret = 0;
478 	if (kbuf) {
479 		const unsigned long *k = kbuf;
480 		while (count >= sizeof(*k) && !ret) {
481 			ret = putreg(target, pos, *k++);
482 			count -= sizeof(*k);
483 			pos += sizeof(*k);
484 		}
485 	} else {
486 		const unsigned long __user *u = ubuf;
487 		while (count >= sizeof(*u) && !ret) {
488 			unsigned long word;
489 			ret = __get_user(word, u++);
490 			if (ret)
491 				break;
492 			ret = putreg(target, pos, word);
493 			count -= sizeof(*u);
494 			pos += sizeof(*u);
495 		}
496 	}
497 	return ret;
498 }
499 
500 static void ptrace_triggered(struct perf_event *bp,
501 			     struct perf_sample_data *data,
502 			     struct pt_regs *regs)
503 {
504 	int i;
505 	struct thread_struct *thread = &(current->thread);
506 
507 	/*
508 	 * Store in the virtual DR6 register the fact that the breakpoint
509 	 * was hit so the thread's debugger will see it.
510 	 */
511 	for (i = 0; i < HBP_NUM; i++) {
512 		if (thread->ptrace_bps[i] == bp)
513 			break;
514 	}
515 
516 	thread->debugreg6 |= (DR_TRAP0 << i);
517 }
518 
519 /*
520  * Walk through all ptrace breakpoints for this thread and
521  * build the dr7 value on top of their attributes.
523  */
524 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
525 {
526 	int i;
527 	int dr7 = 0;
528 	struct arch_hw_breakpoint *info;
529 
530 	for (i = 0; i < HBP_NUM; i++) {
531 		if (bp[i] && !bp[i]->attr.disabled) {
532 			info = counter_arch_bp(bp[i]);
533 			dr7 |= encode_dr7(i, info->len, info->type);
534 		}
535 	}
536 
537 	return dr7;
538 }
539 
540 static int ptrace_fill_bp_fields(struct perf_event_attr *attr,
541 					int len, int type, bool disabled)
542 {
543 	int err, bp_len, bp_type;
544 
545 	err = arch_bp_generic_fields(len, type, &bp_len, &bp_type);
546 	if (!err) {
547 		attr->bp_len = bp_len;
548 		attr->bp_type = bp_type;
549 		attr->disabled = disabled;
550 	}
551 
552 	return err;
553 }
554 
555 static struct perf_event *
556 ptrace_register_breakpoint(struct task_struct *tsk, int len, int type,
557 				unsigned long addr, bool disabled)
558 {
559 	struct perf_event_attr attr;
560 	int err;
561 
562 	ptrace_breakpoint_init(&attr);
563 	attr.bp_addr = addr;
564 
565 	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
566 	if (err)
567 		return ERR_PTR(err);
568 
569 	return register_user_hw_breakpoint(&attr, ptrace_triggered,
570 						 NULL, tsk);
571 }
572 
573 static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
574 					int disabled)
575 {
576 	struct perf_event_attr attr = bp->attr;
577 	int err;
578 
579 	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
580 	if (err)
581 		return err;
582 
583 	return modify_user_hw_breakpoint(bp, &attr);
584 }
585 
586 /*
587  * Handle ptrace writes to debug register 7.
588  */
589 static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
590 {
591 	struct thread_struct *thread = &tsk->thread;
592 	unsigned long old_dr7;
593 	bool second_pass = false;
594 	int i, rc, ret = 0;
595 
596 	data &= ~DR_CONTROL_RESERVED;
597 	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
598 
599 restore:
600 	rc = 0;
601 	for (i = 0; i < HBP_NUM; i++) {
602 		unsigned len, type;
603 		bool disabled = !decode_dr7(data, i, &len, &type);
604 		struct perf_event *bp = thread->ptrace_bps[i];
605 
606 		if (!bp) {
607 			if (disabled)
608 				continue;
609 
610 			bp = ptrace_register_breakpoint(tsk,
611 					len, type, 0, disabled);
612 			if (IS_ERR(bp)) {
613 				rc = PTR_ERR(bp);
614 				break;
615 			}
616 
617 			thread->ptrace_bps[i] = bp;
618 			continue;
619 		}
620 
621 		rc = ptrace_modify_breakpoint(bp, len, type, disabled);
622 		if (rc)
623 			break;
624 	}
625 
626 	/* Restore if the first pass failed; the second pass shouldn't fail. */
627 	if (rc && !WARN_ON(second_pass)) {
628 		ret = rc;
629 		data = old_dr7;
630 		second_pass = true;
631 		goto restore;
632 	}
633 
634 	return ret;
635 }
636 
637 /*
638  * Handle PTRACE_PEEKUSR calls for the debug register area.
639  */
640 static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
641 {
642 	struct thread_struct *thread = &tsk->thread;
643 	unsigned long val = 0;
644 
645 	if (n < HBP_NUM) {
646 		struct perf_event *bp = thread->ptrace_bps[n];
647 
648 		if (bp)
649 			val = bp->hw.info.address;
650 	} else if (n == 6) {
651 		val = thread->debugreg6;
652 	} else if (n == 7) {
653 		val = thread->ptrace_dr7;
654 	}
655 	return val;
656 }
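
/*
 * Example (hypothetical tracer-side call, using the struct user layout from
 * <sys/user.h>): a debugger reading the virtual DR6 of a stopped child with
 *
 *	ptrace(PTRACE_PEEKUSER, pid, offsetof(struct user, u_debugreg[6]), 0);
 *
 * arrives here with n == 6.
 */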
657 
658 static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
659 				      unsigned long addr)
660 {
661 	struct thread_struct *t = &tsk->thread;
662 	struct perf_event *bp = t->ptrace_bps[nr];
663 	int err = 0;
664 
665 	if (!bp) {
666 		/*
667 		 * Put stub len and type to create an inactive but correct bp.
668 		 *
669 		 * CHECKME: the previous code returned -EIO if the addr wasn't
670 		 * a valid task virtual addr. The new one will return -EINVAL in
671 		 * this case.
672 		 * -EINVAL may be what we want for in-kernel breakpoint users,
673 		 * but -EIO looks better for ptrace, since we refuse a register
674 		 * write for the user. And anyway this is the previous
675 		 * behaviour.
676 		 */
677 		bp = ptrace_register_breakpoint(tsk,
678 				X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE,
679 				addr, true);
680 		if (IS_ERR(bp))
681 			err = PTR_ERR(bp);
682 		else
683 			t->ptrace_bps[nr] = bp;
684 	} else {
685 		struct perf_event_attr attr = bp->attr;
686 
687 		attr.bp_addr = addr;
688 		err = modify_user_hw_breakpoint(bp, &attr);
689 	}
690 
691 	return err;
692 }
693 
694 /*
695  * Handle PTRACE_POKEUSR calls for the debug register area.
696  */
697 static int ptrace_set_debugreg(struct task_struct *tsk, int n,
698 			       unsigned long val)
699 {
700 	struct thread_struct *thread = &tsk->thread;
701 	/* There are no DR4 or DR5 registers */
702 	int rc = -EIO;
703 
704 	if (n < HBP_NUM) {
705 		rc = ptrace_set_breakpoint_addr(tsk, n, val);
706 	} else if (n == 6) {
707 		thread->debugreg6 = val;
708 		rc = 0;
709 	} else if (n == 7) {
710 		rc = ptrace_write_dr7(tsk, val);
711 		if (!rc)
712 			thread->ptrace_dr7 = val;
713 	}
714 	return rc;
715 }
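
/*
 * Example (illustrative only): arming a one-byte write watchpoint in slot 0
 * from the tracer side is typically two PTRACE_POKEUSER calls: one setting
 * u_debugreg[0] to the watched address (ptrace_set_breakpoint_addr() above)
 * and one setting u_debugreg[7] to 0x00010001, i.e. local-enable for slot 0
 * with R/W0 = write and LEN0 = 1 byte (ptrace_write_dr7() above).
 */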
716 
717 /*
718  * These access the current or another (stopped) task's io permission
719  * bitmap for debugging or core dump.
720  */
721 static int ioperm_active(struct task_struct *target,
722 			 const struct user_regset *regset)
723 {
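	/*
	 * io_bitmap_max is the number of bytes of the I/O bitmap actually in
	 * use; report it in regset-member-sized units.
	 */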
724 	return target->thread.io_bitmap_max / regset->size;
725 }
726 
727 static int ioperm_get(struct task_struct *target,
728 		      const struct user_regset *regset,
729 		      unsigned int pos, unsigned int count,
730 		      void *kbuf, void __user *ubuf)
731 {
732 	if (!target->thread.io_bitmap_ptr)
733 		return -ENXIO;
734 
735 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
736 				   target->thread.io_bitmap_ptr,
737 				   0, IO_BITMAP_BYTES);
738 }
739 
740 /*
741  * Called by kernel/ptrace.c when detaching.
742  *
743  * Make sure the single step bit is not set.
744  */
745 void ptrace_disable(struct task_struct *child)
746 {
747 	user_disable_single_step(child);
748 #ifdef TIF_SYSCALL_EMU
749 	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
750 #endif
751 }
752 
753 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
754 static const struct user_regset_view user_x86_32_view; /* Initialized below. */
755 #endif
756 
757 long arch_ptrace(struct task_struct *child, long request,
758 		 unsigned long addr, unsigned long data)
759 {
760 	int ret;
761 	unsigned long __user *datap = (unsigned long __user *)data;
762 
763 	switch (request) {
764 	/* read the word at location addr in the USER area. */
765 	case PTRACE_PEEKUSR: {
766 		unsigned long tmp;
767 
768 		ret = -EIO;
769 		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
770 			break;
771 
772 		tmp = 0;  /* Default return condition */
773 		if (addr < sizeof(struct user_regs_struct))
774 			tmp = getreg(child, addr);
775 		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
776 			 addr <= offsetof(struct user, u_debugreg[7])) {
777 			addr -= offsetof(struct user, u_debugreg[0]);
778 			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
779 		}
780 		ret = put_user(tmp, datap);
781 		break;
782 	}
783 
784 	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
785 		ret = -EIO;
786 		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
787 			break;
788 
789 		if (addr < sizeof(struct user_regs_struct))
790 			ret = putreg(child, addr, data);
791 		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
792 			 addr <= offsetof(struct user, u_debugreg[7])) {
793 			addr -= offsetof(struct user, u_debugreg[0]);
794 			ret = ptrace_set_debugreg(child,
795 						  addr / sizeof(data), data);
796 		}
797 		break;
798 
799 	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
800 		return copy_regset_to_user(child,
801 					   task_user_regset_view(current),
802 					   REGSET_GENERAL,
803 					   0, sizeof(struct user_regs_struct),
804 					   datap);
805 
806 	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
807 		return copy_regset_from_user(child,
808 					     task_user_regset_view(current),
809 					     REGSET_GENERAL,
810 					     0, sizeof(struct user_regs_struct),
811 					     datap);
812 
813 	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
814 		return copy_regset_to_user(child,
815 					   task_user_regset_view(current),
816 					   REGSET_FP,
817 					   0, sizeof(struct user_i387_struct),
818 					   datap);
819 
820 	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
821 		return copy_regset_from_user(child,
822 					     task_user_regset_view(current),
823 					     REGSET_FP,
824 					     0, sizeof(struct user_i387_struct),
825 					     datap);
826 
827 #ifdef CONFIG_X86_32
828 	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
829 		return copy_regset_to_user(child, &user_x86_32_view,
830 					   REGSET_XFP,
831 					   0, sizeof(struct user_fxsr_struct),
832 					   datap) ? -EIO : 0;
833 
834 	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
835 		return copy_regset_from_user(child, &user_x86_32_view,
836 					     REGSET_XFP,
837 					     0, sizeof(struct user_fxsr_struct),
838 					     datap) ? -EIO : 0;
839 #endif
840 
841 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
842 	case PTRACE_GET_THREAD_AREA:
843 		if ((int) addr < 0)
844 			return -EIO;
845 		ret = do_get_thread_area(child, addr,
846 					(struct user_desc __user *)data);
847 		break;
848 
849 	case PTRACE_SET_THREAD_AREA:
850 		if ((int) addr < 0)
851 			return -EIO;
852 		ret = do_set_thread_area(child, addr,
853 					(struct user_desc __user *)data, 0);
854 		break;
855 #endif
856 
857 #ifdef CONFIG_X86_64
858 		/* normal 64bit interface to access TLS data.
859 		   Works just like arch_prctl, except that the arguments
860 		   are reversed. */
861 	case PTRACE_ARCH_PRCTL:
862 		ret = do_arch_prctl_64(child, data, addr);
863 		break;
864 #endif
865 
866 	default:
867 		ret = ptrace_request(child, request, addr, data);
868 		break;
869 	}
870 
871 	return ret;
872 }
873 
874 #ifdef CONFIG_IA32_EMULATION
875 
876 #include <linux/compat.h>
877 #include <linux/syscalls.h>
878 #include <asm/ia32.h>
879 #include <asm/user32.h>
880 
881 #define R32(l,q)							\
882 	case offsetof(struct user32, regs.l):				\
883 		regs->q = value; break
884 
885 #define SEG32(rs)							\
886 	case offsetof(struct user32, regs.rs):				\
887 		return set_segment_reg(child,				\
888 				       offsetof(struct user_regs_struct, rs), \
889 				       value);				\
890 		break
891 
892 static int putreg32(struct task_struct *child, unsigned regno, u32 value)
893 {
894 	struct pt_regs *regs = task_pt_regs(child);
895 
896 	switch (regno) {
897 
898 	SEG32(cs);
899 	SEG32(ds);
900 	SEG32(es);
901 	SEG32(fs);
902 	SEG32(gs);
903 	SEG32(ss);
904 
905 	R32(ebx, bx);
906 	R32(ecx, cx);
907 	R32(edx, dx);
908 	R32(edi, di);
909 	R32(esi, si);
910 	R32(ebp, bp);
911 	R32(eax, ax);
912 	R32(eip, ip);
913 	R32(esp, sp);
914 
915 	case offsetof(struct user32, regs.orig_eax):
916 		/*
917 		 * Warning: bizarre corner case fixup here.  A 32-bit
918 		 * debugger setting orig_eax to -1 wants to disable
919 		 * syscall restart.  Make sure that the syscall
920 		 * restart code sign-extends orig_ax.  Also make sure
921 		 * we interpret the -ERESTART* codes correctly if
922 		 * loaded into regs->ax in case the task is not
923 		 * actually still sitting at the exit from a 32-bit
924 		 * syscall with TS_COMPAT still set.
925 		 */
926 		regs->orig_ax = value;
927 		if (syscall_get_nr(child, regs) >= 0)
928 			child->thread_info.status |= TS_I386_REGS_POKED;
929 		break;
930 
931 	case offsetof(struct user32, regs.eflags):
932 		return set_flags(child, value);
933 
934 	case offsetof(struct user32, u_debugreg[0]) ...
935 		offsetof(struct user32, u_debugreg[7]):
936 		regno -= offsetof(struct user32, u_debugreg[0]);
937 		return ptrace_set_debugreg(child, regno / 4, value);
938 
939 	default:
940 		if (regno > sizeof(struct user32) || (regno & 3))
941 			return -EIO;
942 
943 		/*
944 		 * Other dummy fields in the virtual user structure
945 		 * are ignored
946 		 */
947 		break;
948 	}
949 	return 0;
950 }
951 
952 #undef R32
953 #undef SEG32
954 
955 #define R32(l,q)							\
956 	case offsetof(struct user32, regs.l):				\
957 		*val = regs->q; break
958 
959 #define SEG32(rs)							\
960 	case offsetof(struct user32, regs.rs):				\
961 		*val = get_segment_reg(child,				\
962 				       offsetof(struct user_regs_struct, rs)); \
963 		break
964 
965 static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
966 {
967 	struct pt_regs *regs = task_pt_regs(child);
968 
969 	switch (regno) {
970 
971 	SEG32(ds);
972 	SEG32(es);
973 	SEG32(fs);
974 	SEG32(gs);
975 
976 	R32(cs, cs);
977 	R32(ss, ss);
978 	R32(ebx, bx);
979 	R32(ecx, cx);
980 	R32(edx, dx);
981 	R32(edi, di);
982 	R32(esi, si);
983 	R32(ebp, bp);
984 	R32(eax, ax);
985 	R32(orig_eax, orig_ax);
986 	R32(eip, ip);
987 	R32(esp, sp);
988 
989 	case offsetof(struct user32, regs.eflags):
990 		*val = get_flags(child);
991 		break;
992 
993 	case offsetof(struct user32, u_debugreg[0]) ...
994 		offsetof(struct user32, u_debugreg[7]):
995 		regno -= offsetof(struct user32, u_debugreg[0]);
996 		*val = ptrace_get_debugreg(child, regno / 4);
997 		break;
998 
999 	default:
1000 		if (regno > sizeof(struct user32) || (regno & 3))
1001 			return -EIO;
1002 
1003 		/*
1004 		 * Other dummy fields in the virtual user structure
1005 		 * are ignored
1006 		 */
1007 		*val = 0;
1008 		break;
1009 	}
1010 	return 0;
1011 }
1012 
1013 #undef R32
1014 #undef SEG32
1015 
1016 static int genregs32_get(struct task_struct *target,
1017 			 const struct user_regset *regset,
1018 			 unsigned int pos, unsigned int count,
1019 			 void *kbuf, void __user *ubuf)
1020 {
1021 	if (kbuf) {
1022 		compat_ulong_t *k = kbuf;
1023 		while (count >= sizeof(*k)) {
1024 			getreg32(target, pos, k++);
1025 			count -= sizeof(*k);
1026 			pos += sizeof(*k);
1027 		}
1028 	} else {
1029 		compat_ulong_t __user *u = ubuf;
1030 		while (count >= sizeof(*u)) {
1031 			compat_ulong_t word;
1032 			getreg32(target, pos, &word);
1033 			if (__put_user(word, u++))
1034 				return -EFAULT;
1035 			count -= sizeof(*u);
1036 			pos += sizeof(*u);
1037 		}
1038 	}
1039 
1040 	return 0;
1041 }
1042 
1043 static int genregs32_set(struct task_struct *target,
1044 			 const struct user_regset *regset,
1045 			 unsigned int pos, unsigned int count,
1046 			 const void *kbuf, const void __user *ubuf)
1047 {
1048 	int ret = 0;
1049 	if (kbuf) {
1050 		const compat_ulong_t *k = kbuf;
1051 		while (count >= sizeof(*k) && !ret) {
1052 			ret = putreg32(target, pos, *k++);
1053 			count -= sizeof(*k);
1054 			pos += sizeof(*k);
1055 		}
1056 	} else {
1057 		const compat_ulong_t __user *u = ubuf;
1058 		while (count >= sizeof(*u) && !ret) {
1059 			compat_ulong_t word;
1060 			ret = __get_user(word, u++);
1061 			if (ret)
1062 				break;
1063 			ret = putreg32(target, pos, word);
1064 			count -= sizeof(*u);
1065 			pos += sizeof(*u);
1066 		}
1067 	}
1068 	return ret;
1069 }
1070 
1071 static long ia32_arch_ptrace(struct task_struct *child, compat_long_t request,
1072 			     compat_ulong_t caddr, compat_ulong_t cdata)
1073 {
1074 	unsigned long addr = caddr;
1075 	unsigned long data = cdata;
1076 	void __user *datap = compat_ptr(data);
1077 	int ret;
1078 	__u32 val;
1079 
1080 	switch (request) {
1081 	case PTRACE_PEEKUSR:
1082 		ret = getreg32(child, addr, &val);
1083 		if (ret == 0)
1084 			ret = put_user(val, (__u32 __user *)datap);
1085 		break;
1086 
1087 	case PTRACE_POKEUSR:
1088 		ret = putreg32(child, addr, data);
1089 		break;
1090 
1091 	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
1092 		return copy_regset_to_user(child, &user_x86_32_view,
1093 					   REGSET_GENERAL,
1094 					   0, sizeof(struct user_regs_struct32),
1095 					   datap);
1096 
1097 	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
1098 		return copy_regset_from_user(child, &user_x86_32_view,
1099 					     REGSET_GENERAL, 0,
1100 					     sizeof(struct user_regs_struct32),
1101 					     datap);
1102 
1103 	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
1104 		return copy_regset_to_user(child, &user_x86_32_view,
1105 					   REGSET_FP, 0,
1106 					   sizeof(struct user_i387_ia32_struct),
1107 					   datap);
1108 
1109 	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
1110 		return copy_regset_from_user(
1111 			child, &user_x86_32_view, REGSET_FP,
1112 			0, sizeof(struct user_i387_ia32_struct), datap);
1113 
1114 	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
1115 		return copy_regset_to_user(child, &user_x86_32_view,
1116 					   REGSET_XFP, 0,
1117 					   sizeof(struct user32_fxsr_struct),
1118 					   datap);
1119 
1120 	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
1121 		return copy_regset_from_user(child, &user_x86_32_view,
1122 					     REGSET_XFP, 0,
1123 					     sizeof(struct user32_fxsr_struct),
1124 					     datap);
1125 
1126 	case PTRACE_GET_THREAD_AREA:
1127 	case PTRACE_SET_THREAD_AREA:
1128 		return arch_ptrace(child, request, addr, data);
1129 
1130 	default:
1131 		return compat_ptrace_request(child, request, addr, data);
1132 	}
1133 
1134 	return ret;
1135 }
1136 #endif /* CONFIG_IA32_EMULATION */
1137 
1138 #ifdef CONFIG_X86_X32_ABI
1139 static long x32_arch_ptrace(struct task_struct *child,
1140 			    compat_long_t request, compat_ulong_t caddr,
1141 			    compat_ulong_t cdata)
1142 {
1143 	unsigned long addr = caddr;
1144 	unsigned long data = cdata;
1145 	void __user *datap = compat_ptr(data);
1146 	int ret;
1147 
1148 	switch (request) {
1149 	/* Read 32bits at location addr in the USER area.  Only allow
1150 	   returning the lower 32bits of segment and debug registers.  */
1151 	case PTRACE_PEEKUSR: {
1152 		u32 tmp;
1153 
1154 		ret = -EIO;
1155 		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
1156 		    addr < offsetof(struct user_regs_struct, cs))
1157 			break;
1158 
1159 		tmp = 0;  /* Default return condition */
1160 		if (addr < sizeof(struct user_regs_struct))
1161 			tmp = getreg(child, addr);
1162 		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1163 			 addr <= offsetof(struct user, u_debugreg[7])) {
1164 			addr -= offsetof(struct user, u_debugreg[0]);
1165 			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
1166 		}
1167 		ret = put_user(tmp, (__u32 __user *)datap);
1168 		break;
1169 	}
1170 
1171 	/* Write the word at location addr in the USER area.  Only allow
1172 	   updating segment and debug registers with the upper 32bits
1173 	   zero-extended. */
1174 	case PTRACE_POKEUSR:
1175 		ret = -EIO;
1176 		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
1177 		    addr < offsetof(struct user_regs_struct, cs))
1178 			break;
1179 
1180 		if (addr < sizeof(struct user_regs_struct))
1181 			ret = putreg(child, addr, data);
1182 		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1183 			 addr <= offsetof(struct user, u_debugreg[7])) {
1184 			addr -= offsetof(struct user, u_debugreg[0]);
1185 			ret = ptrace_set_debugreg(child,
1186 						  addr / sizeof(data), data);
1187 		}
1188 		break;
1189 
1190 	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
1191 		return copy_regset_to_user(child,
1192 					   task_user_regset_view(current),
1193 					   REGSET_GENERAL,
1194 					   0, sizeof(struct user_regs_struct),
1195 					   datap);
1196 
1197 	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
1198 		return copy_regset_from_user(child,
1199 					     task_user_regset_view(current),
1200 					     REGSET_GENERAL,
1201 					     0, sizeof(struct user_regs_struct),
1202 					     datap);
1203 
1204 	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
1205 		return copy_regset_to_user(child,
1206 					   task_user_regset_view(current),
1207 					   REGSET_FP,
1208 					   0, sizeof(struct user_i387_struct),
1209 					   datap);
1210 
1211 	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
1212 		return copy_regset_from_user(child,
1213 					     task_user_regset_view(current),
1214 					     REGSET_FP,
1215 					     0, sizeof(struct user_i387_struct),
1216 					     datap);
1217 
1218 	default:
1219 		return compat_ptrace_request(child, request, addr, data);
1220 	}
1221 
1222 	return ret;
1223 }
1224 #endif
1225 
1226 #ifdef CONFIG_COMPAT
1227 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1228 			compat_ulong_t caddr, compat_ulong_t cdata)
1229 {
1230 #ifdef CONFIG_X86_X32_ABI
1231 	if (!in_ia32_syscall())
1232 		return x32_arch_ptrace(child, request, caddr, cdata);
1233 #endif
1234 #ifdef CONFIG_IA32_EMULATION
1235 	return ia32_arch_ptrace(child, request, caddr, cdata);
1236 #else
1237 	return 0;
1238 #endif
1239 }
1240 #endif	/* CONFIG_COMPAT */
1241 
1242 #ifdef CONFIG_X86_64
1243 
1244 static struct user_regset x86_64_regsets[] __ro_after_init = {
1245 	[REGSET_GENERAL] = {
1246 		.core_note_type = NT_PRSTATUS,
1247 		.n = sizeof(struct user_regs_struct) / sizeof(long),
1248 		.size = sizeof(long), .align = sizeof(long),
1249 		.get = genregs_get, .set = genregs_set
1250 	},
1251 	[REGSET_FP] = {
1252 		.core_note_type = NT_PRFPREG,
1253 		.n = sizeof(struct user_i387_struct) / sizeof(long),
1254 		.size = sizeof(long), .align = sizeof(long),
1255 		.active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
1256 	},
1257 	[REGSET_XSTATE] = {
1258 		.core_note_type = NT_X86_XSTATE,
1259 		.size = sizeof(u64), .align = sizeof(u64),
1260 		.active = xstateregs_active, .get = xstateregs_get,
1261 		.set = xstateregs_set
1262 	},
1263 	[REGSET_IOPERM64] = {
1264 		.core_note_type = NT_386_IOPERM,
1265 		.n = IO_BITMAP_LONGS,
1266 		.size = sizeof(long), .align = sizeof(long),
1267 		.active = ioperm_active, .get = ioperm_get
1268 	},
1269 };
1270 
1271 static const struct user_regset_view user_x86_64_view = {
1272 	.name = "x86_64", .e_machine = EM_X86_64,
1273 	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
1274 };
1275 
1276 #else  /* CONFIG_X86_32 */
1277 
1278 #define user_regs_struct32	user_regs_struct
1279 #define genregs32_get		genregs_get
1280 #define genregs32_set		genregs_set
1281 
1282 #endif	/* CONFIG_X86_64 */
1283 
1284 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1285 static struct user_regset x86_32_regsets[] __ro_after_init = {
1286 	[REGSET_GENERAL] = {
1287 		.core_note_type = NT_PRSTATUS,
1288 		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
1289 		.size = sizeof(u32), .align = sizeof(u32),
1290 		.get = genregs32_get, .set = genregs32_set
1291 	},
1292 	[REGSET_FP] = {
1293 		.core_note_type = NT_PRFPREG,
1294 		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
1295 		.size = sizeof(u32), .align = sizeof(u32),
1296 		.active = regset_fpregs_active, .get = fpregs_get, .set = fpregs_set
1297 	},
1298 	[REGSET_XFP] = {
1299 		.core_note_type = NT_PRXFPREG,
1300 		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
1301 		.size = sizeof(u32), .align = sizeof(u32),
1302 		.active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
1303 	},
1304 	[REGSET_XSTATE] = {
1305 		.core_note_type = NT_X86_XSTATE,
1306 		.size = sizeof(u64), .align = sizeof(u64),
1307 		.active = xstateregs_active, .get = xstateregs_get,
1308 		.set = xstateregs_set
1309 	},
1310 	[REGSET_TLS] = {
1311 		.core_note_type = NT_386_TLS,
1312 		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
1313 		.size = sizeof(struct user_desc),
1314 		.align = sizeof(struct user_desc),
1315 		.active = regset_tls_active,
1316 		.get = regset_tls_get, .set = regset_tls_set
1317 	},
1318 	[REGSET_IOPERM32] = {
1319 		.core_note_type = NT_386_IOPERM,
1320 		.n = IO_BITMAP_BYTES / sizeof(u32),
1321 		.size = sizeof(u32), .align = sizeof(u32),
1322 		.active = ioperm_active, .get = ioperm_get
1323 	},
1324 };
1325 
1326 static const struct user_regset_view user_x86_32_view = {
1327 	.name = "i386", .e_machine = EM_386,
1328 	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
1329 };
1330 #endif
1331 
1332 /*
1333  * This represents bytes 464..511 in the memory layout exported through
1334  * the REGSET_XSTATE interface.
1335  */
1336 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
1337 
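/*
 * Called at boot, once the xstate buffer size and feature mask are known,
 * to size the XSTATE regsets above and record the mask exported to
 * userspace at USER_XSTATE_XCR0_WORD.
 */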
1338 void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
1339 {
1340 #ifdef CONFIG_X86_64
1341 	x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
1342 #endif
1343 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1344 	x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
1345 #endif
1346 	xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
1347 }
1348 
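/*
 * Pick the regset view matching the register layout the tracee uses: the
 * 32-bit view for compat (and native 32-bit) tasks, otherwise the 64-bit
 * view.
 */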
1349 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1350 {
1351 #ifdef CONFIG_IA32_EMULATION
1352 	if (!user_64bit_mode(task_pt_regs(task)))
1353 #endif
1354 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
1355 		return &user_x86_32_view;
1356 #endif
1357 #ifdef CONFIG_X86_64
1358 	return &user_x86_64_view;
1359 #endif
1360 }
1361 
1362 void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
1363 					 int error_code, int si_code)
1364 {
1365 	tsk->thread.trap_nr = X86_TRAP_DB;
1366 	tsk->thread.error_code = error_code;
1367 
1368 	/* Send us the fake SIGTRAP */
1369 	force_sig_fault(SIGTRAP, si_code,
1370 			user_mode(regs) ? (void __user *)regs->ip : NULL, tsk);
1371 }
1372 
1373 void user_single_step_report(struct pt_regs *regs)
1374 {
1375 	send_sigtrap(current, regs, 0, TRAP_BRKPT);
1376 }
1377