/* xref: /openbmc/linux/arch/x86/kernel/ptrace.c (revision a86854d0) */
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/context_tracking.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/syscall.h>

#include "tls.h"

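/*
 * Note on the indices below: the 64-bit regset array has no separate
 * REGSET_XFP entry (the FXSR state is already covered by REGSET_FP
 * there), so that slot is reused for the I/O-permission bitmap via the
 * REGSET_IOPERM64 = REGSET_XFP alias.
 */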
enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_XSTATE,
	REGSET_TLS,
	REGSET_IOPERM32,
};

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
#ifdef CONFIG_X86_64
	REG_OFFSET_NAME(r15),
	REG_OFFSET_NAME(r14),
	REG_OFFSET_NAME(r13),
	REG_OFFSET_NAME(r12),
	REG_OFFSET_NAME(r11),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r8),
#endif
	REG_OFFSET_NAME(bx),
	REG_OFFSET_NAME(cx),
	REG_OFFSET_NAME(dx),
	REG_OFFSET_NAME(si),
	REG_OFFSET_NAME(di),
	REG_OFFSET_NAME(bp),
	REG_OFFSET_NAME(ax),
#ifdef CONFIG_X86_32
	REG_OFFSET_NAME(ds),
	REG_OFFSET_NAME(es),
	REG_OFFSET_NAME(fs),
	REG_OFFSET_NAME(gs),
#endif
	REG_OFFSET_NAME(orig_ax),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(cs),
	REG_OFFSET_NAME(flags),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(ss),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}
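
/*
 * Illustrative pairing of the two lookup helpers above (a sketch, not
 * code from this file): kprobe-based tracing resolves a register name
 * to an offset once, then reads it from a saved pt_regs with
 * regs_get_register() from <asm/ptrace.h>:
 *
 *	int off = regs_query_register_offset("ax");
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */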

/*
 * This does not yet catch signals sent when the child dies; that would
 * have to be done in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps.  The previous stack will be directly underneath the saved
 * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
 *
 * Now, if the stack is empty, '&regs->sp' is out of range. In this
 * case we try to take the previous stack. To always return a non-null
 * stack pointer we fall back to regs as stack if no previous stack
 * exists.
 *
 * This is valid only for kernel mode traps.
 */
unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
	unsigned long sp = (unsigned long)&regs->sp;
	u32 *prev_esp;

	if (context == (sp & ~(THREAD_SIZE - 1)))
		return sp;

	prev_esp = (u32 *)(context);
	if (*prev_esp)
		return (unsigned long)*prev_esp;

	return (unsigned long)regs;
}
EXPORT_SYMBOL_GPL(kernel_stack_pointer);

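/*
 * The offsets handed to the helpers below come from PTRACE_PEEKUSR/POKEUSR
 * and are byte offsets into struct user_regs_struct, whose 32-bit members
 * line up one-to-one with the slots of struct pt_regs; hence the simple
 * regno >> 2 translation in pt_regs_access().
 */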
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;

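		/* Else, fall through and store the selector below. */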
	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}

#else  /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct,gs):
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct,ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct,es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct,cs):
		if (unlikely(value == 0))
			return -EIO;
		task_pt_regs(task)->cs = value;
		break;
	case offsetof(struct user_regs_struct,ss):
		if (unlikely(value == 0))
			return -EIO;
		task_pt_regs(task)->ss = value;
		break;
	}

	return 0;
}

#endif	/* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had set it.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}
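
/*
 * A note on the TIF_FORCED_TF bookkeeping above (descriptive only, the
 * flag itself is managed in step.c): user_enable_single_step() sets
 * X86_EFLAGS_TF and marks TIF_FORCED_TF when the kernel, not the tracee,
 * owns TF.  get_flags() then hides that TF from the debugger's readout,
 * and set_flags() keeps it set even if the debugger writes a flags value
 * without TF.
 */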

static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct,fs_base):
		if (value >= TASK_SIZE_MAX)
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl_64
		 * to set either thread.fsbase or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fsbase != value)
			return do_arch_prctl_64(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_MAX)
			return -EIO;
		if (child->thread.gsbase != value)
			return do_arch_prctl_64(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * XXX: This will not behave as expected if called on
		 * current or if fsindex != 0.
		 */
		return task->thread.fsbase;
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * XXX: This will not behave as expected if called on
		 * current or if gsindex != 0.
		 */
		return task->thread.gsbase;
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count >= sizeof(*k)) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count >= sizeof(*u)) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long  __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}
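
/*
 * Sketch of how the two regset callbacks above are reached (see
 * arch_ptrace() further down): PTRACE_GETREGS ends up doing
 *
 *	copy_regset_to_user(child, task_user_regset_view(current),
 *			    REGSET_GENERAL, 0,
 *			    sizeof(struct user_regs_struct), datap);
 *
 * and the regset core then calls genregs_get() with kbuf == NULL and
 * ubuf pointing at the tracer's buffer; PTRACE_SETREGS takes the
 * mirror-image path through genregs_set().
 */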

static void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	int i;
	struct thread_struct *thread = &(current->thread);

	/*
	 * Store in the virtual DR6 register the fact that the breakpoint
	 * was hit so the thread's debugger will see it.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		if (thread->ptrace_bps[i] == bp)
			break;
	}

	thread->debugreg6 |= (DR_TRAP0 << i);
}

/*
 * Walk through every ptrace breakpoint for this thread and build
 * the dr7 value on top of their attributes.
 */
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
	int i;
	int dr7 = 0;
	struct arch_hw_breakpoint *info;

	for (i = 0; i < HBP_NUM; i++) {
		if (bp[i] && !bp[i]->attr.disabled) {
			info = counter_arch_bp(bp[i]);
			dr7 |= encode_dr7(i, info->len, info->type);
		}
	}

	return dr7;
}

static int ptrace_fill_bp_fields(struct perf_event_attr *attr,
					int len, int type, bool disabled)
{
	int err, bp_len, bp_type;

	err = arch_bp_generic_fields(len, type, &bp_len, &bp_type);
	if (!err) {
		attr->bp_len = bp_len;
		attr->bp_type = bp_type;
		attr->disabled = disabled;
	}

	return err;
}

static struct perf_event *
ptrace_register_breakpoint(struct task_struct *tsk, int len, int type,
				unsigned long addr, bool disabled)
{
	struct perf_event_attr attr;
	int err;

	ptrace_breakpoint_init(&attr);
	attr.bp_addr = addr;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return ERR_PTR(err);

	return register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
}

static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
					int disabled)
{
	struct perf_event_attr attr = bp->attr;
	int err;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

/*
 * Handle ptrace writes to debug register 7.
 */
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long old_dr7;
	bool second_pass = false;
	int i, rc, ret = 0;

	data &= ~DR_CONTROL_RESERVED;
	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);

restore:
	rc = 0;
	for (i = 0; i < HBP_NUM; i++) {
		unsigned len, type;
		bool disabled = !decode_dr7(data, i, &len, &type);
		struct perf_event *bp = thread->ptrace_bps[i];

		if (!bp) {
			if (disabled)
				continue;

			bp = ptrace_register_breakpoint(tsk,
					len, type, 0, disabled);
			if (IS_ERR(bp)) {
				rc = PTR_ERR(bp);
				break;
			}

			thread->ptrace_bps[i] = bp;
			continue;
		}

		rc = ptrace_modify_breakpoint(bp, len, type, disabled);
		if (rc)
			break;
	}

	/* Restore if the first pass failed, second_pass shouldn't fail. */
	if (rc && !WARN_ON(second_pass)) {
		ret = rc;
		data = old_dr7;
		second_pass = true;
		goto restore;
	}

	return ret;
}

/*
 * Handle PTRACE_PEEKUSR calls for the debug register area.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long val = 0;

	if (n < HBP_NUM) {
		struct perf_event *bp = thread->ptrace_bps[n];

		if (bp)
			val = bp->hw.info.address;
	} else if (n == 6) {
		val = thread->debugreg6;
	} else if (n == 7) {
		val = thread->ptrace_dr7;
	}
	return val;
}

static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
				      unsigned long addr)
{
	struct thread_struct *t = &tsk->thread;
	struct perf_event *bp = t->ptrace_bps[nr];
	int err = 0;

	if (!bp) {
		/*
		 * Put stub len and type to create an inactive but correct bp.
		 *
		 * CHECKME: the previous code returned -EIO if the addr wasn't
		 * a valid task virtual addr. The new one will return -EINVAL
		 * in this case.
		 * -EINVAL may be what we want for in-kernel breakpoint users,
		 * but -EIO looks better for ptrace, since we refuse a register
		 * write for the user. And anyway this is the previous
		 * behaviour.
		 */
		bp = ptrace_register_breakpoint(tsk,
				X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE,
				addr, true);
		if (IS_ERR(bp))
			err = PTR_ERR(bp);
		else
			t->ptrace_bps[nr] = bp;
	} else {
		struct perf_event_attr attr = bp->attr;

		attr.bp_addr = addr;
		err = modify_user_hw_breakpoint(bp, &attr);
	}

	return err;
}

/*
 * Handle PTRACE_POKEUSR calls for the debug register area.
 */
static int ptrace_set_debugreg(struct task_struct *tsk, int n,
			       unsigned long val)
{
	struct thread_struct *thread = &tsk->thread;
	/* There are no DR4 or DR5 registers */
	int rc = -EIO;

	if (n < HBP_NUM) {
		rc = ptrace_set_breakpoint_addr(tsk, n, val);
	} else if (n == 6) {
		thread->debugreg6 = val;
		rc = 0;
	} else if (n == 7) {
		rc = ptrace_write_dr7(tsk, val);
		if (!rc)
			thread->ptrace_dr7 = val;
	}
	return rc;
}
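
/*
 * Sketch of how the debug-register helpers above are reached: in
 * arch_ptrace() below, a PTRACE_PEEKUSR/POKEUSR address that falls
 * inside user.u_debugreg[] is turned into a register number first,
 * e.g.
 *
 *	addr -= offsetof(struct user, u_debugreg[0]);
 *	ret = ptrace_set_debugreg(child, addr / sizeof(data), data);
 *
 * so n is the debug register index 0-7; 4 and 5 fall through to the
 * default -EIO above since those registers don't exist.
 */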

/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					(struct user_desc __user *)data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					(struct user_desc __user *)data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
		/* normal 64bit interface to access TLS data.
		   Works just like arch_prctl, except that the arguments
		   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl_64(child, data, addr);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
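
/*
 * Illustrative user-space counterpart of the PTRACE_PEEKUSR case above
 * (a hypothetical snippet, not part of this file); note that the request
 * is spelled PTRACE_PEEKUSER in the glibc headers and that glibc's
 * struct user_regs_struct names some members differently (e.g. rip):
 *
 *	errno = 0;
 *	long ip = ptrace(PTRACE_PEEKUSER, pid,
 *			 offsetof(struct user_regs_struct, rip), 0);
 *	if (ip == -1 && errno)
 *		perror("ptrace");
 *
 * The word returned is produced by getreg() or ptrace_get_debugreg(),
 * depending on where the offset falls within struct user.
 */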

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * Warning: bizarre corner case fixup here.  A 32-bit
		 * debugger setting orig_eax to -1 wants to disable
		 * syscall restart.  Make sure that the syscall
		 * restart code sign-extends orig_ax.  Also make sure
		 * we interpret the -ERESTART* codes correctly if
		 * loaded into regs->ax in case the task is not
		 * actually still sitting at the exit from a 32-bit
		 * syscall with TS_COMPAT still set.
		 */
		regs->orig_ax = value;
		if (syscall_get_nr(child, regs) >= 0)
			child->thread_info.status |= TS_I386_REGS_POKED;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count >= sizeof(*k)) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count >= sizeof(*u)) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

static long ia32_arch_ptrace(struct task_struct *child, compat_long_t request,
			     compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
#endif /* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_X32_ABI
static long x32_arch_ptrace(struct task_struct *child,
			    compat_long_t request, compat_ulong_t caddr,
			    compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	/* Read 32bits at location addr in the USER area.  Only allow
	   to return the lower 32bits of segment and debug registers.  */
	case PTRACE_PEEKUSR: {
		u32 tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, (__u32 __user *)datap);
		break;
	}

	/* Write the word at location addr in the USER area.  Only allow
	   to update segment and debug registers with the upper 32bits
	   zero-extended. */
	case PTRACE_POKEUSR:
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
#endif

#ifdef CONFIG_COMPAT
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
#ifdef CONFIG_X86_X32_ABI
	if (!in_ia32_syscall())
		return x32_arch_ptrace(child, request, caddr, cdata);
#endif
#ifdef CONFIG_IA32_EMULATION
	return ia32_arch_ptrace(child, request, caddr, cdata);
#else
	return 0;
#endif
}
#endif	/* CONFIG_COMPAT */

#ifdef CONFIG_X86_64

static struct user_regset x86_64_regsets[] __ro_after_init = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static struct user_regset x86_32_regsets[] __ro_after_init = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = regset_fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

/*
 * This represents bytes 464..511 in the memory layout exported through
 * the REGSET_XSTATE interface.
 */
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
{
#ifdef CONFIG_X86_64
	x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
	xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
}

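/*
 * Pick the regset view that matches the mode of @task: a task whose
 * saved registers indicate 32-bit (compat) mode gets the i386 view,
 * anything else the x86_64 view.  On a native 32-bit kernel the #ifdef
 * maze below collapses to a plain "return &user_x86_32_view;".
 */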
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (!user_64bit_mode(task_pt_regs(task)))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}

static void fill_sigtrap_info(struct task_struct *tsk,
				struct pt_regs *regs,
				int error_code, int si_code,
				struct siginfo *info)
{
	tsk->thread.trap_nr = X86_TRAP_DB;
	tsk->thread.error_code = error_code;

	info->si_signo = SIGTRAP;
	info->si_code = si_code;
	info->si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
}

void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs,
				struct siginfo *info)
{
	fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
					 int error_code, int si_code)
{
	struct siginfo info;

	clear_siginfo(&info);
	fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}