xref: /openbmc/linux/arch/ia64/kernel/ptrace.c (revision e8e0929d)
1 /*
2  * Kernel support for the ptrace() and syscall tracing interfaces.
3  *
4  * Copyright (C) 1999-2005 Hewlett-Packard Co
5  *	David Mosberger-Tang <davidm@hpl.hp.com>
6  * Copyright (C) 2006 Intel Co
7  *  2006-08-12	- IA64 Native Utrace implementation support added by
8  *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
9  *
10  * Derived from the x86 and Alpha versions.
11  */
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
14 #include <linux/slab.h>
15 #include <linux/mm.h>
16 #include <linux/errno.h>
17 #include <linux/ptrace.h>
18 #include <linux/user.h>
19 #include <linux/security.h>
20 #include <linux/audit.h>
21 #include <linux/signal.h>
22 #include <linux/regset.h>
23 #include <linux/elf.h>
24 #include <linux/tracehook.h>
25 
26 #include <asm/pgtable.h>
27 #include <asm/processor.h>
28 #include <asm/ptrace_offsets.h>
29 #include <asm/rse.h>
30 #include <asm/system.h>
31 #include <asm/uaccess.h>
32 #include <asm/unwind.h>
33 #ifdef CONFIG_PERFMON
34 #include <asm/perfmon.h>
35 #endif
36 
37 #include "entry.h"
38 
39 /*
40  * Bits in the PSR that we allow ptrace() to change:
41  *	be, up, ac, mfl, mfh (the user mask; five bits total)
42  *	db (debug breakpoint fault; one bit)
43  *	id (instruction debug fault disable; one bit)
44  *	dd (data debug fault disable; one bit)
45  *	ri (restart instruction; two bits)
46  *	is (instruction set; one bit)
47  */
48 #define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
49 		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
50 
51 #define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
52 #define PFM_MASK	MASK(38)
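
/*
 * Editor's note (hedged): MASK(n) builds an n-bit all-ones value, e.g.
 * MASK(3) == 0x7.  PFM_MASK spans the 38 architected bits of the current
 * frame marker (sof, sol, sor, rrb.gr, rrb.fr, rrb.pr: 7+7+4+7+7+6 bits).
 */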
53 
54 #define PTRACE_DEBUG	0
55 
56 #if PTRACE_DEBUG
57 # define dprintk(format...)	printk(format)
58 # define inline
59 #else
60 # define dprintk(format...)
61 #endif
62 
63 /* Return TRUE if PT was created due to kernel-entry via a system-call.  */
64 
65 static inline int
66 in_syscall (struct pt_regs *pt)
67 {
68 	return (long) pt->cr_ifs >= 0;
69 }
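
/*
 * Editor's illustration (not part of the original file): the test above
 * relies on the convention that cr.ifs bit 63 (the "valid" bit) is set
 * when the kernel was entered via an interruption and clear when it was
 * entered via a system call (see convert_to_non_syscall() below, which
 * sets that bit).  Reading cr_ifs as a signed 64-bit value lets the
 * sign bit do the work:
 *
 *	(long) 0x8000000000000003UL  < 0   ->  interruption frame
 *	(long) 0x0000000000000003UL >= 0   ->  syscall entry
 */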
70 
71 /*
72  * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
73  * bitset where bit i is set iff the NaT bit of register i is set.
74  */
75 unsigned long
76 ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
77 {
78 #	define GET_BITS(first, last, unat)				\
79 	({								\
80 		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
81 		unsigned long nbits = (last - first + 1);		\
82 		unsigned long mask = MASK(nbits) << first;		\
83 		unsigned long dist;					\
84 		if (bit < first)					\
85 			dist = 64 + bit - first;			\
86 		else							\
87 			dist = bit - first;				\
88 		ia64_rotr(unat, dist) & mask;				\
89 	})
90 	unsigned long val;
91 
92 	/*
93 	 * Registers that are stored consecutively in struct pt_regs
94 	 * can be handled in parallel.  If the register order in
95 	 * struct pt_regs changes, this code MUST be updated.
96 	 */
97 	val  = GET_BITS( 1,  1, scratch_unat);
98 	val |= GET_BITS( 2,  3, scratch_unat);
99 	val |= GET_BITS(12, 13, scratch_unat);
100 	val |= GET_BITS(14, 14, scratch_unat);
101 	val |= GET_BITS(15, 15, scratch_unat);
102 	val |= GET_BITS( 8, 11, scratch_unat);
103 	val |= GET_BITS(16, 31, scratch_unat);
104 	return val;
105 
106 #	undef GET_BITS
107 }
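
/*
 * Editor's illustration (a sketch, not kernel code): GET_BITS(first,
 * last, unat) above picks the UNaT bits that describe pt->r<first> ..
 * pt->r<last> -- they sit at bit position ia64_unat_pos(&pt->r<first>)
 * and up, because the registers are consecutive in struct pt_regs --
 * and rotates them down so they land at bits first..last of the result.
 * ia64_rotr() is a plain 64-bit right-rotate, roughly:
 */
#if 0
static inline unsigned long example_rotr(unsigned long x, unsigned long n)
{
	/* bits shifted out on the right re-enter on the left */
	return (x >> n) | (x << ((64 - n) & 63));
}
#endif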
108 
109 /*
110  * Set the NaT bits for the scratch registers according to NAT and
111  * return the resulting unat (assuming the scratch registers are
112  * stored in PT).
113  */
114 unsigned long
115 ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
116 {
117 #	define PUT_BITS(first, last, nat)				\
118 	({								\
119 		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
120 		unsigned long nbits = (last - first + 1);		\
121 		unsigned long mask = MASK(nbits) << first;		\
122 		long dist;						\
123 		if (bit < first)					\
124 			dist = 64 + bit - first;			\
125 		else							\
126 			dist = bit - first;				\
127 		ia64_rotl(nat & mask, dist);				\
128 	})
129 	unsigned long scratch_unat;
130 
131 	/*
132 	 * Registers that are stored consecutively in struct pt_regs
133 	 * can be handled in parallel.  If the register order in
134 	 * struct pt_regs changes, this code MUST be updated.
135 	 */
136 	scratch_unat  = PUT_BITS( 1,  1, nat);
137 	scratch_unat |= PUT_BITS( 2,  3, nat);
138 	scratch_unat |= PUT_BITS(12, 13, nat);
139 	scratch_unat |= PUT_BITS(14, 14, nat);
140 	scratch_unat |= PUT_BITS(15, 15, nat);
141 	scratch_unat |= PUT_BITS( 8, 11, nat);
142 	scratch_unat |= PUT_BITS(16, 31, nat);
143 
144 	return scratch_unat;
145 
146 #	undef PUT_BITS
147 }
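
/*
 * Editor's note: PUT_BITS() is the exact inverse of GET_BITS() above:
 * it masks out bits first..last of the caller-supplied NaT set and
 * rotates them left by the same distance, back to the bit positions
 * that ar.unat uses for the registers' save locations in pt_regs.
 */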
148 
149 #define IA64_MLX_TEMPLATE	0x2
150 #define IA64_MOVL_OPCODE	6
151 
152 void
153 ia64_increment_ip (struct pt_regs *regs)
154 {
155 	unsigned long w0, ri = ia64_psr(regs)->ri + 1;
156 
157 	if (ri > 2) {
158 		ri = 0;
159 		regs->cr_iip += 16;
160 	} else if (ri == 2) {
161 		get_user(w0, (char __user *) regs->cr_iip + 0);
162 		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
163 			/*
164 			 * rfi'ing to slot 2 of an MLX bundle causes
165 			 * an illegal operation fault.  We don't want
166 			 * that to happen...
167 			 */
168 			ri = 0;
169 			regs->cr_iip += 16;
170 		}
171 	}
172 	ia64_psr(regs)->ri = ri;
173 }
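
/*
 * Editor's illustration: ia64 instructions live in 16-byte bundles of
 * three slots, and psr.ri selects the slot (0-2) within the bundle at
 * cr.iip, so single-stepping forward looks like:
 *
 *	iip = 0x4000, ri = 1  ->  iip = 0x4000, ri = 2
 *	iip = 0x4000, ri = 2  ->  iip = 0x4010, ri = 0
 *
 * except that an MLX bundle (e.g. movl) uses slots 1 and 2 as a single
 * long instruction, so slot 2 is skipped for those, as handled above.
 */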
174 
175 void
176 ia64_decrement_ip (struct pt_regs *regs)
177 {
178 	unsigned long w0, ri = ia64_psr(regs)->ri - 1;
179 
180 	if (ia64_psr(regs)->ri == 0) {
181 		regs->cr_iip -= 16;
182 		ri = 2;
183 		get_user(w0, (char __user *) regs->cr_iip + 0);
184 		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
185 			/*
186 			 * rfi'ing to slot 2 of an MLX bundle causes
187 			 * an illegal operation fault.  We don't want
188 			 * that to happen...
189 			 */
190 			ri = 1;
191 		}
192 	}
193 	ia64_psr(regs)->ri = ri;
194 }
195 
196 /*
197  * This routine is used to read the rnat bits that are stored on the
198  * kernel backing store.  Since, in general, the alignments of the user
199  * and kernel backing stores differ, this is not completely trivial.  In
200  * essence, we need to construct the user RNAT based on up to two
201  * kernel RNAT values and/or the RNAT value saved in the child's
202  * pt_regs.
203  *
204  * user rbs
205  *
206  * +--------+ <-- lowest address
207  * | slot62 |
208  * +--------+
209  * |  rnat  | 0x....1f8
210  * +--------+
211  * | slot00 | \
212  * +--------+ |
213  * | slot01 | > child_regs->ar_rnat
214  * +--------+ |
215  * | slot02 | /				kernel rbs
216  * +--------+				+--------+
217  *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
218  * +- - - - +				+--------+
219  *					| slot62 |
220  * +- - - - +				+--------+
221  *					|  rnat	 |
222  * +- - - - +				+--------+
223  *   vrnat				| slot00 |
224  * +- - - - +				+--------+
225  *					=	 =
226  *					+--------+
227  *					| slot00 | \
228  *					+--------+ |
229  *					| slot01 | > child_stack->ar_rnat
230  *					+--------+ |
231  *					| slot02 | /
232  *					+--------+
233  *						  <--- child_stack->ar_bspstore
234  *
235  * The way to think of this code is as follows: bit 0 in the user rnat
236  * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
237  * values.  The kernel rnat value holding this bit is stored in
238  * variable rnat0.  rnat1 is loaded with the kernel rnat value that
239  * forms the upper bits of the user rnat value.
240  *
241  * Boundary cases:
242  *
243  * o when reading the rnat "below" the first rnat slot on the kernel
244  *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
245  *   merged in from pt->ar_rnat.
246  *
247  * o when reading the rnat "above" the last rnat slot on the kernel
248  *   backing store, rnat0/rnat1 get their values from sw->ar_rnat.
249  */
250 static unsigned long
251 get_rnat (struct task_struct *task, struct switch_stack *sw,
252 	  unsigned long *krbs, unsigned long *urnat_addr,
253 	  unsigned long *urbs_end)
254 {
255 	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
256 	unsigned long umask = 0, mask, m;
257 	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
258 	long num_regs, nbits;
259 	struct pt_regs *pt;
260 
261 	pt = task_pt_regs(task);
262 	kbsp = (unsigned long *) sw->ar_bspstore;
263 	ubspstore = (unsigned long *) pt->ar_bspstore;
264 
265 	if (urbs_end < urnat_addr)
266 		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
267 	else
268 		nbits = 63;
269 	mask = MASK(nbits);
270 	/*
271 	 * First, figure out which bit number slot 0 in user-land maps
272 	 * to in the kernel rnat.  Do this by figuring out how many
273 	 * register slots we're beyond the user's backing store and
274 	 * then computing the equivalent address in kernel space.
275 	 */
276 	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
277 	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
278 	shift = ia64_rse_slot_num(slot0_kaddr);
279 	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
280 	rnat0_kaddr = rnat1_kaddr - 64;
281 
282 	if (ubspstore + 63 > urnat_addr) {
283 		/* some bits need to be merged in from pt->ar_rnat */
284 		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
285 		urnat = (pt->ar_rnat & umask);
286 		mask &= ~umask;
287 		if (!mask)
288 			return urnat;
289 	}
290 
291 	m = mask << shift;
292 	if (rnat0_kaddr >= kbsp)
293 		rnat0 = sw->ar_rnat;
294 	else if (rnat0_kaddr > krbs)
295 		rnat0 = *rnat0_kaddr;
296 	urnat |= (rnat0 & m) >> shift;
297 
298 	m = mask >> (63 - shift);
299 	if (rnat1_kaddr >= kbsp)
300 		rnat1 = sw->ar_rnat;
301 	else if (rnat1_kaddr > krbs)
302 		rnat1 = *rnat1_kaddr;
303 	urnat |= (rnat1 & m) << (63 - shift);
304 	return urnat;
305 }
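
/*
 * Editor's note (hedged): the RSE stores one RNaT collection word after
 * every 63 stacked registers -- whenever bits 8:3 of the store address
 * are all ones, i.e. (addr & 0x1f8) == 0x1f8 -- which is why get_rnat()
 * works in groups of 63 bits and why ia64_rse_rnat_addr() of a slot
 * address yields the next address with those bits set.
 */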
306 
307 /*
308  * The reverse of get_rnat.
309  */
310 static void
311 put_rnat (struct task_struct *task, struct switch_stack *sw,
312 	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
313 	  unsigned long *urbs_end)
314 {
315 	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
316 	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
317 	long num_regs, nbits;
318 	struct pt_regs *pt;
319 	unsigned long cfm, *urbs_kargs;
320 
321 	pt = task_pt_regs(task);
322 	kbsp = (unsigned long *) sw->ar_bspstore;
323 	ubspstore = (unsigned long *) pt->ar_bspstore;
324 
325 	urbs_kargs = urbs_end;
326 	if (in_syscall(pt)) {
327 		/*
328 		 * If entered via syscall, don't allow user to set rnat bits
329 		 * for syscall args.
330 		 */
331 		cfm = pt->cr_ifs;
332 		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
333 	}
334 
335 	if (urbs_kargs >= urnat_addr)
336 		nbits = 63;
337 	else {
338 		if ((urnat_addr - 63) >= urbs_kargs)
339 			return;
340 		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
341 	}
342 	mask = MASK(nbits);
343 
344 	/*
345 	 * First, figure out which bit number slot 0 in user-land maps
346 	 * to in the kernel rnat.  Do this by figuring out how many
347 	 * register slots we're beyond the user's backing store and
348 	 * then computing the equivalent address in kernel space.
349 	 */
350 	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
351 	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
352 	shift = ia64_rse_slot_num(slot0_kaddr);
353 	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
354 	rnat0_kaddr = rnat1_kaddr - 64;
355 
356 	if (ubspstore + 63 > urnat_addr) {
357 		/* some bits need to be placed in pt->ar_rnat: */
358 		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
359 		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
360 		mask &= ~umask;
361 		if (!mask)
362 			return;
363 	}
364 	/*
365 	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
366 	 * rnat slot is ignored, so we don't have to clear it here.
367 	 */
368 	rnat0 = (urnat << shift);
369 	m = mask << shift;
370 	if (rnat0_kaddr >= kbsp)
371 		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
372 	else if (rnat0_kaddr > krbs)
373 		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
374 
375 	rnat1 = (urnat >> (63 - shift));
376 	m = mask >> (63 - shift);
377 	if (rnat1_kaddr >= kbsp)
378 		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
379 	else if (rnat1_kaddr > krbs)
380 		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
381 }
382 
383 static inline int
384 on_kernel_rbs (unsigned long addr, unsigned long bspstore,
385 	       unsigned long urbs_end)
386 {
387 	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
388 						      urbs_end);
389 	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
390 }
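
/*
 * Editor's note: ADDR is treated as kernel-RBS-backed when it lies
 * between the child's ar.bspstore (the first user slot never written
 * back to user memory) and the RNaT slot just past URBS_END -- i.e. the
 * stretch of the user backing store whose current contents still live
 * only on the child's kernel stack.
 */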
391 
392 /*
393  * Read a word from the user-level backing store of task CHILD.  ADDR
394  * is the user-level address to read the word from, VAL a pointer to
395  * the return value, and USER_RBS_END gives the end of the user-level
396  * backing store (i.e., it's the address that would be in ar.bsp after
397  * the user executed a "cover" instruction).
398  *
399  * This routine takes care of accessing the kernel register backing
400  * store for those registers that got spilled there.  It also takes
401  * care of calculating the appropriate RNaT collection words.
402  */
403 long
404 ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
405 	   unsigned long user_rbs_end, unsigned long addr, long *val)
406 {
407 	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
408 	struct pt_regs *child_regs;
409 	size_t copied;
410 	long ret;
411 
412 	urbs_end = (long *) user_rbs_end;
413 	laddr = (unsigned long *) addr;
414 	child_regs = task_pt_regs(child);
415 	bspstore = (unsigned long *) child_regs->ar_bspstore;
416 	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
417 	if (on_kernel_rbs(addr, (unsigned long) bspstore,
418 			  (unsigned long) urbs_end))
419 	{
420 		/*
421 		 * Attempt to read the RBS in an area that's actually
422 		 * on the kernel RBS => read the corresponding bits in
423 		 * the kernel RBS.
424 		 */
425 		rnat_addr = ia64_rse_rnat_addr(laddr);
426 		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
427 
428 		if (laddr == rnat_addr) {
429 			/* return NaT collection word itself */
430 			*val = ret;
431 			return 0;
432 		}
433 
434 		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
435 			/*
436 			 * It is implementation dependent whether the
437 			 * data portion of a NaT value gets saved on a
438 			 * st8.spill or RSE spill (e.g., see EAS 2.6,
439 			 * 4.4.4.6 Register Spill and Fill).  To get
440 			 * consistent behavior across all possible
441 			 * IA-64 implementations, we return zero in
442 			 * this case.
443 			 */
444 			*val = 0;
445 			return 0;
446 		}
447 
448 		if (laddr < urbs_end) {
449 			/*
450 			 * The desired word is on the kernel RBS and
451 			 * is not a NaT.
452 			 */
453 			regnum = ia64_rse_num_regs(bspstore, laddr);
454 			*val = *ia64_rse_skip_regs(krbs, regnum);
455 			return 0;
456 		}
457 	}
458 	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
459 	if (copied != sizeof(ret))
460 		return -EIO;
461 	*val = ret;
462 	return 0;
463 }
464 
465 long
466 ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
467 	   unsigned long user_rbs_end, unsigned long addr, long val)
468 {
469 	unsigned long *bspstore, *krbs, regnum, *laddr;
470 	unsigned long *urbs_end = (long *) user_rbs_end;
471 	struct pt_regs *child_regs;
472 
473 	laddr = (unsigned long *) addr;
474 	child_regs = task_pt_regs(child);
475 	bspstore = (unsigned long *) child_regs->ar_bspstore;
476 	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
477 	if (on_kernel_rbs(addr, (unsigned long) bspstore,
478 			  (unsigned long) urbs_end))
479 	{
480 		/*
481 		 * Attempt to write the RBS in an area that's actually
482 		 * on the kernel RBS => write the corresponding bits
483 		 * in the kernel RBS.
484 		 */
485 		if (ia64_rse_is_rnat_slot(laddr))
486 			put_rnat(child, child_stack, krbs, laddr, val,
487 				 urbs_end);
488 		else {
489 			if (laddr < urbs_end) {
490 				regnum = ia64_rse_num_regs(bspstore, laddr);
491 				*ia64_rse_skip_regs(krbs, regnum) = val;
492 			}
493 		}
494 	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
495 		   != sizeof(val))
496 		return -EIO;
497 	return 0;
498 }
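
/*
 * Editor's illustration (hypothetical caller; the helper name and its
 * use are assumptions, not kernel code): a peek/poke round trip against
 * a stopped tracee, with "sw" the child's switch_stack as computed in
 * ptrace_getregs() below.
 */
#if 0
static long example_rbs_rmw(struct task_struct *child,
			    struct switch_stack *sw, unsigned long addr)
{
	unsigned long urbs_end;
	long val, ret;

	urbs_end = ia64_get_user_rbs_end(child, task_pt_regs(child), NULL);
	ret = ia64_peek(child, sw, urbs_end, addr, &val);  /* read one word */
	if (ret < 0)
		return ret;
	return ia64_poke(child, sw, urbs_end, addr, val);  /* write it back */
}
#endif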
499 
500 /*
501  * Calculate the address of the end of the user-level register backing
502  * store.  This is the address that would have been stored in ar.bsp
503  * if the user had executed a "cover" instruction right before
504  * entering the kernel.  If CFMP is not NULL, it is used to return the
505  * "current frame mask" that was active at the time the kernel was
506  * entered.
507  */
508 unsigned long
509 ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
510 		       unsigned long *cfmp)
511 {
512 	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
513 	long ndirty;
514 
515 	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
516 	bspstore = (unsigned long *) pt->ar_bspstore;
517 	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
518 
519 	if (in_syscall(pt))
520 		ndirty += (cfm & 0x7f);
521 	else
522 		cfm &= ~(1UL << 63);	/* clear valid bit */
523 
524 	if (cfmp)
525 		*cfmp = cfm;
526 	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
527 }
528 
529 /*
530  * Synchronize (i.e., write) the RSE backing store living in kernel
531  * space to the VM of the CHILD task.  SW is a pointer to CHILD's
532  * switch_stack structure.  USER_RBS_START and USER_RBS_END are the
533  * user-level addresses at which the backing store begins and ends,
534  * respectively.
535  */
536 long
537 ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
538 		    unsigned long user_rbs_start, unsigned long user_rbs_end)
539 {
540 	unsigned long addr, val;
541 	long ret;
542 
543 	/* now copy word for word from kernel rbs to user rbs: */
544 	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
545 		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
546 		if (ret < 0)
547 			return ret;
548 		if (access_process_vm(child, addr, &val, sizeof(val), 1)
549 		    != sizeof(val))
550 			return -EIO;
551 	}
552 	return 0;
553 }
554 
555 static long
556 ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
557 		unsigned long user_rbs_start, unsigned long user_rbs_end)
558 {
559 	unsigned long addr, val;
560 	long ret;
561 
562 	/* now copy word for word from user rbs to kernel rbs: */
563 	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
564 		if (access_process_vm(child, addr, &val, sizeof(val), 0)
565 				!= sizeof(val))
566 			return -EIO;
567 
568 		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
569 		if (ret < 0)
570 			return ret;
571 	}
572 	return 0;
573 }
574 
575 typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
576 			    unsigned long, unsigned long);
577 
578 static void do_sync_rbs(struct unw_frame_info *info, void *arg)
579 {
580 	struct pt_regs *pt;
581 	unsigned long urbs_end;
582 	syncfunc_t fn = arg;
583 
584 	if (unw_unwind_to_user(info) < 0)
585 		return;
586 	pt = task_pt_regs(info->task);
587 	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);
588 
589 	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
590 }
591 
592 /*
593  * When a thread is stopped (ptraced), the debugger might change the
594  * thread's user stack (by writing to its memory directly).  We must not
595  * let the RSE state stored in the kernel overwrite the user stack, since
596  * the user-space copy is then the newer one.  To work around this, we
597  * copy the kernel RSE to the user RSE before the task stops, so the user
598  * RSE holds up-to-date data; we copy the user RSE back to the kernel
599  * after the task resumes from the traced stop, so the kernel returns to
600  * user mode with the newer RSE.  TIF_RESTORE_RSE marks a pending copy-back.
601  */
602 void ia64_ptrace_stop(void)
603 {
604 	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
605 		return;
606 	set_notify_resume(current);
607 	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
608 }
609 
610 /*
611  * This is called to read back the register backing store.
612  */
613 void ia64_sync_krbs(void)
614 {
615 	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
616 
617 	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
618 }
619 
620 /*
621  * After PTRACE_ATTACH, a thread's register backing store area in user
622  * space is assumed to contain correct data whenever the thread is
623  * stopped.  arch_ptrace_stop takes care of this on tracing stops.
624  * But if the child was already stopped for job control when we attach
625  * to it, then it might not ever get into ptrace_stop by the time we
626  * want to examine the user memory containing the RBS.
627  */
628 void
629 ptrace_attach_sync_user_rbs (struct task_struct *child)
630 {
631 	int stopped = 0;
632 	struct unw_frame_info info;
633 
634 	/*
635 	 * If the child is in TASK_STOPPED, we need to change that to
636 	 * TASK_TRACED momentarily while we operate on it.  This ensures
637 	 * that the child won't be woken up and return to user mode while
638 	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
639 	 */
640 
641 	read_lock(&tasklist_lock);
642 	if (child->signal) {
643 		spin_lock_irq(&child->sighand->siglock);
644 		if (child->state == TASK_STOPPED &&
645 		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
646 			set_notify_resume(child);
647 
648 			child->state = TASK_TRACED;
649 			stopped = 1;
650 		}
651 		spin_unlock_irq(&child->sighand->siglock);
652 	}
653 	read_unlock(&tasklist_lock);
654 
655 	if (!stopped)
656 		return;
657 
658 	unw_init_from_blocked_task(&info, child);
659 	do_sync_rbs(&info, ia64_sync_user_rbs);
660 
661 	/*
662 	 * Now move the child back into TASK_STOPPED if it should be in a
663 	 * job control stop, so that SIGCONT can be used to wake it up.
664 	 */
665 	read_lock(&tasklist_lock);
666 	if (child->signal) {
667 		spin_lock_irq(&child->sighand->siglock);
668 		if (child->state == TASK_TRACED &&
669 		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
670 			child->state = TASK_STOPPED;
671 		}
672 		spin_unlock_irq(&child->sighand->siglock);
673 	}
674 	read_unlock(&tasklist_lock);
675 }
676 
677 static inline int
678 thread_matches (struct task_struct *thread, unsigned long addr)
679 {
680 	unsigned long thread_rbs_end;
681 	struct pt_regs *thread_regs;
682 
683 	if (ptrace_check_attach(thread, 0) < 0)
684 		/*
685 		 * If the thread is not in an attachable state, we'll
686 		 * ignore it.  The net effect is that if ADDR happens
687 		 * to overlap with the portion of the thread's
688 		 * register backing store that is currently residing
689 		 * on the thread's kernel stack, then ptrace() may end
690 		 * up accessing a stale value.  But if the thread
691 		 * isn't stopped, that's a problem anyhow, so we're
692 		 * doing as well as we can...
693 		 */
694 		return 0;
695 
696 	thread_regs = task_pt_regs(thread);
697 	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
698 	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
699 		return 0;
700 
701 	return 1;	/* looks like we've got a winner */
702 }
703 
704 /*
705  * Write f32-f127 back to task->thread.fph if it has been modified.
706  */
707 inline void
708 ia64_flush_fph (struct task_struct *task)
709 {
710 	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
711 
712 	/*
713 	 * Prevent migrating this task while
714 	 * we're fiddling with the FPU state
715 	 */
716 	preempt_disable();
717 	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
718 		psr->mfh = 0;
719 		task->thread.flags |= IA64_THREAD_FPH_VALID;
720 		ia64_save_fpu(&task->thread.fph[0]);
721 	}
722 	preempt_enable();
723 }
724 
725 /*
726  * Sync the fph state of the task so that it can be manipulated
727  * through thread.fph.  If necessary, f32-f127 are written back to
728  * thread.fph or, if the fph state hasn't been used before, thread.fph
729  * is cleared to zeroes.  Also, access to f32-f127 is disabled to
730  * ensure that the task picks up the state from thread.fph when it
731  * executes again.
732  */
733 void
734 ia64_sync_fph (struct task_struct *task)
735 {
736 	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
737 
738 	ia64_flush_fph(task);
739 	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
740 		task->thread.flags |= IA64_THREAD_FPH_VALID;
741 		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
742 	}
743 	ia64_drop_fpu(task);
744 	psr->dfh = 1;
745 }
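
/*
 * Editor's note: the two helpers above give two access modes.
 * ia64_flush_fph() suffices for *reading* f32-f127 (it spills live FPU
 * state into thread.fph, as ptrace_getregs() does below), whereas
 * ia64_sync_fph() must precede *writing* thread.fph, since it also
 * forces the task to reload from thread.fph (psr.dfh = 1) the next time
 * it touches f32-f127, as ptrace_setregs() does below.
 */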
746 
747 /*
748  * Change the machine-state of CHILD such that it will return via the normal
749  * kernel exit-path, rather than the syscall-exit path.
750  */
751 static void
752 convert_to_non_syscall (struct task_struct *child, struct pt_regs  *pt,
753 			unsigned long cfm)
754 {
755 	struct unw_frame_info info, prev_info;
756 	unsigned long ip, sp, pr;
757 
758 	unw_init_from_blocked_task(&info, child);
759 	while (1) {
760 		prev_info = info;
761 		if (unw_unwind(&info) < 0)
762 			return;
763 
764 		unw_get_sp(&info, &sp);
765 		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
766 		    < IA64_PT_REGS_SIZE) {
767 			dprintk("ptrace.%s: ran off the top of the kernel "
768 				"stack\n", __func__);
769 			return;
770 		}
771 		if (unw_get_pr (&prev_info, &pr) < 0) {
772 			unw_get_rp(&prev_info, &ip);
773 			dprintk("ptrace.%s: failed to read "
774 				"predicate register (ip=0x%lx)\n",
775 				__func__, ip);
776 			return;
777 		}
778 		if (unw_is_intr_frame(&info)
779 		    && (pr & (1UL << PRED_USER_STACK)))
780 			break;
781 	}
782 
783 	/*
784 	 * Note: at the time of this call, the target task is blocked
785 	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
786 	 * (aka, "pLvSys") we redirect execution from
787 	 * .work_pending_syscall_end to .work_processed_kernel.
788 	 */
789 	unw_get_pr(&prev_info, &pr);
790 	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
791 	pr |=  (1UL << PRED_NON_SYSCALL);
792 	unw_set_pr(&prev_info, pr);
793 
794 	pt->cr_ifs = (1UL << 63) | cfm;
795 	/*
796 	 * Clear the memory that is NOT written on syscall-entry to
797 	 * ensure we do not leak kernel-state to user when execution
798 	 * resumes.
799 	 */
800 	pt->r2 = 0;
801 	pt->r3 = 0;
802 	pt->r14 = 0;
803 	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
804 	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
805 	pt->b7 = 0;
806 	pt->ar_ccv = 0;
807 	pt->ar_csd = 0;
808 	pt->ar_ssd = 0;
809 }
810 
811 static int
812 access_nat_bits (struct task_struct *child, struct pt_regs *pt,
813 		 struct unw_frame_info *info,
814 		 unsigned long *data, int write_access)
815 {
816 	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
817 	char nat = 0;
818 
819 	if (write_access) {
820 		nat_bits = *data;
821 		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
822 		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
823 			dprintk("ptrace: failed to set ar.unat\n");
824 			return -1;
825 		}
826 		for (regnum = 4; regnum <= 7; ++regnum) {
827 			unw_get_gr(info, regnum, &dummy, &nat);
828 			unw_set_gr(info, regnum, dummy,
829 				   (nat_bits >> regnum) & 1);
830 		}
831 	} else {
832 		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
833 			dprintk("ptrace: failed to read ar.unat\n");
834 			return -1;
835 		}
836 		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
837 		for (regnum = 4; regnum <= 7; ++regnum) {
838 			unw_get_gr(info, regnum, &dummy, &nat);
839 			nat_bits |= (nat != 0) << regnum;
840 		}
841 		*data = nat_bits;
842 	}
843 	return 0;
844 }
845 
846 static int
847 access_uarea (struct task_struct *child, unsigned long addr,
848 	      unsigned long *data, int write_access);
849 
850 static long
851 ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
852 {
853 	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
854 	struct unw_frame_info info;
855 	struct ia64_fpreg fpval;
856 	struct switch_stack *sw;
857 	struct pt_regs *pt;
858 	long ret, retval = 0;
859 	char nat = 0;
860 	int i;
861 
862 	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
863 		return -EIO;
864 
865 	pt = task_pt_regs(child);
866 	sw = (struct switch_stack *) (child->thread.ksp + 16);
867 	unw_init_from_blocked_task(&info, child);
868 	if (unw_unwind_to_user(&info) < 0) {
869 		return -EIO;
870 	}
871 
872 	if (((unsigned long) ppr & 0x7) != 0) {
873 		dprintk("ptrace:unaligned register address %p\n", ppr);
874 		return -EIO;
875 	}
876 
877 	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
878 	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
879 	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
880 	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
881 	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
882 	    || access_uarea(child, PT_CFM, &cfm, 0) < 0
883 	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0) < 0)
884 		return -EIO;
885 
886 	/* control regs */
887 
888 	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
889 	retval |= __put_user(psr, &ppr->cr_ipsr);
890 
891 	/* app regs */
892 
893 	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
894 	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
895 	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
896 	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
897 	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
898 	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
899 
900 	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
901 	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
902 	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
903 	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
904 	retval |= __put_user(cfm, &ppr->cfm);
905 
906 	/* gr1-gr3 */
907 
908 	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
909 	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
910 
911 	/* gr4-gr7 */
912 
913 	for (i = 4; i < 8; i++) {
914 		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
915 			return -EIO;
916 		retval |= __put_user(val, &ppr->gr[i]);
917 	}
918 
919 	/* gr8-gr11 */
920 
921 	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
922 
923 	/* gr12-gr15 */
924 
925 	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
926 	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
927 	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
928 
929 	/* gr16-gr31 */
930 
931 	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
932 
933 	/* b0 */
934 
935 	retval |= __put_user(pt->b0, &ppr->br[0]);
936 
937 	/* b1-b5 */
938 
939 	for (i = 1; i < 6; i++) {
940 		if (unw_access_br(&info, i, &val, 0) < 0)
941 			return -EIO;
942 		retval |= __put_user(val, &ppr->br[i]);
943 	}
944 
945 	/* b6-b7 */
946 
947 	retval |= __put_user(pt->b6, &ppr->br[6]);
948 	retval |= __put_user(pt->b7, &ppr->br[7]);
949 
950 	/* fr2-fr5 */
951 
952 	for (i = 2; i < 6; i++) {
953 		if (unw_get_fr(&info, i, &fpval) < 0)
954 			return -EIO;
955 		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
956 	}
957 
958 	/* fr6-fr11 */
959 
960 	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
961 				 sizeof(struct ia64_fpreg) * 6);
962 
963 	/* fp scratch regs(12-15) */
964 
965 	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
966 				 sizeof(struct ia64_fpreg) * 4);
967 
968 	/* fr16-fr31 */
969 
970 	for (i = 16; i < 32; i++) {
971 		if (unw_get_fr(&info, i, &fpval) < 0)
972 			return -EIO;
973 		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
974 	}
975 
976 	/* fph */
977 
978 	ia64_flush_fph(child);
979 	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
980 				 sizeof(ppr->fr[32]) * 96);
981 
982 	/*  preds */
983 
984 	retval |= __put_user(pt->pr, &ppr->pr);
985 
986 	/* nat bits */
987 
988 	retval |= __put_user(nat_bits, &ppr->nat);
989 
990 	ret = retval ? -EIO : 0;
991 	return ret;
992 }
993 
994 static long
995 ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
996 {
997 	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
998 	struct unw_frame_info info;
999 	struct switch_stack *sw;
1000 	struct ia64_fpreg fpval;
1001 	struct pt_regs *pt;
1002 	long ret, retval = 0;
1003 	int i;
1004 
1005 	memset(&fpval, 0, sizeof(fpval));
1006 
1007 	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
1008 		return -EIO;
1009 
1010 	pt = task_pt_regs(child);
1011 	sw = (struct switch_stack *) (child->thread.ksp + 16);
1012 	unw_init_from_blocked_task(&info, child);
1013 	if (unw_unwind_to_user(&info) < 0) {
1014 		return -EIO;
1015 	}
1016 
1017 	if (((unsigned long) ppr & 0x7) != 0) {
1018 		dprintk("ptrace:unaligned register address %p\n", ppr);
1019 		return -EIO;
1020 	}
1021 
1022 	/* control regs */
1023 
1024 	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
1025 	retval |= __get_user(psr, &ppr->cr_ipsr);
1026 
1027 	/* app regs */
1028 
1029 	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
1030 	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
1031 	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
1032 	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
1033 	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
1034 	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
1035 
1036 	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
1037 	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
1038 	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
1039 	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
1040 	retval |= __get_user(cfm, &ppr->cfm);
1041 
1042 	/* gr1-gr3 */
1043 
1044 	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
1045 	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
1046 
1047 	/* gr4-gr7 */
1048 
1049 	for (i = 4; i < 8; i++) {
1050 		retval |= __get_user(val, &ppr->gr[i]);
1051 		/* NaT bit will be set via PT_NAT_BITS: */
1052 		if (unw_set_gr(&info, i, val, 0) < 0)
1053 			return -EIO;
1054 	}
1055 
1056 	/* gr8-gr11 */
1057 
1058 	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
1059 
1060 	/* gr12-gr15 */
1061 
1062 	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
1063 	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
1064 	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
1065 
1066 	/* gr16-gr31 */
1067 
1068 	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
1069 
1070 	/* b0 */
1071 
1072 	retval |= __get_user(pt->b0, &ppr->br[0]);
1073 
1074 	/* b1-b5 */
1075 
1076 	for (i = 1; i < 6; i++) {
1077 		retval |= __get_user(val, &ppr->br[i]);
1078 		unw_set_br(&info, i, val);
1079 	}
1080 
1081 	/* b6-b7 */
1082 
1083 	retval |= __get_user(pt->b6, &ppr->br[6]);
1084 	retval |= __get_user(pt->b7, &ppr->br[7]);
1085 
1086 	/* fr2-fr5 */
1087 
1088 	for (i = 2; i < 6; i++) {
1089 		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
1090 		if (unw_set_fr(&info, i, fpval) < 0)
1091 			return -EIO;
1092 	}
1093 
1094 	/* fr6-fr11 */
1095 
1096 	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
1097 				   sizeof(ppr->fr[6]) * 6);
1098 
1099 	/* fp scratch regs(12-15) */
1100 
1101 	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
1102 				   sizeof(ppr->fr[12]) * 4);
1103 
1104 	/* fr16-fr31 */
1105 
1106 	for (i = 16; i < 32; i++) {
1107 		retval |= __copy_from_user(&fpval, &ppr->fr[i],
1108 					   sizeof(fpval));
1109 		if (unw_set_fr(&info, i, fpval) < 0)
1110 			return -EIO;
1111 	}
1112 
1113 	/* fph */
1114 
1115 	ia64_sync_fph(child);
1116 	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
1117 				   sizeof(ppr->fr[32]) * 96);
1118 
1119 	/* preds */
1120 
1121 	retval |= __get_user(pt->pr, &ppr->pr);
1122 
1123 	/* nat bits */
1124 
1125 	retval |= __get_user(nat_bits, &ppr->nat);
1126 
1127 	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
1128 	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
1129 	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
1130 	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
1131 	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
1132 	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
1133 	retval |= access_uarea(child, PT_CFM, &cfm, 1);
1134 	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
1135 
1136 	ret = retval ? -EIO : 0;
1137 	return ret;
1138 }
1139 
1140 void
1141 user_enable_single_step (struct task_struct *child)
1142 {
1143 	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1144 
1145 	set_tsk_thread_flag(child, TIF_SINGLESTEP);
1146 	child_psr->ss = 1;
1147 }
1148 
1149 void
1150 user_enable_block_step (struct task_struct *child)
1151 {
1152 	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1153 
1154 	set_tsk_thread_flag(child, TIF_SINGLESTEP);
1155 	child_psr->tb = 1;
1156 }
1157 
1158 void
1159 user_disable_single_step (struct task_struct *child)
1160 {
1161 	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1162 
1163 	/* make sure the single step/taken-branch trap bits are not set: */
1164 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1165 	child_psr->ss = 0;
1166 	child_psr->tb = 0;
1167 }
1168 
1169 /*
1170  * Called by kernel/ptrace.c when detaching.
1171  *
1172  * Make sure the single step bit is not set.
1173  */
1174 void
1175 ptrace_disable (struct task_struct *child)
1176 {
1177 	user_disable_single_step(child);
1178 }
1179 
1180 long
1181 arch_ptrace (struct task_struct *child, long request, long addr, long data)
1182 {
1183 	switch (request) {
1184 	case PTRACE_PEEKTEXT:
1185 	case PTRACE_PEEKDATA:
1186 		/* read word at location addr */
1187 		if (access_process_vm(child, addr, &data, sizeof(data), 0)
1188 		    != sizeof(data))
1189 			return -EIO;
1190 		/* ensure return value is not mistaken for error code */
1191 		force_successful_syscall_return();
1192 		return data;
1193 
1194 	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
1195 	 * by the generic ptrace_request().
1196 	 */
1197 
1198 	case PTRACE_PEEKUSR:
1199 		/* read the word at addr in the USER area */
1200 		if (access_uarea(child, addr, &data, 0) < 0)
1201 			return -EIO;
1202 		/* ensure return value is not mistaken for error code */
1203 		force_successful_syscall_return();
1204 		return data;
1205 
1206 	case PTRACE_POKEUSR:
1207 		/* write the word at addr in the USER area */
1208 		if (access_uarea(child, addr, &data, 1) < 0)
1209 			return -EIO;
1210 		return 0;
1211 
1212 	case PTRACE_OLD_GETSIGINFO:
1213 		/* for backwards-compatibility */
1214 		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
1215 
1216 	case PTRACE_OLD_SETSIGINFO:
1217 		/* for backwards-compatibility */
1218 		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
1219 
1220 	case PTRACE_GETREGS:
1221 		return ptrace_getregs(child,
1222 				      (struct pt_all_user_regs __user *) data);
1223 
1224 	case PTRACE_SETREGS:
1225 		return ptrace_setregs(child,
1226 				      (struct pt_all_user_regs __user *) data);
1227 
1228 	default:
1229 		return ptrace_request(child, request, addr, data);
1230 	}
1231 }
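
/*
 * Editor's illustration (hypothetical tracer-side usage, user space):
 * PEEK requests return the data word itself, so a tracer must use the
 * errno convention to tell a -1 data word from an error; the
 * force_successful_syscall_return() calls above are what make that
 * convention work.  With PT_CR_IIP from <asm/ptrace_offsets.h>:
 *
 *	errno = 0;
 *	val = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IIP, 0);
 *	if (val == -1 && errno != 0)
 *		... a real failure, not a -1 data word ...
 */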
1232 
1233 
1234 /* "asmlinkage" so the input arguments are preserved... */
1235 
1236 asmlinkage long
1237 syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
1238 		     long arg4, long arg5, long arg6, long arg7,
1239 		     struct pt_regs regs)
1240 {
1241 	if (test_thread_flag(TIF_SYSCALL_TRACE))
1242 		if (tracehook_report_syscall_entry(&regs))
1243 			return -ENOSYS;
1244 
1245 	/* copy user rbs to kernel rbs */
1246 	if (test_thread_flag(TIF_RESTORE_RSE))
1247 		ia64_sync_krbs();
1248 
1249 	if (unlikely(current->audit_context)) {
1250 		long syscall;
1251 		int arch;
1252 
1253 		if (IS_IA32_PROCESS(&regs)) {
1254 			syscall = regs.r1;
1255 			arch = AUDIT_ARCH_I386;
1256 		} else {
1257 			syscall = regs.r15;
1258 			arch = AUDIT_ARCH_IA64;
1259 		}
1260 
1261 		audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
1262 	}
1263 
1264 	return 0;
1265 }
1266 
1267 /* "asmlinkage" so the input arguments are preserved... */
1268 
1269 asmlinkage void
1270 syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1271 		     long arg4, long arg5, long arg6, long arg7,
1272 		     struct pt_regs regs)
1273 {
1274 	int step;
1275 
1276 	if (unlikely(current->audit_context)) {
1277 		int success = AUDITSC_RESULT(regs.r10);
1278 		long result = regs.r8;
1279 
1280 		if (success != AUDITSC_SUCCESS)
1281 			result = -result;
1282 		audit_syscall_exit(success, result);
1283 	}
1284 
1285 	step = test_thread_flag(TIF_SINGLESTEP);
1286 	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1287 		tracehook_report_syscall_exit(&regs, step);
1288 
1289 	/* copy user rbs to kernel rbs */
1290 	if (test_thread_flag(TIF_RESTORE_RSE))
1291 		ia64_sync_krbs();
1292 }
1293 
1294 /* Utrace implementation starts here */
1295 struct regset_get {
1296 	void *kbuf;
1297 	void __user *ubuf;
1298 };
1299 
1300 struct regset_set {
1301 	const void *kbuf;
1302 	const void __user *ubuf;
1303 };
1304 
1305 struct regset_getset {
1306 	struct task_struct *target;
1307 	const struct user_regset *regset;
1308 	union {
1309 		struct regset_get get;
1310 		struct regset_set set;
1311 	} u;
1312 	unsigned int pos;
1313 	unsigned int count;
1314 	int ret;
1315 };
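
/*
 * Editor's note: the unwinder callbacks below (do_gpregs_get() and
 * friends) run via unw_init_running() and cannot return a value, so
 * their inputs and their result travel through this regset_getset
 * cookie; do_regset_call() reads back ->ret afterwards.
 */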
1316 
1317 static int
1318 access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
1319 		unsigned long addr, unsigned long *data, int write_access)
1320 {
1321 	struct pt_regs *pt;
1322 	unsigned long *ptr = NULL;
1323 	int ret;
1324 	char nat = 0;
1325 
1326 	pt = task_pt_regs(target);
1327 	switch (addr) {
1328 	case ELF_GR_OFFSET(1):
1329 		ptr = &pt->r1;
1330 		break;
1331 	case ELF_GR_OFFSET(2):
1332 	case ELF_GR_OFFSET(3):
1333 		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
1334 		break;
1335 	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
1336 		if (write_access) {
1337 			/* read NaT bit first: */
1338 			unsigned long dummy;
1339 
1340 			ret = unw_get_gr(info, addr/8, &dummy, &nat);
1341 			if (ret < 0)
1342 				return ret;
1343 		}
1344 		return unw_access_gr(info, addr/8, data, &nat, write_access);
1345 	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
1346 		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
1347 		break;
1348 	case ELF_GR_OFFSET(12):
1349 	case ELF_GR_OFFSET(13):
1350 		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
1351 		break;
1352 	case ELF_GR_OFFSET(14):
1353 		ptr = &pt->r14;
1354 		break;
1355 	case ELF_GR_OFFSET(15):
1356 		ptr = &pt->r15;
1357 	}
1358 	if (write_access)
1359 		*ptr = *data;
1360 	else
1361 		*data = *ptr;
1362 	return 0;
1363 }
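
/*
 * Editor's note (assumption: ELF_GR_OFFSET(i) == i * sizeof(long), as
 * the arithmetic here implies): that is why addr/8 can be passed to
 * unw_access_gr() directly as the register number for r4-r7, and why
 * the other cases can index into pt_regs with plain byte offsets.
 */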
1364 
1365 static int
1366 access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
1367 		unsigned long addr, unsigned long *data, int write_access)
1368 {
1369 	struct pt_regs *pt;
1370 	unsigned long *ptr = NULL;
1371 
1372 	pt = task_pt_regs(target);
1373 	switch (addr) {
1374 	case ELF_BR_OFFSET(0):
1375 		ptr = &pt->b0;
1376 		break;
1377 	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
1378 		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
1379 				     data, write_access);
1380 	case ELF_BR_OFFSET(6):
1381 		ptr = &pt->b6;
1382 		break;
1383 	case ELF_BR_OFFSET(7):
1384 		ptr = &pt->b7;
1385 	}
1386 	if (write_access)
1387 		*ptr = *data;
1388 	else
1389 		*data = *ptr;
1390 	return 0;
1391 }
1392 
1393 static int
1394 access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
1395 		unsigned long addr, unsigned long *data, int write_access)
1396 {
1397 	struct pt_regs *pt;
1398 	unsigned long cfm, urbs_end;
1399 	unsigned long *ptr = NULL;
1400 
1401 	pt = task_pt_regs(target);
1402 	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
1403 		switch (addr) {
1404 		case ELF_AR_RSC_OFFSET:
1405 			/* force PL3 */
1406 			if (write_access)
1407 				pt->ar_rsc = *data | (3 << 2);
1408 			else
1409 				*data = pt->ar_rsc;
1410 			return 0;
1411 		case ELF_AR_BSP_OFFSET:
1412 			/*
1413 			 * By convention, we use PT_AR_BSP to refer to
1414 			 * the end of the user-level backing store.
1415 			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
1416 			 * to get the real value of ar.bsp at the time
1417 			 * the kernel was entered.
1418 			 *
1419 			 * Furthermore, when changing the contents of
1420 			 * PT_AR_BSP (or PT_CFM) while the task is
1421 			 * blocked in a system call, convert the state
1422 			 * so that the non-system-call exit
1423 			 * path is used.  This ensures that the proper
1424 			 * state will be picked up when resuming
1425 			 * execution.  However, it *also* means that
1426 			 * once we write PT_AR_BSP/PT_CFM, it won't be
1427 			 * possible to modify the syscall arguments of
1428 			 * the pending system call any longer.  This
1429 			 * shouldn't be an issue because modifying
1430 			 * PT_AR_BSP/PT_CFM generally implies that
1431 			 * we're either abandoning the pending system
1432 	 * call or that we defer its re-execution
1433 			 * (e.g., due to GDB doing an inferior
1434 			 * function call).
1435 			 */
1436 			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1437 			if (write_access) {
1438 				if (*data != urbs_end) {
1439 					if (in_syscall(pt))
1440 						convert_to_non_syscall(target,
1441 								       pt,
1442 								       cfm);
1443 					/*
1444 					 * Simulate user-level write
1445 					 * of ar.bsp:
1446 					 */
1447 					pt->loadrs = 0;
1448 					pt->ar_bspstore = *data;
1449 				}
1450 			} else
1451 				*data = urbs_end;
1452 			return 0;
1453 		case ELF_AR_BSPSTORE_OFFSET:
1454 			ptr = &pt->ar_bspstore;
1455 			break;
1456 		case ELF_AR_RNAT_OFFSET:
1457 			ptr = &pt->ar_rnat;
1458 			break;
1459 		case ELF_AR_CCV_OFFSET:
1460 			ptr = &pt->ar_ccv;
1461 			break;
1462 		case ELF_AR_UNAT_OFFSET:
1463 			ptr = &pt->ar_unat;
1464 			break;
1465 		case ELF_AR_FPSR_OFFSET:
1466 			ptr = &pt->ar_fpsr;
1467 			break;
1468 		case ELF_AR_PFS_OFFSET:
1469 			ptr = &pt->ar_pfs;
1470 			break;
1471 		case ELF_AR_LC_OFFSET:
1472 			return unw_access_ar(info, UNW_AR_LC, data,
1473 					     write_access);
1474 		case ELF_AR_EC_OFFSET:
1475 			return unw_access_ar(info, UNW_AR_EC, data,
1476 					     write_access);
1477 		case ELF_AR_CSD_OFFSET:
1478 			ptr = &pt->ar_csd;
1479 			break;
1480 		case ELF_AR_SSD_OFFSET:
1481 			ptr = &pt->ar_ssd;
1482 		}
1483 	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
1484 		switch (addr) {
1485 		case ELF_CR_IIP_OFFSET:
1486 			ptr = &pt->cr_iip;
1487 			break;
1488 		case ELF_CFM_OFFSET:
1489 			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1490 			if (write_access) {
1491 				if (((cfm ^ *data) & PFM_MASK) != 0) {
1492 					if (in_syscall(pt))
1493 						convert_to_non_syscall(target,
1494 								       pt,
1495 								       cfm);
1496 					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
1497 						      | (*data & PFM_MASK));
1498 				}
1499 			} else
1500 				*data = cfm;
1501 			return 0;
1502 		case ELF_CR_IPSR_OFFSET:
1503 			if (write_access) {
1504 				unsigned long tmp = *data;
1505 				/* psr.ri==3 is a reserved value: SDM 2:25 */
1506 				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
1507 					tmp &= ~IA64_PSR_RI;
1508 				pt->cr_ipsr = ((tmp & IPSR_MASK)
1509 					       | (pt->cr_ipsr & ~IPSR_MASK));
1510 			} else
1511 				*data = (pt->cr_ipsr & IPSR_MASK);
1512 			return 0;
1513 		}
1514 	} else if (addr == ELF_NAT_OFFSET)
1515 		return access_nat_bits(target, pt, info,
1516 				       data, write_access);
1517 	else if (addr == ELF_PR_OFFSET)
1518 		ptr = &pt->pr;
1519 	else
1520 		return -1;
1521 
1522 	if (write_access)
1523 		*ptr = *data;
1524 	else
1525 		*data = *ptr;
1526 
1527 	return 0;
1528 }
1529 
1530 static int
1531 access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
1532 		unsigned long addr, unsigned long *data, int write_access)
1533 {
1534 	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
1535 		return access_elf_gpreg(target, info, addr, data, write_access);
1536 	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
1537 		return access_elf_breg(target, info, addr, data, write_access);
1538 	else
1539 		return access_elf_areg(target, info, addr, data, write_access);
1540 }
1541 
1542 void do_gpregs_get(struct unw_frame_info *info, void *arg)
1543 {
1544 	struct pt_regs *pt;
1545 	struct regset_getset *dst = arg;
1546 	elf_greg_t tmp[16];
1547 	unsigned int i, index, min_copy;
1548 
1549 	if (unw_unwind_to_user(info) < 0)
1550 		return;
1551 
1552 	/*
1553 	 * coredump format:
1554 	 *      r0-r31
1555 	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
1556 	 *      predicate registers (p0-p63)
1557 	 *      b0-b7
1558 	 *      ip cfm user-mask
1559 	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
1560 	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
1561 	 */
1562 
1563 
1564 	/* Skip r0 */
1565 	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1566 		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1567 						      &dst->u.get.kbuf,
1568 						      &dst->u.get.ubuf,
1569 						      0, ELF_GR_OFFSET(1));
1570 		if (dst->ret || dst->count == 0)
1571 			return;
1572 	}
1573 
1574 	/* gr1 - gr15 */
1575 	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1576 		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1577 		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
1578 			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
1579 		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1580 				index++)
1581 			if (access_elf_reg(dst->target, info, i,
1582 						&tmp[index], 0) < 0) {
1583 				dst->ret = -EIO;
1584 				return;
1585 			}
1586 		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1587 				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1588 				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1589 		if (dst->ret || dst->count == 0)
1590 			return;
1591 	}
1592 
1593 	/* r16-r31 */
1594 	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1595 		pt = task_pt_regs(dst->target);
1596 		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1597 				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
1598 				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1599 		if (dst->ret || dst->count == 0)
1600 			return;
1601 	}
1602 
1603 	/* nat, pr, b0 - b7 */
1604 	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1605 		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1606 		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
1607 			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
1608 		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1609 				index++)
1610 			if (access_elf_reg(dst->target, info, i,
1611 						&tmp[index], 0) < 0) {
1612 				dst->ret = -EIO;
1613 				return;
1614 			}
1615 		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1616 				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1617 				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1618 		if (dst->ret || dst->count == 0)
1619 			return;
1620 	}
1621 
1622 	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1623 	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1624 	 */
1625 	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1626 		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1627 		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
1628 			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
1629 		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1630 				index++)
1631 			if (access_elf_reg(dst->target, info, i,
1632 						&tmp[index], 0) < 0) {
1633 				dst->ret = -EIO;
1634 				return;
1635 			}
1636 		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1637 				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1638 				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1639 	}
1640 }
1641 
1642 void do_gpregs_set(struct unw_frame_info *info, void *arg)
1643 {
1644 	struct pt_regs *pt;
1645 	struct regset_getset *dst = arg;
1646 	elf_greg_t tmp[16];
1647 	unsigned int i, index;
1648 
1649 	if (unw_unwind_to_user(info) < 0)
1650 		return;
1651 
1652 	/* Skip r0 */
1653 	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1654 		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1655 						       &dst->u.set.kbuf,
1656 						       &dst->u.set.ubuf,
1657 						       0, ELF_GR_OFFSET(1));
1658 		if (dst->ret || dst->count == 0)
1659 			return;
1660 	}
1661 
1662 	/* gr1-gr15 */
1663 	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1664 		i = dst->pos;
1665 		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1666 		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1667 				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1668 				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1669 		if (dst->ret)
1670 			return;
1671 		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
1672 			if (access_elf_reg(dst->target, info, i,
1673 						&tmp[index], 1) < 0) {
1674 				dst->ret = -EIO;
1675 				return;
1676 			}
1677 		if (dst->count == 0)
1678 			return;
1679 	}
1680 
1681 	/* gr16-gr31 */
1682 	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1683 		pt = task_pt_regs(dst->target);
1684 		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1685 				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
1686 				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1687 		if (dst->ret || dst->count == 0)
1688 			return;
1689 	}
1690 
1691 	/* nat, pr, b0 - b7 */
1692 	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1693 		i = dst->pos;
1694 		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1695 		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1696 				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1697 				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1698 		if (dst->ret)
1699 			return;
1700 		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
1701 			if (access_elf_reg(dst->target, info, i,
1702 						&tmp[index], 1) < 0) {
1703 				dst->ret = -EIO;
1704 				return;
1705 			}
1706 		if (dst->count == 0)
1707 			return;
1708 	}
1709 
1710 	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1711 	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1712 	 */
1713 	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1714 		i = dst->pos;
1715 		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1716 		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1717 				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1718 				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1719 		if (dst->ret)
1720 			return;
1721 		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
1722 			if (access_elf_reg(dst->target, info, i,
1723 						&tmp[index], 1) < 0) {
1724 				dst->ret = -EIO;
1725 				return;
1726 			}
1727 	}
1728 }
1729 
1730 #define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))
1731 
1732 void do_fpregs_get(struct unw_frame_info *info, void *arg)
1733 {
1734 	struct regset_getset *dst = arg;
1735 	struct task_struct *task = dst->target;
1736 	elf_fpreg_t tmp[30];
1737 	int index, min_copy, i;
1738 
1739 	if (unw_unwind_to_user(info) < 0)
1740 		return;
1741 
1742 	/* Skip pos 0 and 1 */
1743 	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1744 		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1745 						      &dst->u.get.kbuf,
1746 						      &dst->u.get.ubuf,
1747 						      0, ELF_FP_OFFSET(2));
1748 		if (dst->count == 0 || dst->ret)
1749 			return;
1750 	}
1751 
1752 	/* fr2-fr31 */
1753 	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1754 		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);
1755 
1756 		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
1757 				dst->pos + dst->count);
1758 		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
1759 				index++)
1760 			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
1761 					 &tmp[index])) {
1762 				dst->ret = -EIO;
1763 				return;
1764 			}
1765 		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1766 				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1767 				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1768 		if (dst->count == 0 || dst->ret)
1769 			return;
1770 	}
1771 
1772 	/* fph */
1773 	if (dst->count > 0) {
1774 		ia64_flush_fph(dst->target);
1775 		if (task->thread.flags & IA64_THREAD_FPH_VALID)
1776 			dst->ret = user_regset_copyout(
1777 				&dst->pos, &dst->count,
1778 				&dst->u.get.kbuf, &dst->u.get.ubuf,
1779 				&dst->target->thread.fph,
1780 				ELF_FP_OFFSET(32), -1);
1781 		else
1782 			/* Zero fill instead.  */
1783 			dst->ret = user_regset_copyout_zero(
1784 				&dst->pos, &dst->count,
1785 				&dst->u.get.kbuf, &dst->u.get.ubuf,
1786 				ELF_FP_OFFSET(32), -1);
1787 	}
1788 }
1789 
1790 void do_fpregs_set(struct unw_frame_info *info, void *arg)
1791 {
1792 	struct regset_getset *dst = arg;
1793 	elf_fpreg_t fpreg, tmp[30];
1794 	int index, start, end;
1795 
1796 	if (unw_unwind_to_user(info) < 0)
1797 		return;
1798 
1799 	/* Skip pos 0 and 1 */
1800 	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1801 		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1802 						       &dst->u.set.kbuf,
1803 						       &dst->u.set.ubuf,
1804 						       0, ELF_FP_OFFSET(2));
1805 		if (dst->count == 0 || dst->ret)
1806 			return;
1807 	}
1808 
1809 	/* fr2-fr31 */
1810 	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1811 		start = dst->pos;
1812 		end = min(((unsigned int)ELF_FP_OFFSET(32)),
1813 			 dst->pos + dst->count);
1814 		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1815 				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1816 				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1817 		if (dst->ret)
1818 			return;
1819 
1820 		if (start & 0xF) { /* partial first reg: user writes high half only */
1821 			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
1822 					 &fpreg)) {
1823 				dst->ret = -EIO;
1824 				return;
1825 			}
1826 			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
1827 				= fpreg.u.bits[0];
1828 			start &= ~0xFUL;
1829 		}
1830 		if (end & 0xF) { /* partial last reg: user writes low half only */
1831 			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
1832 					&fpreg)) {
1833 				dst->ret = -EIO;
1834 				return;
1835 			}
1836 			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
1837 				= fpreg.u.bits[1];
1838 			end = (end + 0xF) & ~0xFUL;
1839 		}
1840 
1841 		for ( ; start < end; start += sizeof(elf_fpreg_t)) {
1842 			index = start / sizeof(elf_fpreg_t);
1843 			if (unw_set_fr(info, index, tmp[index - 2])) {
1844 				dst->ret = -EIO;
1845 				return;
1846 			}
1847 		}
1848 		if (dst->ret || dst->count == 0)
1849 			return;
1850 	}
1851 
1852 	/* fph */
1853 	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
1854 		ia64_sync_fph(dst->target);
1855 		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1856 						&dst->u.set.kbuf,
1857 						&dst->u.set.ubuf,
1858 						&dst->target->thread.fph,
1859 						ELF_FP_OFFSET(32), -1);
1860 	}
1861 }
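/*
 * Worked example for the partial-register merge in do_fpregs_set(),
 * assuming sizeof(elf_fpreg_t) == 16: a write landing at pos 168
 * starts at byte 8 of fr10 (168 / 16 == 10, 168 & 0xF == 8), so only
 * the high half (u.bits[1]) arrives from the user.  The current low
 * half is fetched with unw_get_fr() into tmp[10 - 2].u.bits[0] before
 * the loop stores fr10 whole with unw_set_fr().  The mirror case at
 * the end of the range preserves the current high half instead.
 */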
1862 
1863 static int
1864 do_regset_call(void (*call)(struct unw_frame_info *, void *),
1865 	       struct task_struct *target,
1866 	       const struct user_regset *regset,
1867 	       unsigned int pos, unsigned int count,
1868 	       const void *kbuf, const void __user *ubuf)
1869 {
1870 	struct regset_getset info = { .target = target, .regset = regset,
1871 				 .pos = pos, .count = count,
1872 				 .u.set = { .kbuf = kbuf, .ubuf = ubuf },
1873 				 .ret = 0 };
1874 
1875 	if (target == current)
1876 		unw_init_running(call, &info);
1877 	else {
1878 		struct unw_frame_info ufi;
1879 		memset(&ufi, 0, sizeof(ufi));
1880 		unw_init_from_blocked_task(&ufi, target);
1881 		(*call)(&ufi, &info);
1882 	}
1883 
1884 	return info.ret;
1885 }
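/*
 * Usage sketch (matches the callers below): every regset entry point
 * funnels through do_regset_call() so the unwinder is primed for
 * either a live or a stopped task, e.g.
 *
 *	return do_regset_call(do_gpregs_get, target, regset,
 *			      pos, count, kbuf, ubuf);
 *
 * For target == current a frame must be materialized on the running
 * stack via unw_init_running(); a stopped tracee is unwound from its
 * blocked context instead.
 */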
1886 
1887 static int
1888 gpregs_get(struct task_struct *target,
1889 	   const struct user_regset *regset,
1890 	   unsigned int pos, unsigned int count,
1891 	   void *kbuf, void __user *ubuf)
1892 {
1893 	return do_regset_call(do_gpregs_get, target, regset, pos, count,
1894 		kbuf, ubuf);
1895 }
1896 
1897 static int gpregs_set(struct task_struct *target,
1898 		const struct user_regset *regset,
1899 		unsigned int pos, unsigned int count,
1900 		const void *kbuf, const void __user *ubuf)
1901 {
1902 	return do_regset_call(do_gpregs_set, target, regset, pos, count,
1903 		kbuf, ubuf);
1904 }
1905 
1906 static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
1907 {
1908 	do_sync_rbs(info, ia64_sync_user_rbs);
1909 }
1910 
1911 /*
1912  * This is called to write back the register backing store.
1913  * ptrace arranges this before the traced thread stops, so that a
1914  * tracer reading the user memory afterwards sees current register data.
1915  */
1916 static int
1917 gpregs_writeback(struct task_struct *target,
1918 		 const struct user_regset *regset,
1919 		 int now)
1920 {
1921 	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
1922 		return 0;
1923 	set_notify_resume(target);
1924 	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
1925 		NULL, NULL);
1926 }
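/*
 * Note on the idiom above: test_and_set_tsk_thread_flag() makes the
 * writeback idempotent per stop -- the first caller sets
 * TIF_RESTORE_RSE, queues the notify-resume work and performs the
 * sync; any repeat caller sees the flag already set and returns 0
 * without redoing the unwind.
 */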
1927 
1928 static int
1929 fpregs_active(struct task_struct *target, const struct user_regset *regset)
1930 {
1931 	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
1932 }
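/*
 * fpregs_active() lets the regset core (e.g. the core-dump writer)
 * trim its view: 32 slots cover fr0-fr31 when the high partition has
 * never been dirtied, 128 once IA64_THREAD_FPH_VALID says thread.fph
 * holds live fr32-fr127 state.
 */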
1933 
1934 static int fpregs_get(struct task_struct *target,
1935 		const struct user_regset *regset,
1936 		unsigned int pos, unsigned int count,
1937 		void *kbuf, void __user *ubuf)
1938 {
1939 	return do_regset_call(do_fpregs_get, target, regset, pos, count,
1940 		kbuf, ubuf);
1941 }
1942 
1943 static int fpregs_set(struct task_struct *target,
1944 		const struct user_regset *regset,
1945 		unsigned int pos, unsigned int count,
1946 		const void *kbuf, const void __user *ubuf)
1947 {
1948 	return do_regset_call(do_fpregs_set, target, regset, pos, count,
1949 		kbuf, ubuf);
1950 }
1951 
1952 static int
1953 access_uarea(struct task_struct *child, unsigned long addr,
1954 	      unsigned long *data, int write_access)
1955 {
1956 	unsigned int pos = -1; /* wraps to UINT_MAX: no regset position matched yet */
1957 	int ret;
1958 	unsigned long *ptr, regnum;
1959 
1960 	if ((addr & 0x7) != 0) {
1961 		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
1962 		return -1;
1963 	}
1964 	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
1965 		(addr >= PT_R7 + 8 && addr < PT_B1) ||
1966 		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
1967 		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
1968 		dprintk("ptrace: rejecting access to register "
1969 					"address 0x%lx\n", addr);
1970 		return -1;
1971 	}
1972 
1973 	switch (addr) {
1974 	case PT_F32 ... (PT_F127 + 15):
1975 		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
1976 		break;
1977 	case PT_F2 ... (PT_F5 + 15):
1978 		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
1979 		break;
1980 	case PT_F10 ... (PT_F31 + 15):
1981 		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
1982 		break;
1983 	case PT_F6 ... (PT_F9 + 15):
1984 		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
1985 		break;
1986 	}
1987 
1988 	if (pos != -1) {
1989 		if (write_access)
1990 			ret = fpregs_set(child, NULL, pos,
1991 				sizeof(unsigned long), data, NULL);
1992 		else
1993 			ret = fpregs_get(child, NULL, pos,
1994 				sizeof(unsigned long), data, NULL);
1995 		if (ret != 0)
1996 			return -1;
1997 		return 0;
1998 	}
1999 
2000 	switch (addr) {
2001 	case PT_NAT_BITS:
2002 		pos = ELF_NAT_OFFSET;
2003 		break;
2004 	case PT_R4 ... PT_R7:
2005 		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
2006 		break;
2007 	case PT_B1 ... PT_B5:
2008 		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
2009 		break;
2010 	case PT_AR_EC:
2011 		pos = ELF_AR_EC_OFFSET;
2012 		break;
2013 	case PT_AR_LC:
2014 		pos = ELF_AR_LC_OFFSET;
2015 		break;
2016 	case PT_CR_IPSR:
2017 		pos = ELF_CR_IPSR_OFFSET;
2018 		break;
2019 	case PT_CR_IIP:
2020 		pos = ELF_CR_IIP_OFFSET;
2021 		break;
2022 	case PT_CFM:
2023 		pos = ELF_CFM_OFFSET;
2024 		break;
2025 	case PT_AR_UNAT:
2026 		pos = ELF_AR_UNAT_OFFSET;
2027 		break;
2028 	case PT_AR_PFS:
2029 		pos = ELF_AR_PFS_OFFSET;
2030 		break;
2031 	case PT_AR_RSC:
2032 		pos = ELF_AR_RSC_OFFSET;
2033 		break;
2034 	case PT_AR_RNAT:
2035 		pos = ELF_AR_RNAT_OFFSET;
2036 		break;
2037 	case PT_AR_BSPSTORE:
2038 		pos = ELF_AR_BSPSTORE_OFFSET;
2039 		break;
2040 	case PT_PR:
2041 		pos = ELF_PR_OFFSET;
2042 		break;
2043 	case PT_B6:
2044 		pos = ELF_BR_OFFSET(6);
2045 		break;
2046 	case PT_AR_BSP:
2047 		pos = ELF_AR_BSP_OFFSET;
2048 		break;
2049 	case PT_R1 ... PT_R3:
2050 		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
2051 		break;
2052 	case PT_R12 ... PT_R15:
2053 		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
2054 		break;
2055 	case PT_R8 ... PT_R11:
2056 		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
2057 		break;
2058 	case PT_R16 ... PT_R31:
2059 		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
2060 		break;
2061 	case PT_AR_CCV:
2062 		pos = ELF_AR_CCV_OFFSET;
2063 		break;
2064 	case PT_AR_FPSR:
2065 		pos = ELF_AR_FPSR_OFFSET;
2066 		break;
2067 	case PT_B0:
2068 		pos = ELF_BR_OFFSET(0);
2069 		break;
2070 	case PT_B7:
2071 		pos = ELF_BR_OFFSET(7);
2072 		break;
2073 	case PT_AR_CSD:
2074 		pos = ELF_AR_CSD_OFFSET;
2075 		break;
2076 	case PT_AR_SSD:
2077 		pos = ELF_AR_SSD_OFFSET;
2078 		break;
2079 	}
2080 
2081 	if (pos != -1) {
2082 		if (write_access)
2083 			ret = gpregs_set(child, NULL, pos,
2084 				sizeof(unsigned long), data, NULL);
2085 		else
2086 			ret = gpregs_get(child, NULL, pos,
2087 				sizeof(unsigned long), data, NULL);
2088 		if (ret != 0)
2089 			return -1;
2090 		return 0;
2091 	}
2092 
2093 	/* access debug registers */
2094 	if (addr >= PT_IBR) {
2095 		regnum = (addr - PT_IBR) >> 3;
2096 		ptr = &child->thread.ibr[0];
2097 	} else {
2098 		regnum = (addr - PT_DBR) >> 3;
2099 		ptr = &child->thread.dbr[0];
2100 	}
2101 
2102 	if (regnum >= 8) {
2103 		dprintk("ptrace: rejecting access to register "
2104 				"address 0x%lx\n", addr);
2105 		return -1;
2106 	}
2107 #ifdef CONFIG_PERFMON
2108 	/*
2109 	 * Check if debug registers are used by perfmon. This
2110 	 * test must be done once we know that we can do the
2111 	 * operation, i.e. the arguments are all valid, but
2112 	 * before we start modifying the state.
2113 	 *
2114 	 * Perfmon needs to keep a count of how many processes
2115 	 * are trying to modify the debug registers for system
2116 	 * wide monitoring sessions.
2117 	 *
2118 	 * We also include read accesses here, because even a
2119 	 * read may cause the PMU-installed debug register
2120 	 * state (dbr[], ibr[]) to be reset. Perfmon uses the
2121 	 * same two arrays, but without setting
2122 	 * IA64_THREAD_DBG_VALID; in that case the registers
2123 	 * are restored by the PMU context switch code.
2124 	 */
2125 	if (pfm_use_debug_registers(child))
2126 		return -1;
2127 #endif
2128 
2129 	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
2130 		child->thread.flags |= IA64_THREAD_DBG_VALID;
2131 		memset(child->thread.dbr, 0,
2132 				sizeof(child->thread.dbr));
2133 		memset(child->thread.ibr, 0,
2134 				sizeof(child->thread.ibr));
2135 	}
2136 
2137 	ptr += regnum;
2138 
2139 	if ((regnum & 1) && write_access) {
2140 		/* odd regs are control words: strip the kernel-level (plm0-plm2) enable bits */
2141 		*ptr = *data & ~(7UL << 56);
2142 		return 0;
2143 	}
2144 	if (write_access)
2145 		*ptr = *data;
2146 	else
2147 		*data = *ptr;
2148 	return 0;
2149 }
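/*
 * Userspace view of the PT_* addresses serviced by access_uarea() --
 * a build-excluded sketch, not kernel code.  A PTRACE_PEEKUSER with
 * addr = PT_CR_IIP is translated above to ELF_CR_IIP_OFFSET and read
 * back through gpregs_get().
 */
#if 0
#include <errno.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace_offsets.h>

static long peek_iip(pid_t pid)
{
	errno = 0;	/* PEEKUSER returns the data in-band, so clear errno first */
	return ptrace(PTRACE_PEEKUSER, pid, (void *)PT_CR_IIP, 0);
}
#endif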
2150 
2151 static const struct user_regset native_regsets[] = {
2152 	{
2153 		.core_note_type = NT_PRSTATUS,
2154 		.n = ELF_NGREG,
2155 		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
2156 		.get = gpregs_get, .set = gpregs_set,
2157 		.writeback = gpregs_writeback
2158 	},
2159 	{
2160 		.core_note_type = NT_PRFPREG,
2161 		.n = ELF_NFPREG,
2162 		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
2163 		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
2164 	},
2165 };
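/*
 * Each descriptor above serves double duty: it backs the register
 * access paths in this file and it names the ELF note written into
 * core dumps -- NT_PRSTATUS spans ELF_NGREG elf_greg_t slots,
 * NT_PRFPREG spans ELF_NFPREG elf_fpreg_t slots, possibly trimmed by
 * fpregs_active().
 */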
2166 
2167 static const struct user_regset_view user_ia64_view = {
2168 	.name = "ia64",
2169 	.e_machine = EM_IA_64,
2170 	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2171 };
2172 
2173 const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
2174 {
2175 #ifdef CONFIG_IA32_SUPPORT
2176 	extern const struct user_regset_view user_ia32_view;
2177 	if (IS_IA32_PROCESS(task_pt_regs(tsk)))
2178 		return &user_ia32_view;
2179 #endif
2180 	return &user_ia64_view;
2181 }
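/*
 * The view is chosen per task, not per kernel: with IA32 emulation
 * compiled in, a tracee running in ia32 mode reports the x86 regset
 * layout (user_ia32_view) so 32-bit debuggers see the ABI they
 * expect, while native tasks get user_ia64_view.
 */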
2182 
2183 struct syscall_get_set_args {
2184 	unsigned int i;
2185 	unsigned int n;
2186 	unsigned long *args;
2187 	struct pt_regs *regs;
2188 	int rw;
2189 };
2190 
2191 static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
2192 {
2193 	struct syscall_get_set_args *args = data;
2194 	struct pt_regs *pt = args->regs;
2195 	unsigned long *krbs, cfm, ndirty;
2196 	int i, count;
2197 
2198 	if (unw_unwind_to_user(info) < 0)
2199 		return;
2200 
2201 	cfm = pt->cr_ifs;
2202 	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
2203 	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
2204 
2205 	count = 0;
2206 	if (in_syscall(pt))
2207 		count = min_t(int, args->n, cfm & 0x7f);
2208 
2209 	for (i = 0; i < count; i++) {
2210 		if (args->rw)
2211 			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
2212 				args->args[i];
2213 		else
2214 			args->args[i] = *ia64_rse_skip_regs(krbs,
2215 				ndirty + i + args->i);
2216 	}
2217 
2218 	if (!args->rw) {
2219 		while (i < args->n) {
2220 			args->args[i] = 0;
2221 			i++;
2222 		}
2223 	}
2224 }
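/*
 * A worked reading of the RSE arithmetic above: pt->loadrs holds the
 * dirty-partition size shifted left by 16 (the ar.rsc.loadrs field),
 * so (pt->loadrs >> 16) is that size in bytes and (pt->loadrs >> 19)
 * the same in 8-byte slots.  ia64_rse_num_regs() converts the slot
 * distance into a register count, discounting the NaT-collection
 * slot the RSE interleaves once every 63 registers, and cfm & 0x7f
 * (sof) caps the copy at the registers the syscall frame really has.
 */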
2225 
2226 void ia64_syscall_get_set_arguments(struct task_struct *task,
2227 	struct pt_regs *regs, unsigned int i, unsigned int n,
2228 	unsigned long *args, int rw)
2229 {
2230 	struct syscall_get_set_args data = {
2231 		.i = i,
2232 		.n = n,
2233 		.args = args,
2234 		.regs = regs,
2235 		.rw = rw,
2236 	};
2237 
2238 	if (task == current)
2239 		unw_init_running(syscall_get_set_args_cb, &data);
2240 	else {
2241 		struct unw_frame_info ufi;
2242 		memset(&ufi, 0, sizeof(ufi));
2243 		unw_init_from_blocked_task(&ufi, task);
2244 		syscall_get_set_args_cb(&ufi, &data);
2245 	}
2246 }
2247
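/*
 * Usage sketch (hedged): the asm/syscall.h helpers are expected to be
 * thin wrappers around this entry point, along the lines of the
 * following build-excluded fragment.
 */
#if 0
static inline void syscall_get_arguments(struct task_struct *task,
					 struct pt_regs *regs,
					 unsigned int i, unsigned int n,
					 unsigned long *args)
{
	/* read (rw == 0) n argument registers starting at position i */
	ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
}
#endif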