xref: /openbmc/linux/arch/ia64/kernel/ptrace.c (revision 64c70b1c)
1 /*
2  * Kernel support for the ptrace() and syscall tracing interfaces.
3  *
4  * Copyright (C) 1999-2005 Hewlett-Packard Co
5  *	David Mosberger-Tang <davidm@hpl.hp.com>
6  *
7  * Derived from the x86 and Alpha versions.
8  */
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/mm.h>
13 #include <linux/errno.h>
14 #include <linux/ptrace.h>
15 #include <linux/smp_lock.h>
16 #include <linux/user.h>
17 #include <linux/security.h>
18 #include <linux/audit.h>
19 #include <linux/signal.h>
20 
21 #include <asm/pgtable.h>
22 #include <asm/processor.h>
23 #include <asm/ptrace_offsets.h>
24 #include <asm/rse.h>
25 #include <asm/system.h>
26 #include <asm/uaccess.h>
27 #include <asm/unwind.h>
28 #ifdef CONFIG_PERFMON
29 #include <asm/perfmon.h>
30 #endif
31 
32 #include "entry.h"
33 
34 /*
35  * Bits in the PSR that we allow ptrace() to change:
36  *	be, up, ac, mfl, mfh (the user mask; five bits total)
37  *	db (debug breakpoint fault; one bit)
38  *	id (instruction debug fault disable; one bit)
39  *	dd (data debug fault disable; one bit)
40  *	ri (restart instruction; two bits)
41  *	is (instruction set; one bit)
42  */
43 #define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
44 		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
45 
46 #define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
47 #define PFM_MASK	MASK(38)
48 
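/*
 * A condensed restatement of how these masks are used.  MASK(3) ==
 * 0x7; IPSR_MASK selects the PSR bits a debugger may touch.
 * merge_ipsr() is an illustrative helper (not a kernel function)
 * showing the write policy of the PT_CR_IPSR case in access_uarea()
 * below: user-supplied bits pass through the mask, everything else
 * keeps the kernel's value.
 */
static inline unsigned long
merge_ipsr (unsigned long cur_ipsr, unsigned long user_val)
{
	return (user_val & IPSR_MASK) | (cur_ipsr & ~IPSR_MASK);
}
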
49 #define PTRACE_DEBUG	0
50 
51 #if PTRACE_DEBUG
52 # define dprintk(format...)	printk(format)
53 # define inline
54 #else
55 # define dprintk(format...)
56 #endif
57 
58 /* Return TRUE if PT was created due to kernel-entry via a system-call.  */
59 
60 static inline int
61 in_syscall (struct pt_regs *pt)
62 {
63 	return (long) pt->cr_ifs >= 0;
64 }
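
/*
 * A userspace model of the convention behind in_syscall(): the
 * syscall entry path stores an ar.pfs-derived value in pt->cr_ifs
 * with bit 63 (the "valid" bit) clear, while interruptions save a
 * cr.ifs value with bit 63 set, so the sign of cr_ifs tells the two
 * apart.  Minimal sketch; the constants are made up:
 */
#include <stdio.h>

static int
in_syscall_model (unsigned long cr_ifs)
{
	return (long) cr_ifs >= 0;	/* bit 63 clear => syscall entry */
}

int
main (void)
{
	printf("%d\n", in_syscall_model(0x8000000000000005UL)); /* 0: interruption */
	printf("%d\n", in_syscall_model(0x0000000000000005UL)); /* 1: syscall */
	return 0;
}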
65 
66 /*
67  * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
68  * bitset where bit i is set iff the NaT bit of register i is set.
69  */
70 unsigned long
71 ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
72 {
73 #	define GET_BITS(first, last, unat)				\
74 	({								\
75 		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
76 		unsigned long nbits = (last - first + 1);		\
77 		unsigned long mask = MASK(nbits) << first;		\
78 		unsigned long dist;					\
79 		if (bit < first)					\
80 			dist = 64 + bit - first;			\
81 		else							\
82 			dist = bit - first;				\
83 		ia64_rotr(unat, dist) & mask;				\
84 	})
85 	unsigned long val;
86 
87 	/*
88 	 * Registers that are stored consecutively in struct pt_regs
89 	 * can be handled in parallel.  If the register order in
90 	 * struct pt_regs changes, this code MUST be updated.
91 	 */
92 	val  = GET_BITS( 1,  1, scratch_unat);
93 	val |= GET_BITS( 2,  3, scratch_unat);
94 	val |= GET_BITS(12, 13, scratch_unat);
95 	val |= GET_BITS(14, 14, scratch_unat);
96 	val |= GET_BITS(15, 15, scratch_unat);
97 	val |= GET_BITS( 8, 11, scratch_unat);
98 	val |= GET_BITS(16, 31, scratch_unat);
99 	return val;
100 
101 #	undef GET_BITS
102 }
103 
104 /*
105  * Set the NaT bits for the scratch registers according to NAT and
106  * return the resulting unat (assuming the scratch registers are
107  * stored in PT).
108  */
109 unsigned long
110 ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
111 {
112 #	define PUT_BITS(first, last, nat)				\
113 	({								\
114 		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
115 		unsigned long nbits = (last - first + 1);		\
116 		unsigned long mask = MASK(nbits) << first;		\
117 		long dist;						\
118 		if (bit < first)					\
119 			dist = 64 + bit - first;			\
120 		else							\
121 			dist = bit - first;				\
122 		ia64_rotl(nat & mask, dist);				\
123 	})
124 	unsigned long scratch_unat;
125 
126 	/*
127 	 * Registers that are stored consecutively in struct pt_regs
128 	 * can be handled in parallel.  If the register order in
129 	 * struct pt_regs changes, this code MUST be updated.
130 	 */
131 	scratch_unat  = PUT_BITS( 1,  1, nat);
132 	scratch_unat |= PUT_BITS( 2,  3, nat);
133 	scratch_unat |= PUT_BITS(12, 13, nat);
134 	scratch_unat |= PUT_BITS(14, 14, nat);
135 	scratch_unat |= PUT_BITS(15, 15, nat);
136 	scratch_unat |= PUT_BITS( 8, 11, nat);
137 	scratch_unat |= PUT_BITS(16, 31, nat);
138 
139 	return scratch_unat;
140 
141 #	undef PUT_BITS
142 }
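
/*
 * A minimal userspace model of the GET_BITS/PUT_BITS rotation trick.
 * The real code asks ia64_unat_pos() where a register's NaT bit sits
 * inside ar.unat; the position used below is made up.  It shows that
 * PUT_BITS (rotate left by dist) and GET_BITS (rotate right by dist,
 * then mask) are inverses, so NaT bits survive the round trip:
 */
#include <assert.h>

static unsigned long
rotl64 (unsigned long x, unsigned int n)
{
	return n ? (x << n) | (x >> (64 - n)) : x;
}

static unsigned long
rotr64 (unsigned long x, unsigned int n)
{
	return n ? (x >> n) | (x << (64 - n)) : x;
}

int
main (void)
{
	unsigned long nat = 0xf0;		/* NaT set for r4-r7, say */
	unsigned long first = 4, mask = 0xfUL << first;
	unsigned long bit = 37;			/* hypothetical unat position */
	unsigned long dist = bit - first;	/* bit >= first in this example */

	unsigned long unat = rotl64(nat & mask, dist);		/* PUT_BITS */
	assert((rotr64(unat, dist) & mask) == (nat & mask));	/* GET_BITS */
	return 0;
}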
143 
144 #define IA64_MLX_TEMPLATE	0x2
145 #define IA64_MOVL_OPCODE	6
146 
147 void
148 ia64_increment_ip (struct pt_regs *regs)
149 {
150 	unsigned long w0, ri = ia64_psr(regs)->ri + 1;
151 
152 	if (ri > 2) {
153 		ri = 0;
154 		regs->cr_iip += 16;
155 	} else if (ri == 2) {
156 		get_user(w0, (char __user *) regs->cr_iip + 0);
157 		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
158 			/*
159 			 * rfi'ing to slot 2 of an MLX bundle causes
160 			 * an illegal operation fault.  We don't want
161 			 * that to happen...
162 			 */
163 			ri = 0;
164 			regs->cr_iip += 16;
165 		}
166 	}
167 	ia64_psr(regs)->ri = ri;
168 }
169 
170 void
171 ia64_decrement_ip (struct pt_regs *regs)
172 {
173 	unsigned long w0, ri = ia64_psr(regs)->ri - 1;
174 
175 	if (ia64_psr(regs)->ri == 0) {
176 		regs->cr_iip -= 16;
177 		ri = 2;
178 		get_user(w0, (char __user *) regs->cr_iip + 0);
179 		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
180 			/*
181 			 * rfi'ing to slot 2 of an MLX bundle causes
182 			 * an illegal operation fault.  We don't want
183 			 * that to happen...
184 			 */
185 			ri = 1;
186 		}
187 	}
188 	ia64_psr(regs)->ri = ri;
189 }
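
/*
 * A userspace model of the stepping logic above: an IA-64 IP is a
 * 16-byte bundle address plus a slot number 0-2 kept in psr.ri.
 * Template 0x2 (MLX) fuses slots 1 and 2 into a single "movl", so
 * slot 2 of such a bundle is never a valid restart point.  is_mlx
 * stands in for the template check done on the bundle's first word:
 */
#include <stdio.h>

struct model_ip { unsigned long iip; unsigned int ri; };

static void
increment_ip_model (struct model_ip *p, int is_mlx)
{
	unsigned int ri = p->ri + 1;

	if (ri > 2 || (ri == 2 && is_mlx)) {
		ri = 0;
		p->iip += 16;		/* advance to the next bundle */
	}
	p->ri = ri;
}

int
main (void)
{
	struct model_ip p = { 0x4000, 0 };

	increment_ip_model(&p, 0);	/* -> slot 1 */
	increment_ip_model(&p, 1);	/* MLX: skip slot 2, next bundle */
	printf("iip=%#lx ri=%u\n", p.iip, p.ri);	/* iip=0x4010 ri=0 */
	return 0;
}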
190 
191 /*
192  * This routine is used to read the rnat bits that are stored on the
193  * kernel backing store.  Since, in general, the alignments of the user
194  * and kernel backing stores differ, this is not completely trivial.  In
195  * essence, we need to construct the user RNAT based on up to two
196  * kernel RNAT values and/or the RNAT value saved in the child's
197  * pt_regs.
198  *
199  * user rbs
200  *
201  * +--------+ <-- lowest address
202  * | slot62 |
203  * +--------+
204  * |  rnat  | 0x....1f8
205  * +--------+
206  * | slot00 | \
207  * +--------+ |
208  * | slot01 | > child_regs->ar_rnat
209  * +--------+ |
210  * | slot02 | /				kernel rbs
211  * +--------+				+--------+
212  *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
213  * +- - - - +				+--------+
214  *					| slot62 |
215  * +- - - - +				+--------+
216  *					|  rnat	 |
217  * +- - - - +				+--------+
218  *   vrnat				| slot00 |
219  * +- - - - +				+--------+
220  *					=	 =
221  *					+--------+
222  *					| slot00 | \
223  *					+--------+ |
224  *					| slot01 | > child_stack->ar_rnat
225  *					+--------+ |
226  *					| slot02 | /
227  *					+--------+
228  *						  <--- child_stack->ar_bspstore
229  *
230  * The way to think of this code is as follows: bit 0 in the user rnat
231  * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
232  * values.  The kernel rnat value holding this bit is stored in
233  * variable rnat0.  rnat1 is loaded with the kernel rnat value that
234  * forms the upper bits of the user rnat value.
235  *
236  * Boundary cases:
237  *
238  * o when reading the rnat "below" the first rnat slot on the kernel
239  *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
240  *   merged in from pt->ar_rnat.
241  *
242  * o when reading the rnat "above" the last rnat slot on the kernel
243  *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
244  */
245 static unsigned long
246 get_rnat (struct task_struct *task, struct switch_stack *sw,
247 	  unsigned long *krbs, unsigned long *urnat_addr,
248 	  unsigned long *urbs_end)
249 {
250 	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
251 	unsigned long umask = 0, mask, m;
252 	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
253 	long num_regs, nbits;
254 	struct pt_regs *pt;
255 
256 	pt = task_pt_regs(task);
257 	kbsp = (unsigned long *) sw->ar_bspstore;
258 	ubspstore = (unsigned long *) pt->ar_bspstore;
259 
260 	if (urbs_end < urnat_addr)
261 		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
262 	else
263 		nbits = 63;
264 	mask = MASK(nbits);
265 	/*
266 	 * First, figure out which bit number slot 0 in user-land maps
267 	 * to in the kernel rnat.  Do this by figuring out how many
268 	 * register slots we're beyond the user's backingstore and
269 	 * then computing the equivalent address in kernel space.
270 	 */
271 	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
272 	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
273 	shift = ia64_rse_slot_num(slot0_kaddr);
274 	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
275 	rnat0_kaddr = rnat1_kaddr - 64;
276 
277 	if (ubspstore + 63 > urnat_addr) {
278 		/* some bits need to be merged in from pt->ar_rnat */
279 		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
280 		urnat = (pt->ar_rnat & umask);
281 		mask &= ~umask;
282 		if (!mask)
283 			return urnat;
284 	}
285 
286 	m = mask << shift;
287 	if (rnat0_kaddr >= kbsp)
288 		rnat0 = sw->ar_rnat;
289 	else if (rnat0_kaddr > krbs)
290 		rnat0 = *rnat0_kaddr;
291 	urnat |= (rnat0 & m) >> shift;
292 
293 	m = mask >> (63 - shift);
294 	if (rnat1_kaddr >= kbsp)
295 		rnat1 = sw->ar_rnat;
296 	else if (rnat1_kaddr > krbs)
297 		rnat1 = *rnat1_kaddr;
298 	urnat |= (rnat1 & m) << (63 - shift);
299 	return urnat;
300 }
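
/*
 * get_rnat()/put_rnat() lean on the RSE address arithmetic from
 * asm/rse.h: every 64th 8-byte slot of a backing store -- the slot
 * whose address has bits 3..8 all set -- is an RNaT collection slot.
 * A userspace restatement of those helpers, modeled on that header;
 * the demo addresses are fabricated:
 */
#include <stdio.h>

static unsigned long
rse_slot_num (unsigned long *addr)
{
	return ((unsigned long) addr >> 3) & 0x3f;
}

static unsigned long *
rse_rnat_addr (unsigned long *slot_addr)
{
	return (unsigned long *) ((unsigned long) slot_addr | (0x3fUL << 3));
}

static unsigned long
rse_num_regs (unsigned long *bspstore, unsigned long *bsp)
{
	unsigned long slots = bsp - bspstore;

	return slots - (rse_slot_num(bspstore) + slots) / 0x40;
}

static unsigned long *
rse_skip_regs (unsigned long *addr, long num_regs)
{
	long delta = rse_slot_num(addr) + num_regs;

	if (num_regs < 0)
		delta -= 0x3e;
	return addr + num_regs + delta / 0x3f;
}

int
main (void)
{
	unsigned long *bsp = (unsigned long *) 0x9fff1000UL;

	printf("covering rnat slot: %p\n", (void *) rse_rnat_addr(bsp));
	printf("10 regs on:         %p\n", (void *) rse_skip_regs(bsp, 10));
	printf("regs in 70 slots:   %lu\n", rse_num_regs(bsp, bsp + 70));
	return 0;
}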
301 
302 /*
303  * The reverse of get_rnat.
304  */
305 static void
306 put_rnat (struct task_struct *task, struct switch_stack *sw,
307 	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
308 	  unsigned long *urbs_end)
309 {
310 	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
311 	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
312 	long num_regs, nbits;
313 	struct pt_regs *pt;
314 	unsigned long cfm, *urbs_kargs;
315 
316 	pt = task_pt_regs(task);
317 	kbsp = (unsigned long *) sw->ar_bspstore;
318 	ubspstore = (unsigned long *) pt->ar_bspstore;
319 
320 	urbs_kargs = urbs_end;
321 	if (in_syscall(pt)) {
322 		/*
323 		 * If entered via syscall, don't allow user to set rnat bits
324 		 * for syscall args.
325 		 */
326 		cfm = pt->cr_ifs;
327 		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
328 	}
329 
330 	if (urbs_kargs >= urnat_addr)
331 		nbits = 63;
332 	else {
333 		if ((urnat_addr - 63) >= urbs_kargs)
334 			return;
335 		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
336 	}
337 	mask = MASK(nbits);
338 
339 	/*
340 	 * First, figure out which bit number slot 0 in user-land maps
341 	 * to in the kernel rnat.  Do this by figuring out how many
342 	 * register slots we're beyond the user's backingstore and
343 	 * then computing the equivalent address in kernel space.
344 	 */
345 	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
346 	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
347 	shift = ia64_rse_slot_num(slot0_kaddr);
348 	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
349 	rnat0_kaddr = rnat1_kaddr - 64;
350 
351 	if (ubspstore + 63 > urnat_addr) {
352 		/* some bits need to be placed in pt->ar_rnat: */
353 		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
354 		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
355 		mask &= ~umask;
356 		if (!mask)
357 			return;
358 	}
359 	/*
360 	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
361 	 * rnat slot is ignored, so we don't have to clear it here.
362 	 */
363 	rnat0 = (urnat << shift);
364 	m = mask << shift;
365 	if (rnat0_kaddr >= kbsp)
366 		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
367 	else if (rnat0_kaddr > krbs)
368 		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
369 
370 	rnat1 = (urnat >> (63 - shift));
371 	m = mask >> (63 - shift);
372 	if (rnat1_kaddr >= kbsp)
373 		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
374 	else if (rnat1_kaddr > krbs)
375 		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
376 }
377 
378 static inline int
379 on_kernel_rbs (unsigned long addr, unsigned long bspstore,
380 	       unsigned long urbs_end)
381 {
382 	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
383 						      urbs_end);
384 	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
385 }
386 
387 /*
388  * Read a word from the user-level backing store of task CHILD.  ADDR
389  * is the user-level address to read the word from, VAL a pointer to
390  * the return value, and USER_RBS_END gives the end of the user-level
391  * backing store (i.e., it's the address that would be in ar.bsp after
392  * the user executed a "cover" instruction).
393  *
394  * This routine takes care of accessing the kernel register backing
395  * store for those registers that got spilled there.  It also takes
396  * care of calculating the appropriate RNaT collection words.
397  */
398 long
399 ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
400 	   unsigned long user_rbs_end, unsigned long addr, long *val)
401 {
402 	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
403 	struct pt_regs *child_regs;
404 	size_t copied;
405 	long ret;
406 
407 	urbs_end = (long *) user_rbs_end;
408 	laddr = (unsigned long *) addr;
409 	child_regs = task_pt_regs(child);
410 	bspstore = (unsigned long *) child_regs->ar_bspstore;
411 	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
412 	if (on_kernel_rbs(addr, (unsigned long) bspstore,
413 			  (unsigned long) urbs_end))
414 	{
415 		/*
416 		 * Attempt to read the RBS in an area that's actually
417 		 * on the kernel RBS => read the corresponding bits in
418 		 * the kernel RBS.
419 		 */
420 		rnat_addr = ia64_rse_rnat_addr(laddr);
421 		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
422 
423 		if (laddr == rnat_addr) {
424 			/* return NaT collection word itself */
425 			*val = ret;
426 			return 0;
427 		}
428 
429 		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
430 			/*
431 			 * It is implementation dependent whether the
432 			 * data portion of a NaT value gets saved on a
433 			 * st8.spill or RSE spill (e.g., see EAS 2.6,
434 			 * 4.4.4.6 Register Spill and Fill).  To get
435 			 * consistent behavior across all possible
436 			 * IA-64 implementations, we return zero in
437 			 * this case.
438 			 */
439 			*val = 0;
440 			return 0;
441 		}
442 
443 		if (laddr < urbs_end) {
444 			/*
445 			 * The desired word is on the kernel RBS and
446 			 * is not a NaT.
447 			 */
448 			regnum = ia64_rse_num_regs(bspstore, laddr);
449 			*val = *ia64_rse_skip_regs(krbs, regnum);
450 			return 0;
451 		}
452 	}
453 	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
454 	if (copied != sizeof(ret))
455 		return -EIO;
456 	*val = ret;
457 	return 0;
458 }
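
/*
 * The user-level view of ia64_peek(): a debugger simply issues
 * PTRACE_PEEKDATA on an address inside the child's register backing
 * store, and the kernel transparently redirects the read to the
 * kernel RBS (synthesizing RNaT words as needed).  Hedged sketch of
 * the tracer side; the address would come from PT_AR_BSP/PT_CFM:
 */
#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static long
read_rbs_word (pid_t pid, unsigned long addr)
{
	long val;

	errno = 0;
	val = ptrace(PTRACE_PEEKDATA, pid, addr, 0UL);
	if (val == -1 && errno)
		perror("PTRACE_PEEKDATA");
	return val;
}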
459 
460 long
461 ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
462 	   unsigned long user_rbs_end, unsigned long addr, long val)
463 {
464 	unsigned long *bspstore, *krbs, regnum, *laddr;
465 	unsigned long *urbs_end = (long *) user_rbs_end;
466 	struct pt_regs *child_regs;
467 
468 	laddr = (unsigned long *) addr;
469 	child_regs = task_pt_regs(child);
470 	bspstore = (unsigned long *) child_regs->ar_bspstore;
471 	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
472 	if (on_kernel_rbs(addr, (unsigned long) bspstore,
473 			  (unsigned long) urbs_end))
474 	{
475 		/*
476 		 * Attempt to write the RBS in an area that's actually
477 		 * on the kernel RBS => write the corresponding bits
478 		 * in the kernel RBS.
479 		 */
480 		if (ia64_rse_is_rnat_slot(laddr))
481 			put_rnat(child, child_stack, krbs, laddr, val,
482 				 urbs_end);
483 		else {
484 			if (laddr < urbs_end) {
485 				regnum = ia64_rse_num_regs(bspstore, laddr);
486 				*ia64_rse_skip_regs(krbs, regnum) = val;
487 			}
488 		}
489 	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
490 		   != sizeof(val))
491 		return -EIO;
492 	return 0;
493 }
494 
495 /*
496  * Calculate the address of the end of the user-level register backing
497  * store.  This is the address that would have been stored in ar.bsp
498  * if the user had executed a "cover" instruction right before
499  * entering the kernel.  If CFMP is not NULL, it is used to return the
500  * "current frame mask" that was active at the time the kernel was
501  * entered.
502  */
503 unsigned long
504 ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
505 		       unsigned long *cfmp)
506 {
507 	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
508 	long ndirty;
509 
510 	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
511 	bspstore = (unsigned long *) pt->ar_bspstore;
512 	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
513 
514 	if (in_syscall(pt))
515 		ndirty += (cfm & 0x7f);
516 	else
517 		cfm &= ~(1UL << 63);	/* clear valid bit */
518 
519 	if (cfmp)
520 		*cfmp = cfm;
521 	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
522 }
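
/*
 * The ">> 19" above assumes pt->loadrs keeps the ar.rsc.loadrs field
 * in bits 29:16, i.e. the byte count of the dirty partition shifted
 * into place: ">> 16" extracts the field, and a further ">> 3" turns
 * bytes into 8-byte slots.  A worked check of that arithmetic:
 */
#include <assert.h>

int
main (void)
{
	unsigned long dirty_bytes = 0x50;		/* 10 slots */
	unsigned long loadrs = dirty_bytes << 16;	/* as kept in pt->loadrs */

	assert((loadrs >> 19) == 10);
	return 0;
}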
523 
524 /*
525  * Synchronize (i.e., write) the RSE backing store living in kernel
526  * space to the VM of the CHILD task.  SW and PT are the pointers to
527  * the switch_stack and pt_regs structures, respectively.
528  * USER_RBS_START and USER_RBS_END delimit the user-level address
529  * range to be written.
530  */
531 long
532 ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
533 		    unsigned long user_rbs_start, unsigned long user_rbs_end)
534 {
535 	unsigned long addr, val;
536 	long ret;
537 
538 	/* now copy word for word from kernel rbs to user rbs: */
539 	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
540 		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
541 		if (ret < 0)
542 			return ret;
543 		if (access_process_vm(child, addr, &val, sizeof(val), 1)
544 		    != sizeof(val))
545 			return -EIO;
546 	}
547 	return 0;
548 }
549 
550 static inline int
551 thread_matches (struct task_struct *thread, unsigned long addr)
552 {
553 	unsigned long thread_rbs_end;
554 	struct pt_regs *thread_regs;
555 
556 	if (ptrace_check_attach(thread, 0) < 0)
557 		/*
558 		 * If the thread is not in an attachable state, we'll
559 		 * ignore it.  The net effect is that if ADDR happens
560 		 * to overlap with the portion of the thread's
561 		 * register backing store that is currently residing
562 		 * on the thread's kernel stack, then ptrace() may end
563 		 * up accessing a stale value.  But if the thread
564 		 * isn't stopped, that's a problem anyhow, so we're
565 		 * doing as well as we can...
566 		 */
567 		return 0;
568 
569 	thread_regs = task_pt_regs(thread);
570 	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
571 	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
572 		return 0;
573 
574 	return 1;	/* looks like we've got a winner */
575 }
576 
577 /*
578  * GDB apparently wants to be able to read the register-backing store
579  * of any thread when attached to a given process.  If we are peeking
580  * or poking an address that happens to reside in the kernel-backing
581  * store of another thread, we need to attach to that thread, because
582  * otherwise we end up accessing stale data.
583  *
584  * task_list_lock must be read-locked before calling this routine!
585  */
586 static struct task_struct *
587 find_thread_for_addr (struct task_struct *child, unsigned long addr)
588 {
589 	struct task_struct *p;
590 	struct mm_struct *mm;
591 	struct list_head *this, *next;
592 	int mm_users;
593 
594 	if (!(mm = get_task_mm(child)))
595 		return child;
596 
597 	/* -1 because of our get_task_mm(): */
598 	mm_users = atomic_read(&mm->mm_users) - 1;
599 	if (mm_users <= 1)
600 		goto out;		/* not multi-threaded */
601 
602 	/*
603 	 * Traverse the current process' children list.  Every task that
604 	 * one attaches to becomes a child.  And it is only attached children
605 	 * of the debugger that are of interest (ptrace_check_attach checks
606 	 * for this).
607 	 */
608 	list_for_each_safe(this, next, &current->children) {
609 		p = list_entry(this, struct task_struct, sibling);
610 		if (p->tgid != child->tgid)
611 			continue;
612 		if (thread_matches(p, addr)) {
613 			child = p;
614 			goto out;
615 		}
616 	}
617 
618   out:
619 	mmput(mm);
620 	return child;
621 }
622 
623 /*
624  * Write f32-f127 back to task->thread.fph if it has been modified.
625  */
626 inline void
627 ia64_flush_fph (struct task_struct *task)
628 {
629 	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
630 
631 	/*
632 	 * Prevent migrating this task while
633 	 * we're fiddling with the FPU state
634 	 */
635 	preempt_disable();
636 	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
637 		psr->mfh = 0;
638 		task->thread.flags |= IA64_THREAD_FPH_VALID;
639 		ia64_save_fpu(&task->thread.fph[0]);
640 	}
641 	preempt_enable();
642 }
643 
644 /*
645  * Sync the fph state of the task so that it can be manipulated
646  * through thread.fph.  If necessary, f32-f127 are written back to
647  * thread.fph or, if the fph state hasn't been used before, thread.fph
648  * is cleared to zeroes.  Also, access to f32-f127 is disabled to
649  * ensure that the task picks up the state from thread.fph when it
650  * executes again.
651  */
652 void
653 ia64_sync_fph (struct task_struct *task)
654 {
655 	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
656 
657 	ia64_flush_fph(task);
658 	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
659 		task->thread.flags |= IA64_THREAD_FPH_VALID;
660 		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
661 	}
662 	ia64_drop_fpu(task);
663 	psr->dfh = 1;
664 }
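
/*
 * The pattern the rest of this file follows (see access_uarea() and
 * ptrace_getregs()/ptrace_setregs() below): flush before reading
 * fph, sync before writing it.  A condensed restatement; the two
 * helpers are illustrative sketches, not kernel functions:
 */
static void
debugger_read_fph (struct task_struct *task, struct ia64_fpreg *dst)
{
	ia64_flush_fph(task);	/* thread.fph now holds f32-f127 (if used) */
	memcpy(dst, task->thread.fph, sizeof(task->thread.fph));
}

static void
debugger_write_fph (struct task_struct *task, const struct ia64_fpreg *src)
{
	ia64_sync_fph(task);	/* thread.fph valid, CPU copy invalidated */
	memcpy(task->thread.fph, src, sizeof(task->thread.fph));
}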
665 
666 static int
667 access_fr (struct unw_frame_info *info, int regnum, int hi,
668 	   unsigned long *data, int write_access)
669 {
670 	struct ia64_fpreg fpval;
671 	int ret;
672 
673 	ret = unw_get_fr(info, regnum, &fpval);
674 	if (ret < 0)
675 		return ret;
676 
677 	if (write_access) {
678 		fpval.u.bits[hi] = *data;
679 		ret = unw_set_fr(info, regnum, fpval);
680 	} else
681 		*data = fpval.u.bits[hi];
682 	return ret;
683 }
684 
685 /*
686  * Change the machine-state of CHILD such that it will return via the normal
687  * kernel exit-path, rather than the syscall-exit path.
688  */
689 static void
690 convert_to_non_syscall (struct task_struct *child, struct pt_regs  *pt,
691 			unsigned long cfm)
692 {
693 	struct unw_frame_info info, prev_info;
694 	unsigned long ip, sp, pr;
695 
696 	unw_init_from_blocked_task(&info, child);
697 	while (1) {
698 		prev_info = info;
699 		if (unw_unwind(&info) < 0)
700 			return;
701 
702 		unw_get_sp(&info, &sp);
703 		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
704 		    < IA64_PT_REGS_SIZE) {
705 			dprintk("ptrace.%s: ran off the top of the kernel "
706 				"stack\n", __FUNCTION__);
707 			return;
708 		}
709 		if (unw_get_pr (&prev_info, &pr) < 0) {
710 			unw_get_rp(&prev_info, &ip);
711 			dprintk("ptrace.%s: failed to read "
712 				"predicate register (ip=0x%lx)\n",
713 				__FUNCTION__, ip);
714 			return;
715 		}
716 		if (unw_is_intr_frame(&info)
717 		    && (pr & (1UL << PRED_USER_STACK)))
718 			break;
719 	}
720 
721 	/*
722 	 * Note: at the time of this call, the target task is blocked
723 	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
724 	 * (aka, "pLvSys") we redirect execution from
725 	 * .work_pending_syscall_end to .work_processed_kernel.
726 	 */
727 	unw_get_pr(&prev_info, &pr);
728 	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
729 	pr |=  (1UL << PRED_NON_SYSCALL);
730 	unw_set_pr(&prev_info, pr);
731 
732 	pt->cr_ifs = (1UL << 63) | cfm;
733 	/*
734 	 * Clear the memory that is NOT written on syscall-entry to
735 	 * ensure we do not leak kernel-state to user when execution
736 	 * resumes.
737 	 */
738 	pt->r2 = 0;
739 	pt->r3 = 0;
740 	pt->r14 = 0;
741 	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
742 	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
743 	pt->b7 = 0;
744 	pt->ar_ccv = 0;
745 	pt->ar_csd = 0;
746 	pt->ar_ssd = 0;
747 }
748 
749 static int
750 access_nat_bits (struct task_struct *child, struct pt_regs *pt,
751 		 struct unw_frame_info *info,
752 		 unsigned long *data, int write_access)
753 {
754 	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
755 	char nat = 0;
756 
757 	if (write_access) {
758 		nat_bits = *data;
759 		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
760 		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
761 			dprintk("ptrace: failed to set ar.unat\n");
762 			return -1;
763 		}
764 		for (regnum = 4; regnum <= 7; ++regnum) {
765 			unw_get_gr(info, regnum, &dummy, &nat);
766 			unw_set_gr(info, regnum, dummy,
767 				   (nat_bits >> regnum) & 1);
768 		}
769 	} else {
770 		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
771 			dprintk("ptrace: failed to read ar.unat\n");
772 			return -1;
773 		}
774 		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
775 		for (regnum = 4; regnum <= 7; ++regnum) {
776 			unw_get_gr(info, regnum, &dummy, &nat);
777 			nat_bits |= (nat != 0) << regnum;
778 		}
779 		*data = nat_bits;
780 	}
781 	return 0;
782 }
783 
784 static int
785 access_uarea (struct task_struct *child, unsigned long addr,
786 	      unsigned long *data, int write_access)
787 {
788 	unsigned long *ptr, regnum, urbs_end, rnat_addr, cfm;
789 	struct switch_stack *sw;
790 	struct pt_regs *pt;
791 #	define pt_reg_addr(pt, reg)	((void *)			    \
792 					 ((unsigned long) (pt)		    \
793 					  + offsetof(struct pt_regs, reg)))
794 
795 
796 	pt = task_pt_regs(child);
797 	sw = (struct switch_stack *) (child->thread.ksp + 16);
798 
799 	if ((addr & 0x7) != 0) {
800 		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
801 		return -1;
802 	}
803 
804 	if (addr < PT_F127 + 16) {
805 		/* accessing fph */
806 		if (write_access)
807 			ia64_sync_fph(child);
808 		else
809 			ia64_flush_fph(child);
810 		ptr = (unsigned long *)
811 			((unsigned long) &child->thread.fph + addr);
812 	} else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) {
813 		/* scratch registers untouched by kernel (saved in pt_regs) */
814 		ptr = pt_reg_addr(pt, f10) + (addr - PT_F10);
815 	} else if (addr >= PT_F12 && addr < PT_F15 + 16) {
816 		/*
817 		 * Scratch registers untouched by kernel (saved in
818 		 * switch_stack).
819 		 */
820 		ptr = (unsigned long *) ((long) sw
821 					 + (addr - PT_NAT_BITS - 32));
822 	} else if (addr < PT_AR_LC + 8) {
823 		/* preserved state: */
824 		struct unw_frame_info info;
825 		char nat = 0;
826 		int ret;
827 
828 		unw_init_from_blocked_task(&info, child);
829 		if (unw_unwind_to_user(&info) < 0)
830 			return -1;
831 
832 		switch (addr) {
833 		      case PT_NAT_BITS:
834 			return access_nat_bits(child, pt, &info,
835 					       data, write_access);
836 
837 		      case PT_R4: case PT_R5: case PT_R6: case PT_R7:
838 			if (write_access) {
839 				/* read NaT bit first: */
840 				unsigned long dummy;
841 
842 				ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4,
843 						 &dummy, &nat);
844 				if (ret < 0)
845 					return ret;
846 			}
847 			return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data,
848 					     &nat, write_access);
849 
850 		      case PT_B1: case PT_B2: case PT_B3:
851 		      case PT_B4: case PT_B5:
852 			return unw_access_br(&info, (addr - PT_B1)/8 + 1, data,
853 					     write_access);
854 
855 		      case PT_AR_EC:
856 			return unw_access_ar(&info, UNW_AR_EC, data,
857 					     write_access);
858 
859 		      case PT_AR_LC:
860 			return unw_access_ar(&info, UNW_AR_LC, data,
861 					     write_access);
862 
863 		      default:
864 			if (addr >= PT_F2 && addr < PT_F5 + 16)
865 				return access_fr(&info, (addr - PT_F2)/16 + 2,
866 						 (addr & 8) != 0, data,
867 						 write_access);
868 			else if (addr >= PT_F16 && addr < PT_F31 + 16)
869 				return access_fr(&info,
870 						 (addr - PT_F16)/16 + 16,
871 						 (addr & 8) != 0,
872 						 data, write_access);
873 			else {
874 				dprintk("ptrace: rejecting access to register "
875 					"address 0x%lx\n", addr);
876 				return -1;
877 			}
878 		}
879 	} else if (addr < PT_F9+16) {
880 		/* scratch state */
881 		switch (addr) {
882 		      case PT_AR_BSP:
883 			/*
884 			 * By convention, we use PT_AR_BSP to refer to
885 			 * the end of the user-level backing store.
886 			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
887 			 * to get the real value of ar.bsp at the time
888 			 * the kernel was entered.
889 			 *
890 			 * Furthermore, when changing the contents of
891 			 * PT_AR_BSP (or PT_CFM) we MUST copy any
892 			 * user-level stacked registers that are
893 			 * stored on the kernel stack back to
894 			 * user-space because otherwise, we might end
895 			 * up clobbering kernel stacked registers.
896 			 * Also, if this happens while the task is
897 			 * blocked in a system call, we convert the
898 			 * state such that the non-system-call exit
899 			 * path is used.  This ensures that the proper
900 			 * state will be picked up when resuming
901 			 * execution.  However, it *also* means that
902 			 * once we write PT_AR_BSP/PT_CFM, it won't be
903 			 * possible to modify the syscall arguments of
904 			 * the pending system call any longer.  This
905 			 * shouldn't be an issue because modifying
906 			 * PT_AR_BSP/PT_CFM generally implies that
907 			 * we're either abandoning the pending system
908 			 * call or that we defer its re-execution
909 			 * (e.g., due to GDB doing an inferior
910 			 * function call).
911 			 */
912 			urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
913 			if (write_access) {
914 				if (*data != urbs_end) {
915 					if (ia64_sync_user_rbs(child, sw,
916 							       pt->ar_bspstore,
917 							       urbs_end) < 0)
918 						return -1;
919 					if (in_syscall(pt))
920 						convert_to_non_syscall(child,
921 								       pt,
922 								       cfm);
923 					/*
924 					 * Simulate user-level write
925 					 * of ar.bsp:
926 					 */
927 					pt->loadrs = 0;
928 					pt->ar_bspstore = *data;
929 				}
930 			} else
931 				*data = urbs_end;
932 			return 0;
933 
934 		      case PT_CFM:
935 			urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
936 			if (write_access) {
937 				if (((cfm ^ *data) & PFM_MASK) != 0) {
938 					if (ia64_sync_user_rbs(child, sw,
939 							       pt->ar_bspstore,
940 							       urbs_end) < 0)
941 						return -1;
942 					if (in_syscall(pt))
943 						convert_to_non_syscall(child,
944 								       pt,
945 								       cfm);
946 					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
947 						      | (*data & PFM_MASK));
948 				}
949 			} else
950 				*data = cfm;
951 			return 0;
952 
953 		      case PT_CR_IPSR:
954 			if (write_access)
955 				pt->cr_ipsr = ((*data & IPSR_MASK)
956 					       | (pt->cr_ipsr & ~IPSR_MASK));
957 			else
958 				*data = (pt->cr_ipsr & IPSR_MASK);
959 			return 0;
960 
961 		      case PT_AR_RSC:
962 			if (write_access)
963 				pt->ar_rsc = *data | (3 << 2); /* force PL3 */
964 			else
965 				*data = pt->ar_rsc;
966 			return 0;
967 
968 		      case PT_AR_RNAT:
969 			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
970 			rnat_addr = (long) ia64_rse_rnat_addr((long *)
971 							      urbs_end);
972 			if (write_access)
973 				return ia64_poke(child, sw, urbs_end,
974 						 rnat_addr, *data);
975 			else
976 				return ia64_peek(child, sw, urbs_end,
977 						 rnat_addr, data);
978 
979 		      case PT_R1:
980 			ptr = pt_reg_addr(pt, r1);
981 			break;
982 		      case PT_R2:  case PT_R3:
983 			ptr = pt_reg_addr(pt, r2) + (addr - PT_R2);
984 			break;
985 		      case PT_R8:  case PT_R9:  case PT_R10: case PT_R11:
986 			ptr = pt_reg_addr(pt, r8) + (addr - PT_R8);
987 			break;
988 		      case PT_R12: case PT_R13:
989 			ptr = pt_reg_addr(pt, r12) + (addr - PT_R12);
990 			break;
991 		      case PT_R14:
992 			ptr = pt_reg_addr(pt, r14);
993 			break;
994 		      case PT_R15:
995 			ptr = pt_reg_addr(pt, r15);
996 			break;
997 		      case PT_R16: case PT_R17: case PT_R18: case PT_R19:
998 		      case PT_R20: case PT_R21: case PT_R22: case PT_R23:
999 		      case PT_R24: case PT_R25: case PT_R26: case PT_R27:
1000 		      case PT_R28: case PT_R29: case PT_R30: case PT_R31:
1001 			ptr = pt_reg_addr(pt, r16) + (addr - PT_R16);
1002 			break;
1003 		      case PT_B0:
1004 			ptr = pt_reg_addr(pt, b0);
1005 			break;
1006 		      case PT_B6:
1007 			ptr = pt_reg_addr(pt, b6);
1008 			break;
1009 		      case PT_B7:
1010 			ptr = pt_reg_addr(pt, b7);
1011 			break;
1012 		      case PT_F6:  case PT_F6+8: case PT_F7: case PT_F7+8:
1013 		      case PT_F8:  case PT_F8+8: case PT_F9: case PT_F9+8:
1014 			ptr = pt_reg_addr(pt, f6) + (addr - PT_F6);
1015 			break;
1016 		      case PT_AR_BSPSTORE:
1017 			ptr = pt_reg_addr(pt, ar_bspstore);
1018 			break;
1019 		      case PT_AR_UNAT:
1020 			ptr = pt_reg_addr(pt, ar_unat);
1021 			break;
1022 		      case PT_AR_PFS:
1023 			ptr = pt_reg_addr(pt, ar_pfs);
1024 			break;
1025 		      case PT_AR_CCV:
1026 			ptr = pt_reg_addr(pt, ar_ccv);
1027 			break;
1028 		      case PT_AR_FPSR:
1029 			ptr = pt_reg_addr(pt, ar_fpsr);
1030 			break;
1031 		      case PT_CR_IIP:
1032 			ptr = pt_reg_addr(pt, cr_iip);
1033 			break;
1034 		      case PT_PR:
1035 			ptr = pt_reg_addr(pt, pr);
1036 			break;
1037 			/* scratch register */
1038 
1039 		      default:
1040 			/* disallow accessing anything else... */
1041 			dprintk("ptrace: rejecting access to register "
1042 				"address 0x%lx\n", addr);
1043 			return -1;
1044 		}
1045 	} else if (addr <= PT_AR_SSD) {
1046 		ptr = pt_reg_addr(pt, ar_csd) + (addr - PT_AR_CSD);
1047 	} else {
1048 		/* access debug registers */
1049 
1050 		if (addr >= PT_IBR) {
1051 			regnum = (addr - PT_IBR) >> 3;
1052 			ptr = &child->thread.ibr[0];
1053 		} else {
1054 			regnum = (addr - PT_DBR) >> 3;
1055 			ptr = &child->thread.dbr[0];
1056 		}
1057 
1058 		if (regnum >= 8) {
1059 			dprintk("ptrace: rejecting access to register "
1060 				"address 0x%lx\n", addr);
1061 			return -1;
1062 		}
1063 #ifdef CONFIG_PERFMON
1064 		/*
1065 		 * Check if debug registers are used by perfmon. This
1066 		 * test must be done once we know that we can do the
1067 		 * operation, i.e. the arguments are all valid, but
1068 		 * before we start modifying the state.
1069 		 *
1070 		 * Perfmon needs to keep a count of how many processes
1071 		 * are trying to modify the debug registers for system
1072 		 * wide monitoring sessions.
1073 		 *
1074 		 * We also include read accesses here, because they may
1075 		 * cause the PMU-installed debug register state
1076 		 * (dbr[], ibr[]) to be reset. The two arrays are also
1077 		 * used by perfmon, but we do not use
1078 		 * IA64_THREAD_DBG_VALID. The registers are restored
1079 		 * by the PMU context switch code.
1080 		 */
1081 		if (pfm_use_debug_registers(child)) return -1;
1082 #endif
1083 
1084 		if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
1085 			child->thread.flags |= IA64_THREAD_DBG_VALID;
1086 			memset(child->thread.dbr, 0,
1087 			       sizeof(child->thread.dbr));
1088 			memset(child->thread.ibr, 0,
1089 			       sizeof(child->thread.ibr));
1090 		}
1091 
1092 		ptr += regnum;
1093 
1094 		if ((regnum & 1) && write_access) {
1095 			/* don't let the user set kernel-level breakpoints: */
1096 			*ptr = *data & ~(7UL << 56);
1097 			return 0;
1098 		}
1099 	}
1100 	if (write_access)
1101 		*ptr = *data;
1102 	else
1103 		*data = *ptr;
1104 	return 0;
1105 }
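
/*
 * What the PT_AR_BSP convention above means for a debugger: the true
 * ar.bsp at kernel entry is recovered by backing up CFM.sof
 * registers from the reported end of the backing store.  Hedged
 * userspace sketch, reusing the rse_skip_regs() model shown earlier;
 * PT_AR_BSP and PT_CFM come from asm/ptrace_offsets.h:
 */
static unsigned long
real_bsp (pid_t pid)
{
	unsigned long bsp = ptrace(PTRACE_PEEKUSER, pid, PT_AR_BSP, 0UL);
	unsigned long cfm = ptrace(PTRACE_PEEKUSER, pid, PT_CFM, 0UL);
	unsigned long sof = cfm & 0x7f;		/* size of current frame */

	return (unsigned long) rse_skip_regs((unsigned long *) bsp,
					     -(long) sof);
}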
1106 
1107 static long
1108 ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
1109 {
1110 	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
1111 	struct unw_frame_info info;
1112 	struct ia64_fpreg fpval;
1113 	struct switch_stack *sw;
1114 	struct pt_regs *pt;
1115 	long ret, retval = 0;
1116 	char nat = 0;
1117 	int i;
1118 
1119 	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
1120 		return -EIO;
1121 
1122 	pt = task_pt_regs(child);
1123 	sw = (struct switch_stack *) (child->thread.ksp + 16);
1124 	unw_init_from_blocked_task(&info, child);
1125 	if (unw_unwind_to_user(&info) < 0) {
1126 		return -EIO;
1127 	}
1128 
1129 	if (((unsigned long) ppr & 0x7) != 0) {
1130 		dprintk("ptrace: unaligned register address %p\n", ppr);
1131 		return -EIO;
1132 	}
1133 
1134 	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
1135 	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
1136 	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
1137 	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
1138 	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
1139 	    || access_uarea(child, PT_CFM, &cfm, 0) < 0
1140 	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0) < 0)
1141 		return -EIO;
1142 
1143 	/* control regs */
1144 
1145 	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
1146 	retval |= __put_user(psr, &ppr->cr_ipsr);
1147 
1148 	/* app regs */
1149 
1150 	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
1151 	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
1152 	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
1153 	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
1154 	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
1155 	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
1156 
1157 	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
1158 	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
1159 	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
1160 	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
1161 	retval |= __put_user(cfm, &ppr->cfm);
1162 
1163 	/* gr1-gr3 */
1164 
1165 	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
1166 	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);
1167 
1168 	/* gr4-gr7 */
1169 
1170 	for (i = 4; i < 8; i++) {
1171 		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
1172 			return -EIO;
1173 		retval |= __put_user(val, &ppr->gr[i]);
1174 	}
1175 
1176 	/* gr8-gr11 */
1177 
1178 	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
1179 
1180 	/* gr12-gr15 */
1181 
1182 	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
1183 	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
1184 	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
1185 
1186 	/* gr16-gr31 */
1187 
1188 	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
1189 
1190 	/* b0 */
1191 
1192 	retval |= __put_user(pt->b0, &ppr->br[0]);
1193 
1194 	/* b1-b5 */
1195 
1196 	for (i = 1; i < 6; i++) {
1197 		if (unw_access_br(&info, i, &val, 0) < 0)
1198 			return -EIO;
1199 		retval |= __put_user(val, &ppr->br[i]);
1200 	}
1201 
1202 	/* b6-b7 */
1203 
1204 	retval |= __put_user(pt->b6, &ppr->br[6]);
1205 	retval |= __put_user(pt->b7, &ppr->br[7]);
1206 
1207 	/* fr2-fr5 */
1208 
1209 	for (i = 2; i < 6; i++) {
1210 		if (unw_get_fr(&info, i, &fpval) < 0)
1211 			return -EIO;
1212 		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
1213 	}
1214 
1215 	/* fr6-fr11 */
1216 
1217 	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
1218 				 sizeof(struct ia64_fpreg) * 6);
1219 
1220 	/* fp scratch regs(12-15) */
1221 
1222 	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
1223 				 sizeof(struct ia64_fpreg) * 4);
1224 
1225 	/* fr16-fr31 */
1226 
1227 	for (i = 16; i < 32; i++) {
1228 		if (unw_get_fr(&info, i, &fpval) < 0)
1229 			return -EIO;
1230 		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
1231 	}
1232 
1233 	/* fph */
1234 
1235 	ia64_flush_fph(child);
1236 	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
1237 				 sizeof(ppr->fr[32]) * 96);
1238 
1239 	/*  preds */
1240 
1241 	retval |= __put_user(pt->pr, &ppr->pr);
1242 
1243 	/* nat bits */
1244 
1245 	retval |= __put_user(nat_bits, &ppr->nat);
1246 
1247 	ret = retval ? -EIO : 0;
1248 	return ret;
1249 }
1250 
1251 static long
1252 ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
1253 {
1254 	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
1255 	struct unw_frame_info info;
1256 	struct switch_stack *sw;
1257 	struct ia64_fpreg fpval;
1258 	struct pt_regs *pt;
1259 	long ret, retval = 0;
1260 	int i;
1261 
1262 	memset(&fpval, 0, sizeof(fpval));
1263 
1264 	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
1265 		return -EIO;
1266 
1267 	pt = task_pt_regs(child);
1268 	sw = (struct switch_stack *) (child->thread.ksp + 16);
1269 	unw_init_from_blocked_task(&info, child);
1270 	if (unw_unwind_to_user(&info) < 0) {
1271 		return -EIO;
1272 	}
1273 
1274 	if (((unsigned long) ppr & 0x7) != 0) {
1275 		dprintk("ptrace: unaligned register address %p\n", ppr);
1276 		return -EIO;
1277 	}
1278 
1279 	/* control regs */
1280 
1281 	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
1282 	retval |= __get_user(psr, &ppr->cr_ipsr);
1283 
1284 	/* app regs */
1285 
1286 	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
1287 	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
1288 	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
1289 	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
1290 	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
1291 	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
1292 
1293 	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
1294 	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
1295 	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
1296 	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
1297 	retval |= __get_user(cfm, &ppr->cfm);
1298 
1299 	/* gr1-gr3 */
1300 
1301 	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
1302 	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
1303 
1304 	/* gr4-gr7 */
1305 
1306 	for (i = 4; i < 8; i++) {
1307 		retval |= __get_user(val, &ppr->gr[i]);
1308 		/* NaT bit will be set via PT_NAT_BITS: */
1309 		if (unw_set_gr(&info, i, val, 0) < 0)
1310 			return -EIO;
1311 	}
1312 
1313 	/* gr8-gr11 */
1314 
1315 	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
1316 
1317 	/* gr12-gr15 */
1318 
1319 	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
1320 	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
1321 	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
1322 
1323 	/* gr16-gr31 */
1324 
1325 	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
1326 
1327 	/* b0 */
1328 
1329 	retval |= __get_user(pt->b0, &ppr->br[0]);
1330 
1331 	/* b1-b5 */
1332 
1333 	for (i = 1; i < 6; i++) {
1334 		retval |= __get_user(val, &ppr->br[i]);
1335 		unw_set_br(&info, i, val);
1336 	}
1337 
1338 	/* b6-b7 */
1339 
1340 	retval |= __get_user(pt->b6, &ppr->br[6]);
1341 	retval |= __get_user(pt->b7, &ppr->br[7]);
1342 
1343 	/* fr2-fr5 */
1344 
1345 	for (i = 2; i < 6; i++) {
1346 		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
1347 		if (unw_set_fr(&info, i, fpval) < 0)
1348 			return -EIO;
1349 	}
1350 
1351 	/* fr6-fr11 */
1352 
1353 	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
1354 				   sizeof(ppr->fr[6]) * 6);
1355 
1356 	/* fp scratch regs(12-15) */
1357 
1358 	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
1359 				   sizeof(ppr->fr[12]) * 4);
1360 
1361 	/* fr16-fr31 */
1362 
1363 	for (i = 16; i < 32; i++) {
1364 		retval |= __copy_from_user(&fpval, &ppr->fr[i],
1365 					   sizeof(fpval));
1366 		if (unw_set_fr(&info, i, fpval) < 0)
1367 			return -EIO;
1368 	}
1369 
1370 	/* fph */
1371 
1372 	ia64_sync_fph(child);
1373 	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
1374 				   sizeof(ppr->fr[32]) * 96);
1375 
1376 	/* preds */
1377 
1378 	retval |= __get_user(pt->pr, &ppr->pr);
1379 
1380 	/* nat bits */
1381 
1382 	retval |= __get_user(nat_bits, &ppr->nat);
1383 
1384 	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
1385 	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
1386 	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
1387 	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
1388 	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
1389 	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
1390 	retval |= access_uarea(child, PT_CFM, &cfm, 1);
1391 	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
1392 
1393 	ret = retval ? -EIO : 0;
1394 	return ret;
1395 }
1396 
1397 /*
1398  * Called by kernel/ptrace.c when detaching..
1399  *
1400  * Make sure the single step bit is not set.
1401  */
1402 void
1403 ptrace_disable (struct task_struct *child)
1404 {
1405 	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1406 
1407 	/* make sure the single step/taken-branch trap bits are not set: */
1408 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1409 	child_psr->ss = 0;
1410 	child_psr->tb = 0;
1411 }
1412 
1413 asmlinkage long
1414 sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
1415 {
1416 	struct pt_regs *pt;
1417 	unsigned long urbs_end, peek_or_poke;
1418 	struct task_struct *child;
1419 	struct switch_stack *sw;
1420 	long ret;
1421 
1422 	lock_kernel();
1423 	ret = -EPERM;
1424 	if (request == PTRACE_TRACEME) {
1425 		ret = ptrace_traceme();
1426 		goto out;
1427 	}
1428 
1429 	peek_or_poke = (request == PTRACE_PEEKTEXT
1430 			|| request == PTRACE_PEEKDATA
1431 			|| request == PTRACE_POKETEXT
1432 			|| request == PTRACE_POKEDATA);
1433 	ret = -ESRCH;
1434 	read_lock(&tasklist_lock);
1435 	{
1436 		child = find_task_by_pid(pid);
1437 		if (child) {
1438 			if (peek_or_poke)
1439 				child = find_thread_for_addr(child, addr);
1440 			get_task_struct(child);
1441 		}
1442 	}
1443 	read_unlock(&tasklist_lock);
1444 	if (!child)
1445 		goto out;
1446 	ret = -EPERM;
1447 	if (pid == 1)		/* no messing around with init! */
1448 		goto out_tsk;
1449 
1450 	if (request == PTRACE_ATTACH) {
1451 		ret = ptrace_attach(child);
1452 		goto out_tsk;
1453 	}
1454 
1455 	ret = ptrace_check_attach(child, request == PTRACE_KILL);
1456 	if (ret < 0)
1457 		goto out_tsk;
1458 
1459 	pt = task_pt_regs(child);
1460 	sw = (struct switch_stack *) (child->thread.ksp + 16);
1461 
1462 	switch (request) {
1463 	      case PTRACE_PEEKTEXT:
1464 	      case PTRACE_PEEKDATA:
1465 		/* read word at location addr */
1466 		urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
1467 		ret = ia64_peek(child, sw, urbs_end, addr, &data);
1468 		if (ret == 0) {
1469 			ret = data;
1470 			/* ensure "ret" is not mistaken as an error code: */
1471 			force_successful_syscall_return();
1472 		}
1473 		goto out_tsk;
1474 
1475 	      case PTRACE_POKETEXT:
1476 	      case PTRACE_POKEDATA:
1477 		/* write the word at location addr */
1478 		urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
1479 		ret = ia64_poke(child, sw, urbs_end, addr, data);
1480 		goto out_tsk;
1481 
1482 	      case PTRACE_PEEKUSR:
1483 		/* read the word at addr in the USER area */
1484 		if (access_uarea(child, addr, &data, 0) < 0) {
1485 			ret = -EIO;
1486 			goto out_tsk;
1487 		}
1488 		ret = data;
1489 		/* ensure "ret" is not mistaken as an error code */
1490 		force_successful_syscall_return();
1491 		goto out_tsk;
1492 
1493 	      case PTRACE_POKEUSR:
1494 		/* write the word at addr in the USER area */
1495 		if (access_uarea(child, addr, &data, 1) < 0) {
1496 			ret = -EIO;
1497 			goto out_tsk;
1498 		}
1499 		ret = 0;
1500 		goto out_tsk;
1501 
1502 	      case PTRACE_OLD_GETSIGINFO:
1503 		/* for backwards-compatibility */
1504 		ret = ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
1505 		goto out_tsk;
1506 
1507 	      case PTRACE_OLD_SETSIGINFO:
1508 		/* for backwards-compatibility */
1509 		ret = ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
1510 		goto out_tsk;
1511 
1512 	      case PTRACE_SYSCALL:
1513 		/* continue and stop at next (return from) syscall */
1514 	      case PTRACE_CONT:
1515 		/* restart after signal. */
1516 		ret = -EIO;
1517 		if (!valid_signal(data))
1518 			goto out_tsk;
1519 		if (request == PTRACE_SYSCALL)
1520 			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
1521 		else
1522 			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
1523 		child->exit_code = data;
1524 
1525 		/*
1526 		 * Make sure the single step/taken-branch trap bits
1527 		 * are not set:
1528 		 */
1529 		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1530 		ia64_psr(pt)->ss = 0;
1531 		ia64_psr(pt)->tb = 0;
1532 
1533 		wake_up_process(child);
1534 		ret = 0;
1535 		goto out_tsk;
1536 
1537 	      case PTRACE_KILL:
1538 		/*
1539 		 * Make the child exit.  Best I can do is send it a
1540 		 * sigkill.  Perhaps it should be put in the status
1541 		 * that it wants to exit.
1542 		 */
1543 		if (child->exit_state == EXIT_ZOMBIE)
1544 			/* already dead */
1545 			goto out_tsk;
1546 		child->exit_code = SIGKILL;
1547 
1548 		ptrace_disable(child);
1549 		wake_up_process(child);
1550 		ret = 0;
1551 		goto out_tsk;
1552 
1553 	      case PTRACE_SINGLESTEP:
1554 		/* let child execute for one instruction */
1555 	      case PTRACE_SINGLEBLOCK:
1556 		ret = -EIO;
1557 		if (!valid_signal(data))
1558 			goto out_tsk;
1559 
1560 		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
1561 		set_tsk_thread_flag(child, TIF_SINGLESTEP);
1562 		if (request == PTRACE_SINGLESTEP) {
1563 			ia64_psr(pt)->ss = 1;
1564 		} else {
1565 			ia64_psr(pt)->tb = 1;
1566 		}
1567 		child->exit_code = data;
1568 
1569 		/* give it a chance to run. */
1570 		wake_up_process(child);
1571 		ret = 0;
1572 		goto out_tsk;
1573 
1574 	      case PTRACE_DETACH:
1575 		/* detach a process that was attached. */
1576 		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
1577 		ret = ptrace_detach(child, data);
1578 		goto out_tsk;
1579 
1580 	      case PTRACE_GETREGS:
1581 		ret = ptrace_getregs(child,
1582 				     (struct pt_all_user_regs __user *) data);
1583 		goto out_tsk;
1584 
1585 	      case PTRACE_SETREGS:
1586 		ret = ptrace_setregs(child,
1587 				     (struct pt_all_user_regs __user *) data);
1588 		goto out_tsk;
1589 
1590 	      default:
1591 		ret = ptrace_request(child, request, addr, data);
1592 		goto out_tsk;
1593 	}
1594   out_tsk:
1595 	put_task_struct(child);
1596   out:
1597 	unlock_kernel();
1598 	return ret;
1599 }
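
/*
 * Tracer-side sketch of the two ia64-specific requests handled
 * above: PTRACE_GETREGS/PTRACE_SETREGS move the entire register set
 * in one call through a struct pt_all_user_regs in the tracer's
 * address space, passed in "data" ("addr" is unused).  This assumes
 * <asm/ptrace.h> supplies the struct and the ia64 request values:
 */
#include <asm/ptrace.h>
#include <sys/ptrace.h>

static int
dump_all_regs (pid_t pid)
{
	struct pt_all_user_regs regs;

	if (ptrace(PTRACE_GETREGS, pid, 0UL, &regs) < 0)
		return -1;
	/* regs.cr_iip, regs.cr_ipsr, regs.gr[], regs.br[], regs.fr[],
	   regs.pr and regs.nat are now valid */
	return 0;
}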
1600 
1601 
1602 static void
1603 syscall_trace (void)
1604 {
1605 	/*
1606 	 * The 0x80 provides a way for the tracing parent to
1607 	 * distinguish between a syscall stop and SIGTRAP delivery.
1608 	 */
1609 	ptrace_notify(SIGTRAP
1610 		      | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
1611 
1612 	/*
1613 	 * This isn't the same as continuing with a signal, but it
1614 	 * will do for normal use.  strace only continues with a
1615 	 * signal if the stopping signal is not SIGTRAP.  -brl
1616 	 */
1617 	if (current->exit_code) {
1618 		send_sig(current->exit_code, current, 1);
1619 		current->exit_code = 0;
1620 	}
1621 }
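
/*
 * What the 0x80 buys the tracer: once PT_TRACESYSGOOD is set (via
 * ptrace(PTRACE_SETOPTIONS, ..., PTRACE_O_TRACESYSGOOD)), syscall
 * stops report WSTOPSIG(status) == (SIGTRAP | 0x80) and can be told
 * apart from genuine SIGTRAPs.  Sketch of the tracer's wait loop:
 */
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

static void
handle_stop (pid_t pid, int status)
{
	if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
		/* syscall entry/exit stop: just resume tracing */
		ptrace(PTRACE_SYSCALL, pid, 0UL, 0UL);
	else if (WIFSTOPPED(status))
		/* real signal: resume and deliver it */
		ptrace(PTRACE_SYSCALL, pid, 0UL,
		       (unsigned long) WSTOPSIG(status));
}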
1622 
1623 /* "asmlinkage" so the input arguments are preserved... */
1624 
1625 asmlinkage void
1626 syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
1627 		     long arg4, long arg5, long arg6, long arg7,
1628 		     struct pt_regs regs)
1629 {
1630 	if (test_thread_flag(TIF_SYSCALL_TRACE)
1631 	    && (current->ptrace & PT_PTRACED))
1632 		syscall_trace();
1633 
1634 	if (unlikely(current->audit_context)) {
1635 		long syscall;
1636 		int arch;
1637 
1638 		if (IS_IA32_PROCESS(&regs)) {
1639 			syscall = regs.r1;
1640 			arch = AUDIT_ARCH_I386;
1641 		} else {
1642 			syscall = regs.r15;
1643 			arch = AUDIT_ARCH_IA64;
1644 		}
1645 
1646 		audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
1647 	}
1648 
1649 }
1650 
1651 /* "asmlinkage" so the input arguments are preserved... */
1652 
1653 asmlinkage void
1654 syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1655 		     long arg4, long arg5, long arg6, long arg7,
1656 		     struct pt_regs regs)
1657 {
1658 	if (unlikely(current->audit_context)) {
1659 		int success = AUDITSC_RESULT(regs.r10);
1660 		long result = regs.r8;
1661 
1662 		if (success != AUDITSC_SUCCESS)
1663 			result = -result;
1664 		audit_syscall_exit(success, result);
1665 	}
1666 
1667 	if ((test_thread_flag(TIF_SYSCALL_TRACE)
1668 	    || test_thread_flag(TIF_SINGLESTEP))
1669 	    && (current->ptrace & PT_PTRACED))
1670 		syscall_trace();
1671 }
1672