xref: /openbmc/linux/arch/ia64/kernel/unwind.c (revision df561f66)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 1999-2004 Hewlett-Packard Co
4  *	David Mosberger-Tang <davidm@hpl.hp.com>
5  * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
6  * 	- Change pt_regs_off() to make it less dependent on pt_regs structure.
7  */
8 /*
9  * This file implements call frame unwind support for the Linux
10  * kernel.  Parsing and processing the unwind information is
11  * time-consuming, so this implementation translates the unwind
12  * descriptors into unwind scripts.  These scripts are very simple
13  * (basically a sequence of assignments) and efficient to execute.
14  * They are cached for later re-use.  Each script is specific for a
15  * given instruction pointer address and the set of predicate values
16  * that the script depends on (most unwind descriptors are
17  * unconditional and scripts often do not depend on predicates at
18  * all).  This code is based on the unwind conventions described in
19  * the "IA-64 Software Conventions and Runtime Architecture" manual.
20  *
21  * SMP conventions:
22  *	o updates to the global unwind data (in structure "unw") are serialized
23  *	  by the unw.lock spinlock
24  *	o each unwind script has its own read-write lock; a thread must acquire
25  *	  a read lock before executing a script and must acquire a write lock
26  *	  before modifying a script
27  *	o if both the unw.lock spinlock and a script's read-write lock must be
28  *	  acquired, then the read-write lock must be acquired first.
29  */
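/*
 * Rough illustration (placeholder names, not part of the unwind info format):
 * for a typical fixed-size frame the generated script amounts to a few
 * assignments on the unw_frame_info slots, conceptually something like
 *
 *	info->psp     = info->sp + frame_size;				// UNW_INSN_ADD
 *	info->rp_loc  = (unsigned long *) (info->psp + rp_off);	// UNW_INSN_ADD_PSP
 *	info->pfs_loc = (unsigned long *) (info->psp + pfs_off);
 *
 * where frame_size, rp_off and pfs_off stand in for values extracted from the
 * unwind descriptors; compile_reg() and run_script() below show the actual
 * UNW_INSN_* encoding of such assignments.
 */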
30 #include <linux/module.h>
31 #include <linux/memblock.h>
32 #include <linux/elf.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 
37 #include <asm/unwind.h>
38 
39 #include <asm/delay.h>
40 #include <asm/page.h>
41 #include <asm/ptrace.h>
42 #include <asm/ptrace_offsets.h>
43 #include <asm/rse.h>
44 #include <asm/sections.h>
45 #include <linux/uaccess.h>
46 
47 #include "entry.h"
48 #include "unwind_i.h"
49 
50 #define UNW_LOG_CACHE_SIZE	7	/* each unw_script is ~256 bytes in size */
51 #define UNW_CACHE_SIZE		(1 << UNW_LOG_CACHE_SIZE)
52 
53 #define UNW_LOG_HASH_SIZE	(UNW_LOG_CACHE_SIZE + 1)
54 #define UNW_HASH_SIZE		(1 << UNW_LOG_HASH_SIZE)
55 
56 #define UNW_STATS	0	/* WARNING: this disables interrupts for long time-spans!! */
57 
58 #ifdef UNW_DEBUG
59   static unsigned int unw_debug_level = UNW_DEBUG;
60 #  define UNW_DEBUG_ON(n)	unw_debug_level >= n
61    /* Do not code a printk level, not all debug lines end in newline */
62 #  define UNW_DPRINT(n, ...)  if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
63 #  undef inline
64 #  define inline
65 #else /* !UNW_DEBUG */
66 #  define UNW_DEBUG_ON(n)  0
67 #  define UNW_DPRINT(n, ...)
68 #endif /* UNW_DEBUG */
69 
70 #if UNW_STATS
71 # define STAT(x...)	x
72 #else
73 # define STAT(x...)
74 #endif
75 
76 #define alloc_reg_state()	kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
77 #define free_reg_state(usr)	kfree(usr)
78 #define alloc_labeled_state()	kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
79 #define free_labeled_state(usr)	kfree(usr)
80 
81 typedef unsigned long unw_word;
82 typedef unsigned char unw_hash_index_t;
83 
84 static struct {
85 	spinlock_t lock;			/* spinlock for unwind data */
86 
87 	/* list of unwind tables (one per load-module) */
88 	struct unw_table *tables;
89 
90 	unsigned long r0;			/* constant 0 for r0 */
91 
92 	/* table of registers that prologues can save (and order in which they're saved): */
93 	const unsigned char save_order[8];
94 
95 	/* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
96 	unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
97 
98 	unsigned short lru_head;		/* index of least-recently used script */
99 	unsigned short lru_tail;		/* index of most-recently used script */
100 
101 	/* index into unw_frame_info for preserved register i */
102 	unsigned short preg_index[UNW_NUM_REGS];
103 
104 	short pt_regs_offsets[32];
105 
106 	/* unwind table for the kernel: */
107 	struct unw_table kernel_table;
108 
109 	/* unwind table describing the gate page (kernel code that is mapped into user space): */
110 	size_t gate_table_size;
111 	unsigned long *gate_table;
112 
113 	/* hash table that maps instruction pointer to script index: */
114 	unsigned short hash[UNW_HASH_SIZE];
115 
116 	/* script cache: */
117 	struct unw_script cache[UNW_CACHE_SIZE];
118 
119 # ifdef UNW_DEBUG
120 	const char *preg_name[UNW_NUM_REGS];
121 # endif
122 # if UNW_STATS
123 	struct {
124 		struct {
125 			int lookups;
126 			int hinted_hits;
127 			int normal_hits;
128 			int collision_chain_traversals;
129 		} cache;
130 		struct {
131 			unsigned long build_time;
132 			unsigned long run_time;
133 			unsigned long parse_time;
134 			int builds;
135 			int news;
136 			int collisions;
137 			int runs;
138 		} script;
139 		struct {
140 			unsigned long init_time;
141 			unsigned long unwind_time;
142 			int inits;
143 			int unwinds;
144 		} api;
145 	} stat;
146 # endif
147 } unw = {
148 	.tables = &unw.kernel_table,
149 	.lock = __SPIN_LOCK_UNLOCKED(unw.lock),
150 	.save_order = {
151 		UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
152 		UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
153 	},
154 	.preg_index = {
155 		offsetof(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_GR */
156 		offsetof(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_MEM */
157 		offsetof(struct unw_frame_info, bsp_loc)/8,
158 		offsetof(struct unw_frame_info, bspstore_loc)/8,
159 		offsetof(struct unw_frame_info, pfs_loc)/8,
160 		offsetof(struct unw_frame_info, rnat_loc)/8,
161 		offsetof(struct unw_frame_info, psp)/8,
162 		offsetof(struct unw_frame_info, rp_loc)/8,
163 		offsetof(struct unw_frame_info, r4)/8,
164 		offsetof(struct unw_frame_info, r5)/8,
165 		offsetof(struct unw_frame_info, r6)/8,
166 		offsetof(struct unw_frame_info, r7)/8,
167 		offsetof(struct unw_frame_info, unat_loc)/8,
168 		offsetof(struct unw_frame_info, pr_loc)/8,
169 		offsetof(struct unw_frame_info, lc_loc)/8,
170 		offsetof(struct unw_frame_info, fpsr_loc)/8,
171 		offsetof(struct unw_frame_info, b1_loc)/8,
172 		offsetof(struct unw_frame_info, b2_loc)/8,
173 		offsetof(struct unw_frame_info, b3_loc)/8,
174 		offsetof(struct unw_frame_info, b4_loc)/8,
175 		offsetof(struct unw_frame_info, b5_loc)/8,
176 		offsetof(struct unw_frame_info, f2_loc)/8,
177 		offsetof(struct unw_frame_info, f3_loc)/8,
178 		offsetof(struct unw_frame_info, f4_loc)/8,
179 		offsetof(struct unw_frame_info, f5_loc)/8,
180 		offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
181 		offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
182 		offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
183 		offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
184 		offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
185 		offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
186 		offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
187 		offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
188 		offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
189 		offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
190 		offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
191 		offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
192 		offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
193 		offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
194 		offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
195 		offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
196 	},
197 	.pt_regs_offsets = {
198 		[0] = -1,
199 		offsetof(struct pt_regs,  r1),
200 		offsetof(struct pt_regs,  r2),
201 		offsetof(struct pt_regs,  r3),
202 		[4] = -1, [5] = -1, [6] = -1, [7] = -1,
203 		offsetof(struct pt_regs,  r8),
204 		offsetof(struct pt_regs,  r9),
205 		offsetof(struct pt_regs, r10),
206 		offsetof(struct pt_regs, r11),
207 		offsetof(struct pt_regs, r12),
208 		offsetof(struct pt_regs, r13),
209 		offsetof(struct pt_regs, r14),
210 		offsetof(struct pt_regs, r15),
211 		offsetof(struct pt_regs, r16),
212 		offsetof(struct pt_regs, r17),
213 		offsetof(struct pt_regs, r18),
214 		offsetof(struct pt_regs, r19),
215 		offsetof(struct pt_regs, r20),
216 		offsetof(struct pt_regs, r21),
217 		offsetof(struct pt_regs, r22),
218 		offsetof(struct pt_regs, r23),
219 		offsetof(struct pt_regs, r24),
220 		offsetof(struct pt_regs, r25),
221 		offsetof(struct pt_regs, r26),
222 		offsetof(struct pt_regs, r27),
223 		offsetof(struct pt_regs, r28),
224 		offsetof(struct pt_regs, r29),
225 		offsetof(struct pt_regs, r30),
226 		offsetof(struct pt_regs, r31),
227 	},
228 	.hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
229 #ifdef UNW_DEBUG
230 	.preg_name = {
231 		"pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
232 		"r4", "r5", "r6", "r7",
233 		"ar.unat", "pr", "ar.lc", "ar.fpsr",
234 		"b1", "b2", "b3", "b4", "b5",
235 		"f2", "f3", "f4", "f5",
236 		"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
237 		"f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
238 	}
239 #endif
240 };
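/*
 * Note on the /8 scaling in preg_index[] above: the entries are 8-byte slot
 * numbers into struct unw_frame_info, not byte offsets.  run_script() views
 * the frame info as an array of unsigned longs, so an unwind instruction can
 * reach any preserved-register location as (illustrative only):
 *
 *	unsigned long *s = (unsigned long *) info;
 *	s[unw.preg_index[UNW_REG_RP]] = ...;	// equivalent to info->rp_loc = ...
 */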
241 
242 static inline int
243 read_only (void *addr)
244 {
245 	return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0);
246 }
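/*
 * read_only() relies on unsigned wrap-around: the difference is smaller than
 * sizeof(unw.r0) only when addr points at unw.r0 itself, which is the single
 * read-only location the unwinder hands out for the constant register r0
 * (see UNW_INSN_MOVE_CONST in run_script()).
 */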
247 
248 /*
249  * Returns offset of rREG in struct pt_regs.
250  */
251 static inline unsigned long
252 pt_regs_off (unsigned long reg)
253 {
254 	short off = -1;
255 
256 	if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
257 		off = unw.pt_regs_offsets[reg];
258 
259 	if (off < 0) {
260 		UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __func__, reg);
261 		off = 0;
262 	}
263 	return (unsigned long) off;
264 }
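/*
 * For example, pt_regs_off(8) yields offsetof(struct pt_regs, r8), whereas a
 * preserved register such as r4 has no pt_regs slot (its table entry is -1),
 * so the lookup falls back to offset 0 and logs a complaint.
 */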
265 
266 static inline struct pt_regs *
267 get_scratch_regs (struct unw_frame_info *info)
268 {
269 	if (!info->pt) {
270 		/* This should not happen with valid unwind info.  */
271 		UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __func__);
272 		if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
273 			info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
274 		else
275 			info->pt = info->sp - 16;
276 	}
277 	UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __func__, info->sp, info->pt);
278 	return (struct pt_regs *) info->pt;
279 }
280 
281 /* Unwind accessors.  */
282 
283 int
284 unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
285 {
286 	unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
287 	struct unw_ireg *ireg;
288 	struct pt_regs *pt;
289 
290 	if ((unsigned) regnum - 1 >= 127) {
291 		if (regnum == 0 && !write) {
292 			*val = 0;	/* read r0 always returns 0 */
293 			*nat = 0;
294 			return 0;
295 		}
296 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
297 			   __func__, regnum);
298 		return -1;
299 	}
300 
301 	if (regnum < 32) {
302 		if (regnum >= 4 && regnum <= 7) {
303 			/* access a preserved register */
304 			ireg = &info->r4 + (regnum - 4);
305 			addr = ireg->loc;
306 			if (addr) {
307 				nat_addr = addr + ireg->nat.off;
308 				switch (ireg->nat.type) {
309 				      case UNW_NAT_VAL:
310 					/* simulate getf.sig/setf.sig */
311 					if (write) {
312 						if (*nat) {
313 							/* write NaTVal and be done with it */
314 							addr[0] = 0;
315 							addr[1] = 0x1fffe;
316 							return 0;
317 						}
318 						addr[1] = 0x1003e;
319 					} else {
320 						if (addr[0] == 0 && addr[1] == 0x1ffe) {
321 							/* return NaT and be done with it */
322 							*val = 0;
323 							*nat = 1;
324 							return 0;
325 						}
326 					}
327 					fallthrough;
328 				      case UNW_NAT_NONE:
329 					dummy_nat = 0;
330 					nat_addr = &dummy_nat;
331 					break;
332 
333 				      case UNW_NAT_MEMSTK:
334 					nat_mask = (1UL << ((long) addr & 0x1f8)/8);
335 					break;
336 
337 				      case UNW_NAT_REGSTK:
338 					nat_addr = ia64_rse_rnat_addr(addr);
339 					if ((unsigned long) addr < info->regstk.limit
340 					    || (unsigned long) addr >= info->regstk.top)
341 					{
342 						UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
343 							"[0x%lx-0x%lx)\n",
344 							__func__, (void *) addr,
345 							info->regstk.limit,
346 							info->regstk.top);
347 						return -1;
348 					}
349 					if ((unsigned long) nat_addr >= info->regstk.top)
350 						nat_addr = &info->sw->ar_rnat;
351 					nat_mask = (1UL << ia64_rse_slot_num(addr));
352 					break;
353 				}
354 			} else {
355 				addr = &info->sw->r4 + (regnum - 4);
356 				nat_addr = &info->sw->ar_unat;
357 				nat_mask = (1UL << ((long) addr & 0x1f8)/8);
358 			}
359 		} else {
360 			/* access a scratch register */
361 			pt = get_scratch_regs(info);
362 			addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
363 			if (info->pri_unat_loc)
364 				nat_addr = info->pri_unat_loc;
365 			else
366 				nat_addr = &info->sw->caller_unat;
367 			nat_mask = (1UL << ((long) addr & 0x1f8)/8);
368 		}
369 	} else {
370 		/* access a stacked register */
371 		addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
372 		nat_addr = ia64_rse_rnat_addr(addr);
373 		if ((unsigned long) addr < info->regstk.limit
374 		    || (unsigned long) addr >= info->regstk.top)
375 		{
376 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
377 				   "of rbs\n",  __func__);
378 			return -1;
379 		}
380 		if ((unsigned long) nat_addr >= info->regstk.top)
381 			nat_addr = &info->sw->ar_rnat;
382 		nat_mask = (1UL << ia64_rse_slot_num(addr));
383 	}
384 
385 	if (write) {
386 		if (read_only(addr)) {
387 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
388 				__func__);
389 		} else {
390 			*addr = *val;
391 			if (*nat)
392 				*nat_addr |= nat_mask;
393 			else
394 				*nat_addr &= ~nat_mask;
395 		}
396 	} else {
397 		if ((*nat_addr & nat_mask) == 0) {
398 			*val = *addr;
399 			*nat = 0;
400 		} else {
401 			*val = 0;	/* if register is a NaT, *addr may contain kernel data! */
402 			*nat = 1;
403 		}
404 	}
405 	return 0;
406 }
407 EXPORT_SYMBOL(unw_access_gr);
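/*
 * A note on the nat_mask computations above (a sketch of the convention, not
 * new behaviour): for memory-stack spills, ar.unat collects one NaT bit per
 * 8-byte word of a 512-byte aligned region, hence the bit number
 * ((long) addr & 0x1f8)/8.  For backing-store slots, the bit number comes
 * from ia64_rse_slot_num(addr) and the collection word is found via
 * ia64_rse_rnat_addr(addr) instead.
 */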
408 
409 int
410 unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
411 {
412 	unsigned long *addr;
413 	struct pt_regs *pt;
414 
415 	switch (regnum) {
416 		/* scratch: */
417 	      case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
418 	      case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
419 	      case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
420 
421 		/* preserved: */
422 	      case 1: case 2: case 3: case 4: case 5:
423 		addr = *(&info->b1_loc + (regnum - 1));
424 		if (!addr)
425 			addr = &info->sw->b1 + (regnum - 1);
426 		break;
427 
428 	      default:
429 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
430 			   __func__, regnum);
431 		return -1;
432 	}
433 	if (write)
434 		if (read_only(addr)) {
435 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
436 				__func__);
437 		} else
438 			*addr = *val;
439 	else
440 		*val = *addr;
441 	return 0;
442 }
443 EXPORT_SYMBOL(unw_access_br);
444 
445 int
446 unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
447 {
448 	struct ia64_fpreg *addr = NULL;
449 	struct pt_regs *pt;
450 
451 	if ((unsigned) (regnum - 2) >= 126) {
452 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
453 			   __func__, regnum);
454 		return -1;
455 	}
456 
457 	if (regnum <= 5) {
458 		addr = *(&info->f2_loc + (regnum - 2));
459 		if (!addr)
460 			addr = &info->sw->f2 + (regnum - 2);
461 	} else if (regnum <= 15) {
462 		if (regnum <= 11) {
463 			pt = get_scratch_regs(info);
464 			addr = &pt->f6  + (regnum - 6);
465 		}
466 		else
467 			addr = &info->sw->f12 + (regnum - 12);
468 	} else if (regnum <= 31) {
469 		addr = info->fr_loc[regnum - 16];
470 		if (!addr)
471 			addr = &info->sw->f16 + (regnum - 16);
472 	} else {
473 		struct task_struct *t = info->task;
474 
475 		if (write)
476 			ia64_sync_fph(t);
477 		else
478 			ia64_flush_fph(t);
479 		addr = t->thread.fph + (regnum - 32);
480 	}
481 
482 	if (write)
483 		if (read_only(addr)) {
484 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
485 				__func__);
486 		} else
487 			*addr = *val;
488 	else
489 		*val = *addr;
490 	return 0;
491 }
492 EXPORT_SYMBOL(unw_access_fr);
493 
494 int
495 unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
496 {
497 	unsigned long *addr;
498 	struct pt_regs *pt;
499 
500 	switch (regnum) {
501 	      case UNW_AR_BSP:
502 		addr = info->bsp_loc;
503 		if (!addr)
504 			addr = &info->sw->ar_bspstore;
505 		break;
506 
507 	      case UNW_AR_BSPSTORE:
508 		addr = info->bspstore_loc;
509 		if (!addr)
510 			addr = &info->sw->ar_bspstore;
511 		break;
512 
513 	      case UNW_AR_PFS:
514 		addr = info->pfs_loc;
515 		if (!addr)
516 			addr = &info->sw->ar_pfs;
517 		break;
518 
519 	      case UNW_AR_RNAT:
520 		addr = info->rnat_loc;
521 		if (!addr)
522 			addr = &info->sw->ar_rnat;
523 		break;
524 
525 	      case UNW_AR_UNAT:
526 		addr = info->unat_loc;
527 		if (!addr)
528 			addr = &info->sw->caller_unat;
529 		break;
530 
531 	      case UNW_AR_LC:
532 		addr = info->lc_loc;
533 		if (!addr)
534 			addr = &info->sw->ar_lc;
535 		break;
536 
537 	      case UNW_AR_EC:
538 		if (!info->cfm_loc)
539 			return -1;
540 		if (write)
541 			*info->cfm_loc =
542 				(*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
543 		else
544 			*val = (*info->cfm_loc >> 52) & 0x3f;
545 		return 0;
546 
547 	      case UNW_AR_FPSR:
548 		addr = info->fpsr_loc;
549 		if (!addr)
550 			addr = &info->sw->ar_fpsr;
551 		break;
552 
553 	      case UNW_AR_RSC:
554 		pt = get_scratch_regs(info);
555 		addr = &pt->ar_rsc;
556 		break;
557 
558 	      case UNW_AR_CCV:
559 		pt = get_scratch_regs(info);
560 		addr = &pt->ar_ccv;
561 		break;
562 
563 	      case UNW_AR_CSD:
564 		pt = get_scratch_regs(info);
565 		addr = &pt->ar_csd;
566 		break;
567 
568 	      case UNW_AR_SSD:
569 		pt = get_scratch_regs(info);
570 		addr = &pt->ar_ssd;
571 		break;
572 
573 	      default:
574 		UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
575 			   __func__, regnum);
576 		return -1;
577 	}
578 
579 	if (write) {
580 		if (read_only(addr)) {
581 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
582 				__func__);
583 		} else
584 			*addr = *val;
585 	} else
586 		*val = *addr;
587 	return 0;
588 }
589 EXPORT_SYMBOL(unw_access_ar);
590 
591 int
592 unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
593 {
594 	unsigned long *addr;
595 
596 	addr = info->pr_loc;
597 	if (!addr)
598 		addr = &info->sw->pr;
599 
600 	if (write) {
601 		if (read_only(addr)) {
602 			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
603 				__func__);
604 		} else
605 			*addr = *val;
606 	} else
607 		*val = *addr;
608 	return 0;
609 }
610 EXPORT_SYMBOL(unw_access_pr);
611 
612 
613 /* Routines to manipulate the state stack.  */
614 
615 static inline void
616 push (struct unw_state_record *sr)
617 {
618 	struct unw_reg_state *rs;
619 
620 	rs = alloc_reg_state();
621 	if (!rs) {
622 		printk(KERN_ERR "unwind: cannot stack reg state!\n");
623 		return;
624 	}
625 	memcpy(rs, &sr->curr, sizeof(*rs));
626 	sr->curr.next = rs;
627 }
628 
629 static void
630 pop (struct unw_state_record *sr)
631 {
632 	struct unw_reg_state *rs = sr->curr.next;
633 
634 	if (!rs) {
635 		printk(KERN_ERR "unwind: stack underflow!\n");
636 		return;
637 	}
638 	memcpy(&sr->curr, rs, sizeof(*rs));
639 	free_reg_state(rs);
640 }
641 
642 /* Make a copy of the state stack.  Non-recursive to avoid stack overflows.  */
643 static struct unw_reg_state *
644 dup_state_stack (struct unw_reg_state *rs)
645 {
646 	struct unw_reg_state *copy, *prev = NULL, *first = NULL;
647 
648 	while (rs) {
649 		copy = alloc_reg_state();
650 		if (!copy) {
651 			printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
652 			return NULL;
653 		}
654 		memcpy(copy, rs, sizeof(*copy));
655 		if (first)
656 			prev->next = copy;
657 		else
658 			first = copy;
659 		rs = rs->next;
660 		prev = copy;
661 	}
662 	return first;
663 }
664 
665 /* Free all stacked register states (but not RS itself).  */
666 static void
free_state_stack(struct unw_reg_state * rs)667 free_state_stack (struct unw_reg_state *rs)
668 {
669 	struct unw_reg_state *p, *next;
670 
671 	for (p = rs->next; p != NULL; p = next) {
672 		next = p->next;
673 		free_reg_state(p);
674 	}
675 	rs->next = NULL;
676 }
677 
678 /* Unwind decoder routines */
679 
680 static enum unw_register_index __attribute_const__
681 decode_abreg (unsigned char abreg, int memory)
682 {
683 	switch (abreg) {
684 	      case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
685 	      case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
686 	      case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
687 	      case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
688 	      case 0x60: return UNW_REG_PR;
689 	      case 0x61: return UNW_REG_PSP;
690 	      case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
691 	      case 0x63: return UNW_REG_RP;
692 	      case 0x64: return UNW_REG_BSP;
693 	      case 0x65: return UNW_REG_BSPSTORE;
694 	      case 0x66: return UNW_REG_RNAT;
695 	      case 0x67: return UNW_REG_UNAT;
696 	      case 0x68: return UNW_REG_FPSR;
697 	      case 0x69: return UNW_REG_PFS;
698 	      case 0x6a: return UNW_REG_LC;
699 	      default:
700 		break;
701 	}
702 	UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __func__, abreg);
703 	return UNW_REG_LC;
704 }
705 
706 static void
707 set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
708 {
709 	reg->val = val;
710 	reg->where = where;
711 	if (reg->when == UNW_WHEN_NEVER)
712 		reg->when = when;
713 }
714 
715 static void
716 alloc_spill_area (unsigned long *offp, unsigned long regsize,
717 		  struct unw_reg_info *lo, struct unw_reg_info *hi)
718 {
719 	struct unw_reg_info *reg;
720 
721 	for (reg = hi; reg >= lo; --reg) {
722 		if (reg->where == UNW_WHERE_SPILL_HOME) {
723 			reg->where = UNW_WHERE_PSPREL;
724 			*offp -= regsize;
725 			reg->val = *offp;
726 		}
727 	}
728 }
729 
730 static inline void
731 spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
732 {
733 	struct unw_reg_info *reg;
734 
735 	for (reg = *regp; reg <= lim; ++reg) {
736 		if (reg->where == UNW_WHERE_SPILL_HOME) {
737 			reg->when = t;
738 			*regp = reg + 1;
739 			return;
740 		}
741 	}
742 	UNW_DPRINT(0, "unwind.%s: excess spill!\n",  __func__);
743 }
744 
745 static inline void
746 finish_prologue (struct unw_state_record *sr)
747 {
748 	struct unw_reg_info *reg;
749 	unsigned long off;
750 	int i;
751 
752 	/*
753 	 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
754 	 * for Using Unwind Descriptors", rule 3):
755 	 */
756 	for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
757 		reg = sr->curr.reg + unw.save_order[i];
758 		if (reg->where == UNW_WHERE_GR_SAVE) {
759 			reg->where = UNW_WHERE_GR;
760 			reg->val = sr->gr_save_loc++;
761 		}
762 	}
763 
764 	/*
765 	 * Next, compute when the fp, general, and branch registers get
766 	 * saved.  This must come before alloc_spill_area() because
767 	 * we need to know which registers are spilled to their home
768 	 * locations.
769 	 */
770 	if (sr->imask) {
771 		unsigned char kind, mask = 0, *cp = sr->imask;
772 		int t;
773 		static const unsigned char limit[3] = {
774 			UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
775 		};
776 		struct unw_reg_info *(regs[3]);
777 
778 		regs[0] = sr->curr.reg + UNW_REG_F2;
779 		regs[1] = sr->curr.reg + UNW_REG_R4;
780 		regs[2] = sr->curr.reg + UNW_REG_B1;
781 
782 		for (t = 0; t < sr->region_len; ++t) {
783 			if ((t & 3) == 0)
784 				mask = *cp++;
785 			kind = (mask >> 2*(3-(t & 3))) & 3;
786 			if (kind > 0)
787 				spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
788 						sr->region_start + t);
789 		}
790 	}
791 	/*
792 	 * Next, lay out the memory stack spill area:
793 	 */
794 	if (sr->any_spills) {
795 		off = sr->spill_offset;
796 		alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
797 		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
798 		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
799 	}
800 }
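/*
 * The imask decoding in finish_prologue() follows the two-bits-per-slot
 * encoding of the spill_mask descriptor: each imask byte covers four
 * instruction slots, highest bits first, with kind 1 = FR spill, 2 = GR
 * spill, 3 = BR spill and 0 = no spill.  For instance, an imask byte of 0x64
 * (binary 01 10 01 00) would mark the first three slots it covers as FR, GR
 * and FR spills (example constructed from the decoding loop above).
 */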
801 
802 /*
803  * Region header descriptors.
804  */
805 
806 static void
807 desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
808 	       struct unw_state_record *sr)
809 {
810 	int i, region_start;
811 
812 	if (!(sr->in_body || sr->first_region))
813 		finish_prologue(sr);
814 	sr->first_region = 0;
815 
816 	/* check if we're done: */
817 	if (sr->when_target < sr->region_start + sr->region_len) {
818 		sr->done = 1;
819 		return;
820 	}
821 
822 	region_start = sr->region_start + sr->region_len;
823 
824 	for (i = 0; i < sr->epilogue_count; ++i)
825 		pop(sr);
826 	sr->epilogue_count = 0;
827 	sr->epilogue_start = UNW_WHEN_NEVER;
828 
829 	sr->region_start = region_start;
830 	sr->region_len = rlen;
831 	sr->in_body = body;
832 
833 	if (!body) {
834 		push(sr);
835 
836 		for (i = 0; i < 4; ++i) {
837 			if (mask & 0x8)
838 				set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
839 					sr->region_start + sr->region_len - 1, grsave++);
840 			mask <<= 1;
841 		}
842 		sr->gr_save_loc = grsave;
843 		sr->any_spills = 0;
844 		sr->imask = NULL;
845 		sr->spill_offset = 0x10;	/* default to psp+16 */
846 	}
847 }
848 
849 /*
850  * Prologue descriptors.
851  */
852 
853 static inline void
854 desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
855 {
856 	if (abi == 3 && context == 'i') {
857 		sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
858 		UNW_DPRINT(3, "unwind.%s: interrupt frame\n",  __func__);
859 	}
860 	else
861 		UNW_DPRINT(0, "unwind.%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
862 				__func__, abi, context);
863 }
864 
865 static inline void
866 desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
867 {
868 	int i;
869 
870 	for (i = 0; i < 5; ++i) {
871 		if (brmask & 1)
872 			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
873 				sr->region_start + sr->region_len - 1, gr++);
874 		brmask >>= 1;
875 	}
876 }
877 
878 static inline void
879 desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
880 {
881 	int i;
882 
883 	for (i = 0; i < 5; ++i) {
884 		if (brmask & 1) {
885 			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
886 				sr->region_start + sr->region_len - 1, 0);
887 			sr->any_spills = 1;
888 		}
889 		brmask >>= 1;
890 	}
891 }
892 
893 static inline void
894 desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
895 {
896 	int i;
897 
898 	for (i = 0; i < 4; ++i) {
899 		if ((grmask & 1) != 0) {
900 			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
901 				sr->region_start + sr->region_len - 1, 0);
902 			sr->any_spills = 1;
903 		}
904 		grmask >>= 1;
905 	}
906 	for (i = 0; i < 20; ++i) {
907 		if ((frmask & 1) != 0) {
908 			int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
909 			set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
910 				sr->region_start + sr->region_len - 1, 0);
911 			sr->any_spills = 1;
912 		}
913 		frmask >>= 1;
914 	}
915 }
916 
917 static inline void
918 desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
919 {
920 	int i;
921 
922 	for (i = 0; i < 4; ++i) {
923 		if ((frmask & 1) != 0) {
924 			set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
925 				sr->region_start + sr->region_len - 1, 0);
926 			sr->any_spills = 1;
927 		}
928 		frmask >>= 1;
929 	}
930 }
931 
932 static inline void
933 desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
934 {
935 	int i;
936 
937 	for (i = 0; i < 4; ++i) {
938 		if ((grmask & 1) != 0)
939 			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
940 				sr->region_start + sr->region_len - 1, gr++);
941 		grmask >>= 1;
942 	}
943 }
944 
945 static inline void
946 desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
947 {
948 	int i;
949 
950 	for (i = 0; i < 4; ++i) {
951 		if ((grmask & 1) != 0) {
952 			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
953 				sr->region_start + sr->region_len - 1, 0);
954 			sr->any_spills = 1;
955 		}
956 		grmask >>= 1;
957 	}
958 }
959 
960 static inline void
961 desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
962 {
963 	set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
964 		sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
965 }
966 
967 static inline void
968 desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
969 {
970 	sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
971 }
972 
973 static inline void
974 desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
975 {
976 	set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
977 }
978 
979 static inline void
980 desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
981 {
982 	set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
983 		0x10 - 4*pspoff);
984 }
985 
986 static inline void
987 desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
988 {
989 	set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
990 		4*spoff);
991 }
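/*
 * The 4* scaling in desc_reg_psprel()/desc_reg_sprel() reflects the
 * descriptor encoding, where spill offsets are counted in 4-byte words:
 * sp-relative locations are sp + 4*spoff, while psp-relative offsets grow
 * downward from psp+16, hence 0x10 - 4*pspoff (pspoff == 0 names the word at
 * psp+16, pspoff == 4 the word at psp, and so on).
 */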
992 
993 static inline void
994 desc_rp_br (unsigned char dst, struct unw_state_record *sr)
995 {
996 	sr->return_link_reg = dst;
997 }
998 
999 static inline void
1000 desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
1001 {
1002 	struct unw_reg_info *reg = sr->curr.reg + regnum;
1003 
1004 	if (reg->where == UNW_WHERE_NONE)
1005 		reg->where = UNW_WHERE_GR_SAVE;
1006 	reg->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1007 }
1008 
1009 static inline void
1010 desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
1011 {
1012 	sr->spill_offset = 0x10 - 4*pspoff;
1013 }
1014 
1015 static inline unsigned char *
1016 desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
1017 {
1018 	sr->imask = imaskp;
1019 	return imaskp + (2*sr->region_len + 7)/8;
1020 }
1021 
1022 /*
1023  * Body descriptors.
1024  */
1025 static inline void
1026 desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
1027 {
1028 	sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
1029 	sr->epilogue_count = ecount + 1;
1030 }
1031 
1032 static inline void
1033 desc_copy_state (unw_word label, struct unw_state_record *sr)
1034 {
1035 	struct unw_labeled_state *ls;
1036 
1037 	for (ls = sr->labeled_states; ls; ls = ls->next) {
1038 		if (ls->label == label) {
1039 			free_state_stack(&sr->curr);
1040 			memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
1041 			sr->curr.next = dup_state_stack(ls->saved_state.next);
1042 			return;
1043 		}
1044 	}
1045 	printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
1046 }
1047 
1048 static inline void
1049 desc_label_state (unw_word label, struct unw_state_record *sr)
1050 {
1051 	struct unw_labeled_state *ls;
1052 
1053 	ls = alloc_labeled_state();
1054 	if (!ls) {
1055 		printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
1056 		return;
1057 	}
1058 	ls->label = label;
1059 	memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
1060 	ls->saved_state.next = dup_state_stack(sr->curr.next);
1061 
1062 	/* insert into list of labeled states: */
1063 	ls->next = sr->labeled_states;
1064 	sr->labeled_states = ls;
1065 }
1066 
1067 /*
1068  * General descriptors.
1069  */
1070 
1071 static inline int
1072 desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
1073 {
1074 	if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
1075 		return 0;
1076 	if (qp > 0) {
1077 		if ((sr->pr_val & (1UL << qp)) == 0)
1078 			return 0;
1079 		sr->pr_mask |= (1UL << qp);
1080 	}
1081 	return 1;
1082 }
1083 
1084 static inline void
1085 desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
1086 {
1087 	struct unw_reg_info *r;
1088 
1089 	if (!desc_is_active(qp, t, sr))
1090 		return;
1091 
1092 	r = sr->curr.reg + decode_abreg(abreg, 0);
1093 	r->where = UNW_WHERE_NONE;
1094 	r->when = UNW_WHEN_NEVER;
1095 	r->val = 0;
1096 }
1097 
1098 static inline void
1099 desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
1100 		     unsigned char ytreg, struct unw_state_record *sr)
1101 {
1102 	enum unw_where where = UNW_WHERE_GR;
1103 	struct unw_reg_info *r;
1104 
1105 	if (!desc_is_active(qp, t, sr))
1106 		return;
1107 
1108 	if (x)
1109 		where = UNW_WHERE_BR;
1110 	else if (ytreg & 0x80)
1111 		where = UNW_WHERE_FR;
1112 
1113 	r = sr->curr.reg + decode_abreg(abreg, 0);
1114 	r->where = where;
1115 	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1116 	r->val = (ytreg & 0x7f);
1117 }
1118 
1119 static inline void
1120 desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
1121 		     struct unw_state_record *sr)
1122 {
1123 	struct unw_reg_info *r;
1124 
1125 	if (!desc_is_active(qp, t, sr))
1126 		return;
1127 
1128 	r = sr->curr.reg + decode_abreg(abreg, 1);
1129 	r->where = UNW_WHERE_PSPREL;
1130 	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1131 	r->val = 0x10 - 4*pspoff;
1132 }
1133 
1134 static inline void
1135 desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
1136 		       struct unw_state_record *sr)
1137 {
1138 	struct unw_reg_info *r;
1139 
1140 	if (!desc_is_active(qp, t, sr))
1141 		return;
1142 
1143 	r = sr->curr.reg + decode_abreg(abreg, 1);
1144 	r->where = UNW_WHERE_SPREL;
1145 	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1146 	r->val = 4*spoff;
1147 }
1148 
1149 #define UNW_DEC_BAD_CODE(code)			printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
1150 						       code);
1151 
1152 /*
1153  * region headers:
1154  */
1155 #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg)	desc_prologue(0,r,m,gr,arg)
1156 #define UNW_DEC_PROLOGUE(fmt,b,r,arg)		desc_prologue(b,r,0,32,arg)
1157 /*
1158  * prologue descriptors:
1159  */
1160 #define UNW_DEC_ABI(fmt,a,c,arg)		desc_abi(a,c,arg)
1161 #define UNW_DEC_BR_GR(fmt,b,g,arg)		desc_br_gr(b,g,arg)
1162 #define UNW_DEC_BR_MEM(fmt,b,arg)		desc_br_mem(b,arg)
1163 #define UNW_DEC_FRGR_MEM(fmt,g,f,arg)		desc_frgr_mem(g,f,arg)
1164 #define UNW_DEC_FR_MEM(fmt,f,arg)		desc_fr_mem(f,arg)
1165 #define UNW_DEC_GR_GR(fmt,m,g,arg)		desc_gr_gr(m,g,arg)
1166 #define UNW_DEC_GR_MEM(fmt,m,arg)		desc_gr_mem(m,arg)
1167 #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg)	desc_mem_stack_f(t,s,arg)
1168 #define UNW_DEC_MEM_STACK_V(fmt,t,arg)		desc_mem_stack_v(t,arg)
1169 #define UNW_DEC_REG_GR(fmt,r,d,arg)		desc_reg_gr(r,d,arg)
1170 #define UNW_DEC_REG_PSPREL(fmt,r,o,arg)		desc_reg_psprel(r,o,arg)
1171 #define UNW_DEC_REG_SPREL(fmt,r,o,arg)		desc_reg_sprel(r,o,arg)
1172 #define UNW_DEC_REG_WHEN(fmt,r,t,arg)		desc_reg_when(r,t,arg)
1173 #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1174 #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1175 #define UNW_DEC_PRIUNAT_GR(fmt,r,arg)		desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1176 #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg)	desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1177 #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg)	desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1178 #define UNW_DEC_RP_BR(fmt,d,arg)		desc_rp_br(d,arg)
1179 #define UNW_DEC_SPILL_BASE(fmt,o,arg)		desc_spill_base(o,arg)
1180 #define UNW_DEC_SPILL_MASK(fmt,m,arg)		(m = desc_spill_mask(m,arg))
1181 /*
1182  * body descriptors:
1183  */
1184 #define UNW_DEC_EPILOGUE(fmt,t,c,arg)		desc_epilogue(t,c,arg)
1185 #define UNW_DEC_COPY_STATE(fmt,l,arg)		desc_copy_state(l,arg)
1186 #define UNW_DEC_LABEL_STATE(fmt,l,arg)		desc_label_state(l,arg)
1187 /*
1188  * general unwind descriptors:
1189  */
1190 #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg)	desc_spill_reg_p(p,t,a,x,y,arg)
1191 #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg)	desc_spill_reg_p(0,t,a,x,y,arg)
1192 #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg)	desc_spill_psprel_p(p,t,a,o,arg)
1193 #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg)	desc_spill_psprel_p(0,t,a,o,arg)
1194 #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg)	desc_spill_sprel_p(p,t,a,o,arg)
1195 #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg)	desc_spill_sprel_p(0,t,a,o,arg)
1196 #define UNW_DEC_RESTORE_P(f,p,t,a,arg)		desc_restore_p(p,t,a,arg)
1197 #define UNW_DEC_RESTORE(f,t,a,arg)		desc_restore_p(0,t,a,arg)
1198 
1199 #include "unwind_decoder.c"
1200 
1201 
1202 /* Unwind scripts. */
1203 
1204 static inline unw_hash_index_t
1205 hash (unsigned long ip)
1206 {
1207 	/* magic number = ((sqrt(5)-1)/2)*2^64 */
1208 	static const unsigned long hashmagic = 0x9e3779b97f4a7c16UL;
1209 
1210 	return (ip >> 4) * hashmagic >> (64 - UNW_LOG_HASH_SIZE);
1211 }
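/*
 * This is Fibonacci (multiplicative) hashing: the magic constant is the
 * golden-ratio fraction scaled by 2^64, so the multiplication spreads nearby
 * instruction pointers across the top UNW_LOG_HASH_SIZE bits.  ip is shifted
 * right by 4 first because bundles are 16 bytes and the low bits of ip only
 * carry the slot number.
 */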
1212 
1213 static inline long
1214 cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
1215 {
1216 	read_lock(&script->lock);
1217 	if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
1218 		/* keep the read lock... */
1219 		return 1;
1220 	read_unlock(&script->lock);
1221 	return 0;
1222 }
1223 
1224 static inline struct unw_script *
1225 script_lookup (struct unw_frame_info *info)
1226 {
1227 	struct unw_script *script = unw.cache + info->hint;
1228 	unsigned short index;
1229 	unsigned long ip, pr;
1230 
1231 	if (UNW_DEBUG_ON(0))
1232 		return NULL;	/* Always regenerate scripts in debug mode */
1233 
1234 	STAT(++unw.stat.cache.lookups);
1235 
1236 	ip = info->ip;
1237 	pr = info->pr;
1238 
1239 	if (cache_match(script, ip, pr)) {
1240 		STAT(++unw.stat.cache.hinted_hits);
1241 		return script;
1242 	}
1243 
1244 	index = unw.hash[hash(ip)];
1245 	if (index >= UNW_CACHE_SIZE)
1246 		return NULL;
1247 
1248 	script = unw.cache + index;
1249 	while (1) {
1250 		if (cache_match(script, ip, pr)) {
1251 			/* update hint; no locking required as single-word writes are atomic */
1252 			STAT(++unw.stat.cache.normal_hits);
1253 			unw.cache[info->prev_script].hint = script - unw.cache;
1254 			return script;
1255 		}
1256 		if (script->coll_chain >= UNW_HASH_SIZE)
1257 			return NULL;
1258 		script = unw.cache + script->coll_chain;
1259 		STAT(++unw.stat.cache.collision_chain_traversals);
1260 	}
1261 }
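/*
 * The hint mechanism above chains lookups: each cached script records which
 * script was needed for the next frame the last time it ran, so a backtrace
 * that repeats a common call path typically finds every frame's script on
 * the first probe (counted as hinted_hits) without walking the hash chain.
 */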
1262 
1263 /*
1264  * On returning, a write lock for the SCRIPT is still being held.
1265  */
1266 static inline struct unw_script *
1267 script_new (unsigned long ip)
1268 {
1269 	struct unw_script *script, *prev, *tmp;
1270 	unw_hash_index_t index;
1271 	unsigned short head;
1272 
1273 	STAT(++unw.stat.script.news);
1274 
1275 	/*
1276 	 * Can't (easily) use cmpxchg() here because of ABA problem
1277 	 * that is intrinsic in cmpxchg()...
1278 	 */
1279 	head = unw.lru_head;
1280 	script = unw.cache + head;
1281 	unw.lru_head = script->lru_chain;
1282 
1283 	/*
1284 	 * We'd deadlock here if we interrupted a thread that is holding a read lock on
1285 	 * script->lock.  Thus, if the write_trylock() fails, we simply bail out.  The
1286 	 * alternative would be to disable interrupts whenever we hold a read-lock, but
1287 	 * that seems silly.
1288 	 */
1289 	if (!write_trylock(&script->lock))
1290 		return NULL;
1291 
1292 	/* re-insert script at the tail of the LRU chain: */
1293 	unw.cache[unw.lru_tail].lru_chain = head;
1294 	unw.lru_tail = head;
1295 
1296 	/* remove the old script from the hash table (if it's there): */
1297 	if (script->ip) {
1298 		index = hash(script->ip);
1299 		tmp = unw.cache + unw.hash[index];
1300 		prev = NULL;
1301 		while (1) {
1302 			if (tmp == script) {
1303 				if (prev)
1304 					prev->coll_chain = tmp->coll_chain;
1305 				else
1306 					unw.hash[index] = tmp->coll_chain;
1307 				break;
1308 			} else
1309 				prev = tmp;
1310 			if (tmp->coll_chain >= UNW_CACHE_SIZE)
1311 			/* old script wasn't in the hash-table */
1312 				break;
1313 			tmp = unw.cache + tmp->coll_chain;
1314 		}
1315 	}
1316 
1317 	/* enter new script in the hash table */
1318 	index = hash(ip);
1319 	script->coll_chain = unw.hash[index];
1320 	unw.hash[index] = script - unw.cache;
1321 
1322 	script->ip = ip;	/* set new IP while we're holding the locks */
1323 
1324 	STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
1325 
1326 	script->flags = 0;
1327 	script->hint = 0;
1328 	script->count = 0;
1329 	return script;
1330 }
1331 
1332 static void
1333 script_finalize (struct unw_script *script, struct unw_state_record *sr)
1334 {
1335 	script->pr_mask = sr->pr_mask;
1336 	script->pr_val = sr->pr_val;
1337 	/*
1338 	 * We could down-grade our write-lock on script->lock here but
1339 	 * the rwlock API doesn't offer atomic lock downgrading, so
1340 	 * we'll just keep the write-lock and release it later when
1341 	 * we're done using the script.
1342 	 */
1343 }
1344 
1345 static inline void
1346 script_emit (struct unw_script *script, struct unw_insn insn)
1347 {
1348 	if (script->count >= UNW_MAX_SCRIPT_LEN) {
1349 		UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1350 			__func__, UNW_MAX_SCRIPT_LEN);
1351 		return;
1352 	}
1353 	script->insn[script->count++] = insn;
1354 }
1355 
1356 static inline void
1357 emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1358 {
1359 	struct unw_reg_info *r = sr->curr.reg + i;
1360 	enum unw_insn_opcode opc;
1361 	struct unw_insn insn;
1362 	unsigned long val = 0;
1363 
1364 	switch (r->where) {
1365 	      case UNW_WHERE_GR:
1366 		if (r->val >= 32) {
1367 			/* register got spilled to a stacked register */
1368 			opc = UNW_INSN_SETNAT_TYPE;
1369 			val = UNW_NAT_REGSTK;
1370 		} else
1371 			/* register got spilled to a scratch register */
1372 			opc = UNW_INSN_SETNAT_MEMSTK;
1373 		break;
1374 
1375 	      case UNW_WHERE_FR:
1376 		opc = UNW_INSN_SETNAT_TYPE;
1377 		val = UNW_NAT_VAL;
1378 		break;
1379 
1380 	      case UNW_WHERE_BR:
1381 		opc = UNW_INSN_SETNAT_TYPE;
1382 		val = UNW_NAT_NONE;
1383 		break;
1384 
1385 	      case UNW_WHERE_PSPREL:
1386 	      case UNW_WHERE_SPREL:
1387 		opc = UNW_INSN_SETNAT_MEMSTK;
1388 		break;
1389 
1390 	      default:
1391 		UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1392 			   __func__, r->where);
1393 		return;
1394 	}
1395 	insn.opc = opc;
1396 	insn.dst = unw.preg_index[i];
1397 	insn.val = val;
1398 	script_emit(script, insn);
1399 }
1400 
1401 static void
1402 compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1403 {
1404 	struct unw_reg_info *r = sr->curr.reg + i;
1405 	enum unw_insn_opcode opc;
1406 	unsigned long val, rval;
1407 	struct unw_insn insn;
1408 	long need_nat_info;
1409 
1410 	if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1411 		return;
1412 
1413 	opc = UNW_INSN_MOVE;
1414 	val = rval = r->val;
1415 	need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
1416 
1417 	switch (r->where) {
1418 	      case UNW_WHERE_GR:
1419 		if (rval >= 32) {
1420 			opc = UNW_INSN_MOVE_STACKED;
1421 			val = rval - 32;
1422 		} else if (rval >= 4 && rval <= 7) {
1423 			if (need_nat_info) {
1424 				opc = UNW_INSN_MOVE2;
1425 				need_nat_info = 0;
1426 			}
1427 			val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1428 		} else if (rval == 0) {
1429 			opc = UNW_INSN_MOVE_CONST;
1430 			val = 0;
1431 		} else {
1432 			/* register got spilled to a scratch register */
1433 			opc = UNW_INSN_MOVE_SCRATCH;
1434 			val = pt_regs_off(rval);
1435 		}
1436 		break;
1437 
1438 	      case UNW_WHERE_FR:
1439 		if (rval <= 5)
1440 			val = unw.preg_index[UNW_REG_F2  + (rval -  2)];
1441 		else if (rval >= 16 && rval <= 31)
1442 			val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1443 		else {
1444 			opc = UNW_INSN_MOVE_SCRATCH;
1445 			if (rval <= 11)
1446 				val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
1447 			else
1448 				UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1449 					   __func__, rval);
1450 		}
1451 		break;
1452 
1453 	      case UNW_WHERE_BR:
1454 		if (rval >= 1 && rval <= 5)
1455 			val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1456 		else {
1457 			opc = UNW_INSN_MOVE_SCRATCH;
1458 			if (rval == 0)
1459 				val = offsetof(struct pt_regs, b0);
1460 			else if (rval == 6)
1461 				val = offsetof(struct pt_regs, b6);
1462 			else
1463 				val = offsetof(struct pt_regs, b7);
1464 		}
1465 		break;
1466 
1467 	      case UNW_WHERE_SPREL:
1468 		opc = UNW_INSN_ADD_SP;
1469 		break;
1470 
1471 	      case UNW_WHERE_PSPREL:
1472 		opc = UNW_INSN_ADD_PSP;
1473 		break;
1474 
1475 	      default:
1476 		UNW_DPRINT(0, "unwind.%s: register %u has unexpected `where' value of %u\n",
1477 			   __func__, i, r->where);
1478 		break;
1479 	}
1480 	insn.opc = opc;
1481 	insn.dst = unw.preg_index[i];
1482 	insn.val = val;
1483 	script_emit(script, insn);
1484 	if (need_nat_info)
1485 		emit_nat_info(sr, i, script);
1486 
1487 	if (i == UNW_REG_PSP) {
1488 		/*
1489 		 * info->psp must contain the _value_ of the previous
1490 		 * sp, not its save location.  We get this by
1491 		 * dereferencing the value we just stored in
1492 		 * info->psp:
1493 		 */
1494 		insn.opc = UNW_INSN_LOAD;
1495 		insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1496 		script_emit(script, insn);
1497 	}
1498 }
1499 
1500 static inline const struct unw_table_entry *
1501 lookup (struct unw_table *table, unsigned long rel_ip)
1502 {
1503 	const struct unw_table_entry *e = NULL;
1504 	unsigned long lo, hi, mid;
1505 
1506 	/* do a binary search for right entry: */
1507 	for (lo = 0, hi = table->length; lo < hi; ) {
1508 		mid = (lo + hi) / 2;
1509 		e = &table->array[mid];
1510 		if (rel_ip < e->start_offset)
1511 			hi = mid;
1512 		else if (rel_ip >= e->end_offset)
1513 			lo = mid + 1;
1514 		else
1515 			break;
1516 	}
1517 	if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
1518 		return NULL;
1519 	return e;
1520 }
1521 
1522 /*
1523  * Build an unwind script that unwinds from state OLD_STATE to the
1524  * entrypoint of the function that called OLD_STATE.
1525  */
1526 static inline struct unw_script *
1527 build_script (struct unw_frame_info *info)
1528 {
1529 	const struct unw_table_entry *e = NULL;
1530 	struct unw_script *script = NULL;
1531 	struct unw_labeled_state *ls, *next;
1532 	unsigned long ip = info->ip;
1533 	struct unw_state_record sr;
1534 	struct unw_table *table, *prev;
1535 	struct unw_reg_info *r;
1536 	struct unw_insn insn;
1537 	u8 *dp, *desc_end;
1538 	u64 hdr;
1539 	int i;
1540 	STAT(unsigned long start, parse_start;)
1541 
1542 	STAT(++unw.stat.script.builds; start = ia64_get_itc());
1543 
1544 	/* build state record */
1545 	memset(&sr, 0, sizeof(sr));
1546 	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1547 		r->when = UNW_WHEN_NEVER;
1548 	sr.pr_val = info->pr;
1549 
1550 	UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __func__, ip);
1551 	script = script_new(ip);
1552 	if (!script) {
1553 		UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n",  __func__);
1554 		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1555 		return NULL;
1556 	}
1557 	unw.cache[info->prev_script].hint = script - unw.cache;
1558 
1559 	/* search the kernel's and the modules' unwind tables for IP: */
1560 
1561 	STAT(parse_start = ia64_get_itc());
1562 
1563 	prev = NULL;
1564 	for (table = unw.tables; table; table = table->next) {
1565 		if (ip >= table->start && ip < table->end) {
1566 			/*
1567 			 * Leave the kernel unwind table at the very front,
1568 			 * lest moving it breaks some assumption elsewhere.
1569 			 * Otherwise, move the matching table to the second
1570 			 * position in the list so that traversals can benefit
1571 			 * from commonality in backtrace paths.
1572 			 */
1573 			if (prev && prev != unw.tables) {
1574 				/* unw is safe - we're already spinlocked */
1575 				prev->next = table->next;
1576 				table->next = unw.tables->next;
1577 				unw.tables->next = table;
1578 			}
1579 			e = lookup(table, ip - table->segment_base);
1580 			break;
1581 		}
1582 		prev = table;
1583 	}
1584 	if (!e) {
1585 		/* no info, return default unwinder (leaf proc, no mem stack, no saved regs)  */
1586 		UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
1587 			__func__, ip, unw.cache[info->prev_script].ip);
1588 		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1589 		sr.curr.reg[UNW_REG_RP].when = -1;
1590 		sr.curr.reg[UNW_REG_RP].val = 0;
1591 		compile_reg(&sr, UNW_REG_RP, script);
1592 		script_finalize(script, &sr);
1593 		STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1594 		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1595 		return script;
1596 	}
1597 
1598 	sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
1599 			  + (ip & 0xfUL));
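	/*
	 * Sketch of the conversion above: bundles are 16 bytes with three
	 * instruction slots, and the unwinder keeps the slot number in the
	 * low bits of ip.  An ip of start+0x21 (third bundle, slot 1), for
	 * example, yields when_target = 3*2 + 1 = 7 slots into the function.
	 */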
1600 	hdr = *(u64 *) (table->segment_base + e->info_offset);
1601 	dp =   (u8 *)  (table->segment_base + e->info_offset + 8);
1602 	desc_end = dp + 8*UNW_LENGTH(hdr);
1603 
1604 	while (!sr.done && dp < desc_end)
1605 		dp = unw_decode(dp, sr.in_body, &sr);
1606 
1607 	if (sr.when_target > sr.epilogue_start) {
1608 		/*
1609 		 * sp has been restored and all values on the memory stack below
1610 		 * psp also have been restored.
1611 		 */
1612 		sr.curr.reg[UNW_REG_PSP].val = 0;
1613 		sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
1614 		sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
1615 		for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1616 			if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
1617 			    || r->where == UNW_WHERE_SPREL)
1618 			{
1619 				r->val = 0;
1620 				r->where = UNW_WHERE_NONE;
1621 				r->when = UNW_WHEN_NEVER;
1622 			}
1623 	}
1624 
1625 	script->flags = sr.flags;
1626 
1627 	/*
1628 	 * If RP didn't get saved, generate entry for the return link
1629 	 * register.
1630 	 */
1631 	if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
1632 		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1633 		sr.curr.reg[UNW_REG_RP].when = -1;
1634 		sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
1635 		UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
1636 			   __func__, ip, sr.curr.reg[UNW_REG_RP].where,
1637 			   sr.curr.reg[UNW_REG_RP].val);
1638 	}
1639 
1640 #ifdef UNW_DEBUG
1641 	UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
1642 		__func__, table->segment_base + e->start_offset, sr.when_target);
1643 	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
1644 		if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
1645 			UNW_DPRINT(1, "  %s <- ", unw.preg_name[r - sr.curr.reg]);
1646 			switch (r->where) {
1647 			      case UNW_WHERE_GR:     UNW_DPRINT(1, "r%lu", r->val); break;
1648 			      case UNW_WHERE_FR:     UNW_DPRINT(1, "f%lu", r->val); break;
1649 			      case UNW_WHERE_BR:     UNW_DPRINT(1, "b%lu", r->val); break;
1650 			      case UNW_WHERE_SPREL:  UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
1651 			      case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
1652 			      case UNW_WHERE_NONE:
1653 				UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
1654 				break;
1655 
1656 			      default:
1657 				UNW_DPRINT(1, "BADWHERE(%d)", r->where);
1658 				break;
1659 			}
1660 			UNW_DPRINT(1, "\t\t%d\n", r->when);
1661 		}
1662 	}
1663 #endif
1664 
1665 	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1666 
1667 	/* translate state record into unwinder instructions: */
1668 
1669 	/*
1670 	 * First, set psp if we're dealing with a fixed-size frame;
1671 	 * subsequent instructions may depend on this value.
1672 	 */
1673 	if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
1674 	    && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
1675 	    && sr.curr.reg[UNW_REG_PSP].val != 0) {
1676 		/* new psp is sp plus frame size */
1677 		insn.opc = UNW_INSN_ADD;
1678 		insn.dst = offsetof(struct unw_frame_info, psp)/8;
1679 		insn.val = sr.curr.reg[UNW_REG_PSP].val;	/* frame size */
1680 		script_emit(script, insn);
1681 	}
1682 
1683 	/* determine where the primary UNaT is: */
1684 	if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1685 		i = UNW_REG_PRI_UNAT_MEM;
1686 	else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
1687 		i = UNW_REG_PRI_UNAT_GR;
1688 	else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1689 		i = UNW_REG_PRI_UNAT_MEM;
1690 	else
1691 		i = UNW_REG_PRI_UNAT_GR;
1692 
1693 	compile_reg(&sr, i, script);
1694 
1695 	for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
1696 		compile_reg(&sr, i, script);
1697 
1698 	/* free labeled register states & stack: */
1699 
1700 	STAT(parse_start = ia64_get_itc());
1701 	for (ls = sr.labeled_states; ls; ls = next) {
1702 		next = ls->next;
1703 		free_state_stack(&ls->saved_state);
1704 		free_labeled_state(ls);
1705 	}
1706 	free_state_stack(&sr.curr);
1707 	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1708 
1709 	script_finalize(script, &sr);
1710 	STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1711 	return script;
1712 }
1713 
1714 /*
1715  * Run the unwind script SCRIPT and update STATE to reflect the
1716  * register state that existed upon entry to the function that the
1717  * script was built for.
1718  */
1719 static inline void
1720 run_script (struct unw_script *script, struct unw_frame_info *state)
1721 {
1722 	struct unw_insn *ip, *limit, next_insn;
1723 	unsigned long opc, dst, val, off;
1724 	unsigned long *s = (unsigned long *) state;
1725 	STAT(unsigned long start;)
1726 
1727 	STAT(++unw.stat.script.runs; start = ia64_get_itc());
1728 	state->flags = script->flags;
1729 	ip = script->insn;
1730 	limit = script->insn + script->count;
1731 	next_insn = *ip;
1732 
1733 	while (ip++ < limit) {
1734 		opc = next_insn.opc;
1735 		dst = next_insn.dst;
1736 		val = next_insn.val;
1737 		next_insn = *ip;
1738 
1739 	  redo:
1740 		switch (opc) {
1741 		      case UNW_INSN_ADD:
1742 			s[dst] += val;
1743 			break;
1744 
1745 		      case UNW_INSN_MOVE2:
1746 			if (!s[val])
1747 				goto lazy_init;
1748 			s[dst+1] = s[val+1];
1749 			s[dst] = s[val];
1750 			break;
1751 
1752 		      case UNW_INSN_MOVE:
1753 			if (!s[val])
1754 				goto lazy_init;
1755 			s[dst] = s[val];
1756 			break;
1757 
1758 		      case UNW_INSN_MOVE_SCRATCH:
1759 			if (state->pt) {
1760 				s[dst] = (unsigned long) get_scratch_regs(state) + val;
1761 			} else {
1762 				s[dst] = 0;
1763 				UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
1764 					   __func__, dst, val);
1765 			}
1766 			break;
1767 
1768 		      case UNW_INSN_MOVE_CONST:
1769 			if (val == 0)
1770 				s[dst] = (unsigned long) &unw.r0;
1771 			else {
1772 				s[dst] = 0;
1773 				UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
1774 					   __func__, val);
1775 			}
1776 			break;
1777 
1778 
1779 		      case UNW_INSN_MOVE_STACKED:
1780 			s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
1781 								    val);
1782 			break;
1783 
1784 		      case UNW_INSN_ADD_PSP:
1785 			s[dst] = state->psp + val;
1786 			break;
1787 
1788 		      case UNW_INSN_ADD_SP:
1789 			s[dst] = state->sp + val;
1790 			break;
1791 
1792 		      case UNW_INSN_SETNAT_MEMSTK:
1793 			if (!state->pri_unat_loc)
1794 				state->pri_unat_loc = &state->sw->caller_unat;
1795 			/* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
1796 			s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
1797 			break;
1798 
1799 		      case UNW_INSN_SETNAT_TYPE:
1800 			s[dst+1] = val;
1801 			break;
1802 
1803 		      case UNW_INSN_LOAD:
1804 #ifdef UNW_DEBUG
1805 			if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
1806 			    || s[val] < TASK_SIZE)
1807 			{
1808 				UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
1809 					   __func__, s[val]);
1810 				break;
1811 			}
1812 #endif
1813 			s[dst] = *(unsigned long *) s[val];
1814 			break;
1815 		}
1816 	}
1817 	STAT(unw.stat.script.run_time += ia64_get_itc() - start);
1818 	return;
1819 
1820   lazy_init:
1821 	off = unw.sw_off[val];
1822 	s[val] = (unsigned long) state->sw + off;
1823 	if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
1824 		/*
1825 		 * We're initializing a general register: init NaT info, too.  Note that
1826 		 * the offset is a multiple of 8 which gives us the 3 bits needed for
1827 		 * the type field.
1828 		 */
1829 		s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
1830 	goto redo;
1831 }
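/*
 * Editorial sketch (not part of the original file, compiled out): run_script()
 * above treats struct unw_frame_info as a flat array of unsigned longs and
 * executes the cached script as a tiny instruction stream.  The fragment
 * below mimics that dispatch with only an ADD and a MOVE opcode so the idea
 * can be read in isolation; the opcode names and array layout here are
 * illustrative assumptions, not the kernel's definitions.
 */
#if 0
enum demo_opc { DEMO_ADD, DEMO_MOVE };

struct demo_insn {
	enum demo_opc opc;
	unsigned long dst, val;
};

/* Apply a short script to a word array, the way run_script() walks s[]. */
static void demo_run(unsigned long *s, const struct demo_insn *insn, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		switch (insn[i].opc) {
		case DEMO_ADD:
			s[insn[i].dst] += insn[i].val;	/* e.g. psp = sp + frame size */
			break;
		case DEMO_MOVE:
			s[insn[i].dst] = s[insn[i].val];	/* copy a saved location */
			break;
		}
	}
}
#endif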
1832 
1833 static int
1834 find_save_locs (struct unw_frame_info *info)
1835 {
1836 	int have_write_lock = 0;
1837 	struct unw_script *scr;
1838 	unsigned long flags = 0;
1839 
1840 	if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
1841 		/* don't let obviously bad addresses pollute the cache */
1842 		/* FIXME: should really be level 0 but it occurs too often. KAO */
1843 		UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __func__, info->ip);
1844 		info->rp_loc = NULL;
1845 		return -1;
1846 	}
1847 
1848 	scr = script_lookup(info);
1849 	if (!scr) {
1850 		spin_lock_irqsave(&unw.lock, flags);
1851 		scr = build_script(info);
1852 		if (!scr) {
1853 			spin_unlock_irqrestore(&unw.lock, flags);
1854 			UNW_DPRINT(0,
1855 				   "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1856 				   __func__, info->ip);
1857 			return -1;
1858 		}
1859 		have_write_lock = 1;
1860 	}
1861 	info->hint = scr->hint;
1862 	info->prev_script = scr - unw.cache;
1863 
1864 	run_script(scr, info);
1865 
1866 	if (have_write_lock) {
1867 		write_unlock(&scr->lock);
1868 		spin_unlock_irqrestore(&unw.lock, flags);
1869 	} else
1870 		read_unlock(&scr->lock);
1871 	return 0;
1872 }
1873 
1874 static int
1875 unw_valid(const struct unw_frame_info *info, unsigned long* p)
1876 {
1877 	unsigned long loc = (unsigned long)p;
1878 	return (loc >= info->regstk.limit && loc < info->regstk.top) ||
1879 	       (loc >= info->memstk.top && loc < info->memstk.limit);
1880 }
1881 
1882 int
1883 unw_unwind (struct unw_frame_info *info)
1884 {
1885 	unsigned long prev_ip, prev_sp, prev_bsp;
1886 	unsigned long ip, pr, num_regs;
1887 	STAT(unsigned long start, flags;)
1888 	int retval;
1889 
1890 	STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
1891 
1892 	prev_ip = info->ip;
1893 	prev_sp = info->sp;
1894 	prev_bsp = info->bsp;
1895 
1896 	/* validate the return IP pointer */
1897 	if (!unw_valid(info, info->rp_loc)) {
1898 		/* FIXME: should really be level 0 but it occurs too often. KAO */
1899 		UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
1900 			   __func__, info->ip);
1901 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1902 		return -1;
1903 	}
1904 	/* restore the ip */
1905 	ip = info->ip = *info->rp_loc;
1906 	if (ip < GATE_ADDR) {
1907 		UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __func__, ip);
1908 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1909 		return -1;
1910 	}
1911 
1912 	/* validate the previous stack frame pointer */
1913 	if (!unw_valid(info, info->pfs_loc)) {
1914 		UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __func__);
1915 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1916 		return -1;
1917 	}
1918 	/* restore the cfm: */
1919 	info->cfm_loc = info->pfs_loc;
1920 
1921 	/* restore the bsp: */
1922 	pr = info->pr;
1923 	num_regs = 0;
1924 	if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
1925 		info->pt = info->sp + 16;
1926 		if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
1927 			num_regs = *info->cfm_loc & 0x7f;		/* size of frame */
1928 		info->pfs_loc =
1929 			(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
1930 		UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __func__, info->pt);
1931 	} else
1932 		num_regs = (*info->cfm_loc >> 7) & 0x7f;	/* size of locals */
1933 	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
1934 	if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
1935 		UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
1936 			__func__, info->bsp, info->regstk.limit, info->regstk.top);
1937 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1938 		return -1;
1939 	}
1940 
1941 	/* restore the sp: */
1942 	info->sp = info->psp;
1943 	if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
1944 		UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
1945 			__func__, info->sp, info->memstk.top, info->memstk.limit);
1946 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1947 		return -1;
1948 	}
1949 
1950 	if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
1951 		UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
1952 			   __func__, ip);
1953 		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1954 		return -1;
1955 	}
1956 
1957 	/* as we unwind, the saved ar.unat becomes the primary unat: */
1958 	info->pri_unat_loc = info->unat_loc;
1959 
1960 	/* finally, restore the predicates: */
1961 	unw_get_pr(info, &info->pr);
1962 
1963 	retval = find_save_locs(info);
1964 	STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1965 	return retval;
1966 }
1967 EXPORT_SYMBOL(unw_unwind);
1968 
1969 int
1970 unw_unwind_to_user (struct unw_frame_info *info)
1971 {
1972 	unsigned long ip, sp, pr = info->pr;
1973 
1974 	do {
1975 		unw_get_sp(info, &sp);
1976 		if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
1977 		    < IA64_PT_REGS_SIZE) {
1978 			UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
1979 				   __func__);
1980 			break;
1981 		}
1982 		if (unw_is_intr_frame(info) &&
1983 		    (pr & (1UL << PRED_USER_STACK)))
1984 			return 0;
1985 		if (unw_get_pr (info, &pr) < 0) {
1986 			unw_get_rp(info, &ip);
1987 			UNW_DPRINT(0, "unwind.%s: failed to read "
1988 				   "predicate register (ip=0x%lx)\n",
1989 				__func__, ip);
1990 			return -1;
1991 		}
1992 	} while (unw_unwind(info) >= 0);
1993 	unw_get_ip(info, &ip);
1994 	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
1995 		   __func__, ip);
1996 	return -1;
1997 }
1998 EXPORT_SYMBOL(unw_unwind_to_user);
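/*
 * Editorial sketch (not part of the original file, compiled out): a typical
 * in-kernel consumer initializes a frame-info structure for a blocked task
 * and then calls unw_unwind() until it fails, reading the IP of each frame
 * with unw_get_ip().  This mirrors how the ia64 backtrace code uses the API
 * exported above; the printk format is illustrative only.
 */
#if 0
static void demo_backtrace(struct task_struct *task)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, task);
	do {
		unw_get_ip(&info, &ip);
		if (ip == 0)
			break;
		printk(KERN_DEBUG "frame ip=0x%lx\n", ip);
	} while (unw_unwind(&info) >= 0);
}
#endif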
1999 
2000 static void
2001 init_frame_info (struct unw_frame_info *info, struct task_struct *t,
2002 		 struct switch_stack *sw, unsigned long stktop)
2003 {
2004 	unsigned long rbslimit, rbstop, stklimit;
2005 	STAT(unsigned long start, flags;)
2006 
2007 	STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
2008 
2009 	/*
2010 	 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
2011 	 * don't want to do that because it would be slow as each preserved register would
2012 	 * have to be processed.  Instead, what we do here is zero out the frame info and
2013 	 * start the unwind process at the function that created the switch_stack frame.
2014 	 * When a preserved value in switch_stack needs to be accessed, run_script() will
2015 	 * initialize the appropriate pointer on demand.
2016 	 */
2017 	memset(info, 0, sizeof(*info));
2018 
2019 	rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
2020 	stklimit = (unsigned long) t + IA64_STK_OFFSET;
2021 
2022 	rbstop   = sw->ar_bspstore;
2023 	if (rbstop > stklimit || rbstop < rbslimit)
2024 		rbstop = rbslimit;
2025 
2026 	if (stktop <= rbstop)
2027 		stktop = rbstop;
2028 	if (stktop > stklimit)
2029 		stktop = stklimit;
2030 
2031 	info->regstk.limit = rbslimit;
2032 	info->regstk.top   = rbstop;
2033 	info->memstk.limit = stklimit;
2034 	info->memstk.top   = stktop;
2035 	info->task = t;
2036 	info->sw  = sw;
2037 	info->sp = info->psp = stktop;
2038 	info->pr = sw->pr;
2039 	UNW_DPRINT(3, "unwind.%s:\n"
2040 		   "  task   0x%lx\n"
2041 		   "  rbs = [0x%lx-0x%lx)\n"
2042 		   "  stk = [0x%lx-0x%lx)\n"
2043 		   "  pr     0x%lx\n"
2044 		   "  sw     0x%lx\n"
2045 		   "  sp     0x%lx\n",
2046 		   __func__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
2047 		   info->pr, (unsigned long) info->sw, info->sp);
2048 	STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
2049 }
2050 
2051 void
2052 unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
2053 {
2054 	unsigned long sol;
2055 
2056 	init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
2057 	info->cfm_loc = &sw->ar_pfs;
2058 	sol = (*info->cfm_loc >> 7) & 0x7f;
2059 	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
2060 	info->ip = sw->b0;
2061 	UNW_DPRINT(3, "unwind.%s:\n"
2062 		   "  bsp    0x%lx\n"
2063 		   "  sol    0x%lx\n"
2064 		   "  ip     0x%lx\n",
2065 		   __func__, info->bsp, sol, info->ip);
2066 	find_save_locs(info);
2067 }
2068 
2069 EXPORT_SYMBOL(unw_init_frame_info);
2070 
2071 void
2072 unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
2073 {
2074 	struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
2075 
2076 	UNW_DPRINT(1, "unwind.%s\n", __func__);
2077 	unw_init_frame_info(info, t, sw);
2078 }
2079 EXPORT_SYMBOL(unw_init_from_blocked_task);
2080 
2081 static void
2082 init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
2083 		   unsigned long gp, const void *table_start, const void *table_end)
2084 {
2085 	const struct unw_table_entry *start = table_start, *end = table_end;
2086 
2087 	table->name = name;
2088 	table->segment_base = segment_base;
2089 	table->gp = gp;
2090 	table->start = segment_base + start[0].start_offset;
2091 	table->end = segment_base + end[-1].end_offset;
2092 	table->array = start;
2093 	table->length = end - start;
2094 }
2095 
2096 void *
2097 unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
2098 		      const void *table_start, const void *table_end)
2099 {
2100 	const struct unw_table_entry *start = table_start, *end = table_end;
2101 	struct unw_table *table;
2102 	unsigned long flags;
2103 
2104 	if (end - start <= 0) {
2105 		UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2106 			   __func__);
2107 		return NULL;
2108 	}
2109 
2110 	table = kmalloc(sizeof(*table), GFP_USER);
2111 	if (!table)
2112 		return NULL;
2113 
2114 	init_unwind_table(table, name, segment_base, gp, table_start, table_end);
2115 
2116 	spin_lock_irqsave(&unw.lock, flags);
2117 	{
2118 		/* keep kernel unwind table at the front (it's searched most commonly): */
2119 		table->next = unw.tables->next;
2120 		unw.tables->next = table;
2121 	}
2122 	spin_unlock_irqrestore(&unw.lock, flags);
2123 
2124 	return table;
2125 }
2126 
2127 void
2128 unw_remove_unwind_table (void *handle)
2129 {
2130 	struct unw_table *table, *prev;
2131 	struct unw_script *tmp;
2132 	unsigned long flags;
2133 	long index;
2134 
2135 	if (!handle) {
2136 		UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2137 			   __func__);
2138 		return;
2139 	}
2140 
2141 	table = handle;
2142 	if (table == &unw.kernel_table) {
2143 		UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2144 			   "no-can-do!\n", __func__);
2145 		return;
2146 	}
2147 
2148 	spin_lock_irqsave(&unw.lock, flags);
2149 	{
2150 		/* first, delete the table: */
2151 
2152 		for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
2153 			if (prev->next == table)
2154 				break;
2155 		if (!prev) {
2156 			UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2157 				   __func__, (void *) table);
2158 			spin_unlock_irqrestore(&unw.lock, flags);
2159 			return;
2160 		}
2161 		prev->next = table->next;
2162 	}
2163 	spin_unlock_irqrestore(&unw.lock, flags);
2164 
2165 	/* next, remove hash table entries for this table */
2166 
2167 	for (index = 0; index < UNW_HASH_SIZE; ++index) {
2168 		tmp = unw.cache + unw.hash[index];
2169 		if (unw.hash[index] >= UNW_CACHE_SIZE
2170 		    || tmp->ip < table->start || tmp->ip >= table->end)
2171 			continue;
2172 
2173 		write_lock(&tmp->lock);
2174 		{
2175 			if (tmp->ip >= table->start && tmp->ip < table->end) {
2176 				unw.hash[index] = tmp->coll_chain;
2177 				tmp->ip = 0;
2178 			}
2179 		}
2180 		write_unlock(&tmp->lock);
2181 	}
2182 
2183 	kfree(table);
2184 }
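/*
 * Editorial sketch (not part of the original file, compiled out): a loader of
 * dynamic code registers its unwind section with unw_add_unwind_table() and
 * keeps the returned handle so it can hand it back to
 * unw_remove_unwind_table() at teardown time.  The wrapper names and stored
 * handle below are illustrative assumptions rather than the actual
 * module-loader layout.
 */
#if 0
static void *demo_unw_handle;

static void demo_register_unwind(const char *mod_name, unsigned long core_base,
				 unsigned long gp, void *unw_start, void *unw_end)
{
	demo_unw_handle = unw_add_unwind_table(mod_name, core_base, gp,
					       unw_start, unw_end);
}

static void demo_unregister_unwind(void)
{
	if (demo_unw_handle) {
		unw_remove_unwind_table(demo_unw_handle);
		demo_unw_handle = NULL;
	}
}
#endif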
2185 
2186 static int __init
2187 create_gate_table (void)
2188 {
2189 	const struct unw_table_entry *entry, *start, *end;
2190 	unsigned long *lp, segbase = GATE_ADDR;
2191 	size_t info_size, size;
2192 	char *info;
2193 	Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
2194 	int i;
2195 
2196 	for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
2197 		if (phdr->p_type == PT_IA_64_UNWIND) {
2198 			punw = phdr;
2199 			break;
2200 		}
2201 
2202 	if (!punw) {
2203 		printk("%s: failed to find gate DSO's unwind table!\n", __func__);
2204 		return 0;
2205 	}
2206 
2207 	start = (const struct unw_table_entry *) punw->p_vaddr;
2208 	end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
2209 	size  = 0;
2210 
2211 	unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
2212 
2213 	for (entry = start; entry < end; ++entry)
2214 		size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2215 	size += 8;	/* reserve space for "end of table" marker */
2216 
2217 	unw.gate_table = kmalloc(size, GFP_KERNEL);
2218 	if (!unw.gate_table) {
2219 		unw.gate_table_size = 0;
2220 		printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __func__);
2221 		return 0;
2222 	}
2223 	unw.gate_table_size = size;
2224 
2225 	lp = unw.gate_table;
2226 	info = (char *) unw.gate_table + size;
2227 
2228 	for (entry = start; entry < end; ++entry, lp += 3) {
2229 		info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2230 		info -= info_size;
2231 		memcpy(info, (char *) segbase + entry->info_offset, info_size);
2232 
2233 		lp[0] = segbase + entry->start_offset;		/* start */
2234 		lp[1] = segbase + entry->end_offset;		/* end */
2235 		lp[2] = info - (char *) unw.gate_table;		/* info */
2236 	}
2237 	*lp = 0;	/* end-of-table marker */
2238 	return 0;
2239 }
2240 
2241 __initcall(create_gate_table);
2242 
2243 void __init
2244 unw_init (void)
2245 {
2246 	extern char __gp[];
2247 	extern void unw_hash_index_t_is_too_narrow (void);
2248 	long i, off;
2249 
2250 	if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
2251 		unw_hash_index_t_is_too_narrow();
2252 
2253 	unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT);
2254 	unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
2255 	unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
2256 	unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
2257 	unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT);
2258 	unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
2259 	unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
2260 	unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
2261 	for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
2262 		unw.sw_off[unw.preg_index[i]] = off;
2263 	for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
2264 		unw.sw_off[unw.preg_index[i]] = off;
2265 	for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
2266 		unw.sw_off[unw.preg_index[i]] = off;
2267 	for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
2268 		unw.sw_off[unw.preg_index[i]] = off;
2269 
2270 	for (i = 0; i < UNW_CACHE_SIZE; ++i) {
2271 		if (i > 0)
2272 			unw.cache[i].lru_chain = (i - 1);
2273 		unw.cache[i].coll_chain = -1;
2274 		rwlock_init(&unw.cache[i].lock);
2275 	}
2276 	unw.lru_head = UNW_CACHE_SIZE - 1;
2277 	unw.lru_tail = 0;
2278 
2279 	init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
2280 			  __start_unwind, __end_unwind);
2281 }
2282 
2283 /*
2284  * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2285  *
2286  *	This system call has been deprecated.  The new and improved way to get
2287  *	at the kernel's unwind info is via the gate DSO.  The address of the
2288  *	ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
2289  *
2290  * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2291  *
2292  * This system call copies the unwind data into the buffer pointed to by BUF and returns
2293  * the size of the unwind data.  If BUF_SIZE is smaller than the size of the unwind data
2294  * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
2295  * unwind data.
2296  *
2297  * The first portion of the unwind data contains an unwind table and the rest contains the
2298  * associated unwind info (in no particular order).  The unwind table consists of a table
2299  * of entries of the form:
2300  *
2301  *	u64 start;	(64-bit address of start of function)
2302  *	u64 end;	(64-bit address of end of function)
2303  *	u64 info;	(BUF-relative offset to unwind info)
2304  *
2305  * The end of the unwind table is indicated by an entry with a START address of zero.
2306  *
2307  * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
2308  * on the format of the unwind info.
2309  *
2310  * ERRORS
2311  *	EFAULT	BUF points outside your accessible address space.
2312  */
2313 asmlinkage long
2314 sys_getunwind (void __user *buf, size_t buf_size)
2315 {
2316 	if (buf && buf_size >= unw.gate_table_size)
2317 		if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
2318 			return -EFAULT;
2319 	return unw.gate_table_size;
2320 }
2321
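/*
 * Editorial sketch (not part of the original file, compiled out): a consumer
 * of the table built by create_gate_table() above (the same data this system
 * call copies out) walks {start, end, info} triples until it reaches the
 * entry whose START address is zero, as described in the comment before
 * sys_getunwind().  The entry struct name and printk format are illustrative
 * assumptions.
 */
#if 0
struct demo_unw_entry {
	unsigned long start;	/* address of first instruction of function */
	unsigned long end;	/* address past last instruction of function */
	unsigned long info;	/* buffer-relative offset of unwind info */
};

static void demo_walk_unwind(const void *buf, long size)
{
	const struct demo_unw_entry *e = buf;

	/* the table is terminated by an entry whose start address is zero */
	for (; (const char *) e < (const char *) buf + size && e->start; ++e)
		printk(KERN_DEBUG "func [0x%lx-0x%lx) info at +0x%lx\n",
		       e->start, e->end, e->info);
}
#endif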