xref: /openbmc/linux/arch/sh/kernel/dwarf.c (revision fd589a8f)
/*
 * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This is an implementation of a DWARF unwinder. Its main purpose is
 * for generating stacktrace information. Based on the DWARF 3
 * specification from http://www.dwarfstd.org.
 *
 * TODO:
 *	- DWARF64 doesn't work.
 *	- Registers with DWARF_VAL_OFFSET rules aren't handled properly.
 */

/* #define DEBUG */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <asm/dwarf.h>
#include <asm/unwinder.h>
#include <asm/sections.h>
#include <asm/unaligned.h>
#include <asm/stacktrace.h>

/* Reserve enough memory for two stack frames */
#define DWARF_FRAME_MIN_REQ	2
/* ... with 4 registers per frame. */
#define DWARF_REG_MIN_REQ	(DWARF_FRAME_MIN_REQ * 4)
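/*
 * The minimums above exist because the pools below are used with
 * GFP_ATOMIC from the unwinder: a mempool guarantees that at least
 * this many objects can still be handed out even when the slab
 * allocator cannot satisfy an atomic allocation.
 */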

static struct kmem_cache *dwarf_frame_cachep;
static mempool_t *dwarf_frame_pool;

static struct kmem_cache *dwarf_reg_cachep;
static mempool_t *dwarf_reg_pool;

static LIST_HEAD(dwarf_cie_list);
static DEFINE_SPINLOCK(dwarf_cie_lock);

static LIST_HEAD(dwarf_fde_list);
static DEFINE_SPINLOCK(dwarf_fde_lock);

static struct dwarf_cie *cached_cie;

/**
 *	dwarf_frame_alloc_reg - allocate memory for a DWARF register
 *	@frame: the DWARF frame whose list of registers we insert on
 *	@reg_num: the register number
 *
 *	Allocate space for, and initialise, a dwarf reg from
 *	dwarf_reg_pool and insert it onto the (unsorted) linked-list of
 *	dwarf registers for @frame.
 *
 *	Return the initialised DWARF reg.
 */
static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
					       unsigned int reg_num)
{
	struct dwarf_reg *reg;

	reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
	if (!reg) {
		printk(KERN_WARNING "Unable to allocate a DWARF register\n");
		/*
		 * Let's just bomb hard here, we have no way to
		 * gracefully recover.
		 */
		UNWINDER_BUG();
	}

	reg->number = reg_num;
	reg->addr = 0;
	reg->flags = 0;

	list_add(&reg->link, &frame->reg_list);

	return reg;
}

static void dwarf_frame_free_regs(struct dwarf_frame *frame)
{
	struct dwarf_reg *reg, *n;

	list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
		list_del(&reg->link);
		mempool_free(reg, dwarf_reg_pool);
	}
}

/**
 *	dwarf_frame_reg - return a DWARF register
 *	@frame: the DWARF frame to search in for @reg_num
 *	@reg_num: the register number to search for
 *
 *	Look up and return the dwarf reg @reg_num for this frame. Return
 *	NULL if @reg_num is an invalid register number.
 */
static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
					 unsigned int reg_num)
{
	struct dwarf_reg *reg;

	list_for_each_entry(reg, &frame->reg_list, link) {
		if (reg->number == reg_num)
			return reg;
	}

	return NULL;
}

/**
 *	dwarf_read_addr - read dwarf data
 *	@src: source address of data
 *	@dst: destination address to store the data to
 *
 *	Read 'n' bytes from @src, where 'n' is the size of an address on
 *	the native machine, and store them at @dst. We have to be careful
 *	when reading from @src and writing to @dst, because they can be
 *	arbitrarily aligned. Return 'n' - the number of bytes read.
 */
static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
{
	u32 val = get_unaligned(src);
	put_unaligned(val, dst);
	return sizeof(unsigned long *);
}

/**
 *	dwarf_read_uleb128 - read unsigned LEB128 data
 *	@addr: the address where the ULEB128 data is stored
 *	@ret: address to store the result
 *
 *	Decode an unsigned LEB128 encoded datum. The algorithm is taken
 *	from Appendix C of the DWARF 3 spec. For information on the
 *	encodings refer to section "7.6 - Variable Length Data". Return
 *	the number of bytes read.
 */
static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
{
	unsigned int result;
	unsigned char byte;
	int shift, count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		count++;

		result |= (byte & 0x7f) << shift;
		shift += 7;

		if (!(byte & 0x80))
			break;
	}

	*ret = result;

	return count;
}
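
/*
 * Worked example: the byte sequence 0xe5 0x8e 0x26 decodes to 624485.
 * Each byte contributes its low seven bits, least significant group
 * first (0x65 + (0x0e << 7) + (0x26 << 14)), and a clear top bit in
 * the final byte (0x26) terminates the sequence.
 */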

/**
 *	dwarf_read_leb128 - read signed LEB128 data
 *	@addr: the address of the LEB128 encoded data
 *	@ret: address to store the result
 *
 *	Decode signed LEB128 data. The algorithm is taken from Appendix
 *	C of the DWARF 3 spec. Return the number of bytes read.
 */
static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
{
	unsigned char byte;
	int result, shift;
	int num_bits;
	int count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		result |= (byte & 0x7f) << shift;
		shift += 7;
		count++;

		if (!(byte & 0x80))
			break;
	}

	/* The number of bits in a signed integer. */
	num_bits = 8 * sizeof(result);

	if ((shift < num_bits) && (byte & 0x40))
		result |= (-1 << shift);

	*ret = result;

	return count;
}
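
/*
 * Worked example: the single byte 0x7e decodes to -2. The low seven
 * bits give 126, and because bit 6 of the final byte is set the value
 * is sign-extended, yielding 126 - 128 = -2.
 */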

/**
 *	dwarf_read_encoded_value - return the decoded value at @addr
 *	@addr: the address of the encoded value
 *	@val: where to write the decoded value
 *	@encoding: the encoding with which we can decode @addr
 *
 *	GCC emits encoded addresses in the .eh_frame FDE entries. Decode
 *	the value at @addr using @encoding. The decoded value is written
 *	to @val and the number of bytes read is returned.
 */
static int dwarf_read_encoded_value(char *addr, unsigned long *val,
				    char encoding)
{
	unsigned long decoded_addr = 0;
	int count = 0;

	switch (encoding & 0x70) {
	case DW_EH_PE_absptr:
		break;
	case DW_EH_PE_pcrel:
		decoded_addr = (unsigned long)addr;
		break;
	default:
		pr_debug("encoding=0x%x\n", (encoding & 0x70));
		UNWINDER_BUG();
	}

	if ((encoding & 0x07) == 0x00)
		encoding |= DW_EH_PE_udata4;

	switch (encoding & 0x0f) {
	case DW_EH_PE_sdata4:
	case DW_EH_PE_udata4:
		count += 4;
		decoded_addr += get_unaligned((u32 *)addr);
		__raw_writel(decoded_addr, val);
		break;
	default:
		pr_debug("encoding=0x%x\n", encoding);
		UNWINDER_BUG();
	}

	return count;
}
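
/*
 * For example, with a DW_EH_PE_pcrel | DW_EH_PE_sdata4 encoding (a
 * combination GCC commonly emits for .eh_frame) the stored 32-bit
 * value is an offset relative to the address of the field itself, so
 * the absolute address is recovered by adding the two, as above.
 */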

/**
 *	dwarf_entry_len - return the length of an FDE or CIE
 *	@addr: the address of the entry
 *	@len: the length of the entry
 *
 *	Read the initial_length field of the entry and store the size of
 *	the entry in @len. We return the number of bytes read. Return a
 *	count of 0 on error.
 */
static inline int dwarf_entry_len(char *addr, unsigned long *len)
{
	u32 initial_len;
	int count;

	initial_len = get_unaligned((u32 *)addr);
	count = 4;

	/*
	 * An initial length field value in the range DW_EXT_LO -
	 * DW_EXT_HI indicates an extension, and should not be
	 * interpreted as a length. The only extension that we currently
	 * understand is the use of DWARF64 addresses.
	 */
	if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
		/*
		 * The 64-bit length field immediately follows the
		 * compulsory 32-bit length field.
		 */
		if (initial_len == DW_EXT_DWARF64) {
			*len = get_unaligned((u64 *)(addr + 4));
			count = 12;
		} else {
			printk(KERN_WARNING "Unknown DWARF extension\n");
			count = 0;
		}
	} else
		*len = initial_len;

	return count;
}

/**
 *	dwarf_lookup_cie - locate the CIE
 *	@cie_ptr: the CIE pointer (the address of the CIE) to look up
 */
static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
{
	struct dwarf_cie *cie;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

	/*
	 * We've cached the last CIE we looked up because chances are
	 * that the FDE wants this CIE.
	 */
	if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
		cie = cached_cie;
		goto out;
	}

	list_for_each_entry(cie, &dwarf_cie_list, link) {
		if (cie->cie_pointer == cie_ptr) {
			cached_cie = cie;
			break;
		}
	}

	/* Couldn't find the entry in the list. */
	if (&cie->link == &dwarf_cie_list)
		cie = NULL;
out:
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
	return cie;
}

/**
 *	dwarf_lookup_fde - locate the FDE that covers pc
 *	@pc: the program counter
 */
struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
	struct dwarf_fde *fde;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_fde_lock, flags);

	list_for_each_entry(fde, &dwarf_fde_list, link) {
		unsigned long start, end;

		start = fde->initial_location;
		end = fde->initial_location + fde->address_range;

		if (pc >= start && pc < end)
			break;
	}

	/* Couldn't find the entry in the list. */
	if (&fde->link == &dwarf_fde_list)
		fde = NULL;

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return fde;
}

/**
 *	dwarf_cfa_execute_insns - execute instructions to calculate a CFA
 *	@insn_start: address of the first instruction
 *	@insn_end: address of the last instruction
 *	@cie: the CIE for this function
 *	@fde: the FDE for this function
 *	@frame: the instructions calculate the CFA for this frame
 *	@pc: the program counter of the address we're interested in
 *
 *	Execute the Call Frame instruction sequence starting at
 *	@insn_start and ending at @insn_end. The instructions describe
 *	how to calculate the Canonical Frame Address of a stackframe.
 *	Store the results in @frame.
 */
static int dwarf_cfa_execute_insns(unsigned char *insn_start,
				   unsigned char *insn_end,
				   struct dwarf_cie *cie,
				   struct dwarf_fde *fde,
				   struct dwarf_frame *frame,
				   unsigned long pc)
{
	unsigned char insn;
	unsigned char *current_insn;
	unsigned int count, delta, reg, expr_len, offset;
	struct dwarf_reg *regp;

	current_insn = insn_start;

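	/*
	 * A CFI program is a series of rule updates interleaved with
	 * "advance location" instructions. A (hypothetical) prologue
	 * might encode:
	 *
	 *	DW_CFA_def_cfa      r15, 0
	 *	DW_CFA_advance_loc  2
	 *	DW_CFA_def_cfa_offset 8
	 *	DW_CFA_offset       r17, 1
	 *
	 * We replay the program, stopping once frame->pc advances past
	 * @pc, so that @frame holds the rules in force at @pc.
	 */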
	while (current_insn < insn_end && frame->pc <= pc) {
		insn = __raw_readb(current_insn++);

		/*
		 * Firstly, handle the opcodes that embed their operands
		 * in the instructions.
		 */
		switch (DW_CFA_opcode(insn)) {
		case DW_CFA_advance_loc:
			delta = DW_CFA_operand(insn);
			delta *= cie->code_alignment_factor;
			frame->pc += delta;
			continue;
			/* NOTREACHED */
		case DW_CFA_offset:
			reg = DW_CFA_operand(insn);
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->addr = offset;
			regp->flags |= DWARF_REG_OFFSET;
			continue;
			/* NOTREACHED */
		case DW_CFA_restore:
			reg = DW_CFA_operand(insn);
			continue;
			/* NOTREACHED */
		}

		/*
		 * Secondly, handle the opcodes that don't embed their
		 * operands in the instruction.
		 */
		switch (insn) {
		case DW_CFA_nop:
			continue;
		case DW_CFA_advance_loc1:
			delta = *current_insn++;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc2:
			delta = get_unaligned((u16 *)current_insn);
			current_insn += 2;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc4:
			delta = get_unaligned((u32 *)current_insn);
			current_insn += 4;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			break;
		case DW_CFA_restore_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			break;
		case DW_CFA_undefined:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_UNDEFINED;
			break;
		case DW_CFA_def_cfa:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_offset);
			current_insn += count;

			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_register:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_offset:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			frame->cfa_offset = offset;
			break;
		case DW_CFA_def_cfa_expression:
			count = dwarf_read_uleb128(current_insn, &expr_len);
			current_insn += count;

			frame->cfa_expr = current_insn;
			frame->cfa_expr_len = expr_len;
			current_insn += expr_len;

			frame->flags |= DWARF_FRAME_CFA_REG_EXP;
			break;
		case DW_CFA_offset_extended_sf:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_val_offset:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_VAL_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_GNU_args_size:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			break;
		case DW_CFA_GNU_negative_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;

			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = -offset;
			break;
		default:
			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
			UNWINDER_BUG();
			break;
		}
	}

	return 0;
}

/**
 *	dwarf_unwind_stack - recursively unwind the stack
 *	@pc: the program counter to begin unwinding from (0 means
 *	"start from the current context")
 *	@prev: struct dwarf_frame of the previous stackframe on the callstack
 *
 *	Return a struct dwarf_frame representing the most recent frame
 *	on the callstack. The lower (older) stack frames are linked via
 *	the "prev" member.
 */
struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
					struct dwarf_frame *prev)
{
	struct dwarf_frame *frame;
	struct dwarf_cie *cie;
	struct dwarf_fde *fde;
	struct dwarf_reg *reg;
	unsigned long addr;

	/*
	 * If this is the first invocation of this recursive function we
	 * need to get the contents of a physical register to get the
	 * CFA in order to begin the virtual unwinding of the stack.
	 *
	 * NOTE: the return address is guaranteed to be set up by the
	 * time this function makes its first function call.
	 */
	if (!pc && !prev)
		pc = (unsigned long)current_text_addr();

	frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
	if (!frame) {
		printk(KERN_ERR "Unable to allocate a dwarf frame\n");
		UNWINDER_BUG();
	}

	INIT_LIST_HEAD(&frame->reg_list);
	frame->flags = 0;
	frame->prev = prev;
	frame->return_addr = 0;

	fde = dwarf_lookup_fde(pc);
	if (!fde) {
		/*
		 * This is our normal exit path - the one that stops the
		 * recursion. There are two reasons why we might exit
		 * here,
		 *
		 *	a) pc has no associated DWARF frame info and so
		 *	we don't know how to unwind this frame. This is
		 *	usually the case when we're trying to unwind a
		 *	frame that was called from some assembly code
		 *	that has no DWARF info, e.g. syscalls.
		 *
		 *	b) the DWARF debug info for pc is bogus. There's
		 *	really no way to distinguish this case from the
		 *	case above, which sucks because we could print a
		 *	warning here.
		 */
		goto bail;
	}

	cie = dwarf_lookup_cie(fde->cie_pointer);

	frame->pc = fde->initial_location;

	/* CIE initial instructions */
	dwarf_cfa_execute_insns(cie->initial_instructions,
				cie->instructions_end, cie, fde,
				frame, pc);

	/* FDE instructions */
	dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
				fde, frame, pc);

	/* Calculate the CFA */
	switch (frame->flags) {
	case DWARF_FRAME_CFA_REG_OFFSET:
		if (prev) {
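			/*
			 * The rules of the previously unwound frame
			 * tell us where the value of this frame's CFA
			 * register was saved: reg->addr bytes from
			 * that frame's CFA. Read it back from the
			 * stack.
			 */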
			reg = dwarf_frame_reg(prev, frame->cfa_register);
			UNWINDER_BUG_ON(!reg);
			UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

			addr = prev->cfa + reg->addr;
			frame->cfa = __raw_readl(addr);

		} else {
			/*
			 * Again, this is the first invocation of this
			 * recursive function. We need to physically
			 * read the contents of a register in order to
			 * get the Canonical Frame Address for this
			 * function.
			 */
			frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
		}

		frame->cfa += frame->cfa_offset;
		break;
	default:
		UNWINDER_BUG();
	}

	reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);

	/*
	 * If we haven't seen the return address register or the return
	 * address column is undefined then we must assume that this is
	 * the end of the callstack.
	 */
	if (!reg || reg->flags == DWARF_UNDEFINED)
		goto bail;

	UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

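	/*
	 * The return address was saved at a fixed offset from this
	 * frame's CFA; read it back so that the caller's frame can be
	 * unwound next.
	 */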
	addr = frame->cfa + reg->addr;
	frame->return_addr = __raw_readl(addr);

	return frame;

bail:
	dwarf_frame_free_regs(frame);
	mempool_free(frame, dwarf_frame_pool);
	return NULL;
}

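/**
 *	dwarf_parse_cie - parse a Common Information Entry
 *	@entry: the address of the CIE (its initial length field)
 *	@p: the address just past the CIE ID field
 *	@len: the length of the CIE, excluding the initial length field
 *	@end: the address of the first byte after the CIE
 *
 *	Decode the CIE version, augmentation string, alignment factors,
 *	return address column and pointer encoding into a struct
 *	dwarf_cie and add it to the global CIE list. Return 0 on
 *	success or -ENOMEM if no memory is available.
 */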
static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
			   unsigned char *end)
{
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;

	cie = kzalloc(sizeof(*cie), GFP_KERNEL);
	if (!cie)
		return -ENOMEM;

	cie->length = len;

	/*
	 * Record the address of this CIE. It allows the CIE to be
	 * quickly and easily looked up from the corresponding FDE,
	 * whose CIE pointer is converted to the same form.
	 */
	cie->cie_pointer = (unsigned long)entry;

	cie->version = *(char *)p++;
	UNWINDER_BUG_ON(cie->version != 1);

	cie->augmentation = p;
	p += strlen(cie->augmentation) + 1;

	count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
	p += count;

	count = dwarf_read_leb128(p, &cie->data_alignment_factor);
	p += count;

	/*
	 * Which column in the rule table contains the
	 * return address?
	 */
	if (cie->version == 1) {
		cie->return_address_reg = __raw_readb(p);
		p++;
	} else {
		count = dwarf_read_uleb128(p, &cie->return_address_reg);
		p += count;
	}

	if (cie->augmentation[0] == 'z') {
		unsigned int length, count;
		cie->flags |= DWARF_CIE_Z_AUGMENTATION;

		count = dwarf_read_uleb128(p, &length);
		p += count;

		UNWINDER_BUG_ON((unsigned char *)p > end);

		cie->initial_instructions = p + length;
		cie->augmentation++;
	}

	while (*cie->augmentation) {
		/*
		 * "L" indicates a byte showing how the
		 * LSDA pointer is encoded. Skip it.
		 */
		if (*cie->augmentation == 'L') {
			p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'R') {
			/*
			 * "R" indicates a byte showing
			 * how FDE addresses are
			 * encoded.
			 */
			cie->encoding = *(char *)p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'P') {
			/*
			 * "P" indicates a personality
			 * routine in the CIE
			 * augmentation.
			 */
			UNWINDER_BUG();
		} else if (*cie->augmentation == 'S') {
			UNWINDER_BUG();
		} else {
			/*
			 * Unknown augmentation. Assume
			 * 'z' augmentation.
			 */
			p = cie->initial_instructions;
			UNWINDER_BUG_ON(!p);
			break;
		}
	}

	cie->initial_instructions = p;
	cie->instructions_end = end;

	/* Add to list */
	spin_lock_irqsave(&dwarf_cie_lock, flags);
	list_add_tail(&cie->link, &dwarf_cie_list);
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	return 0;
}

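/**
 *	dwarf_parse_fde - parse a Frame Description Entry
 *	@entry: the address of the FDE (its initial length field)
 *	@entry_type: the value of the FDE's CIE pointer field
 *	@start: the address just past the CIE pointer field
 *	@len: the length of the FDE, excluding the initial length field
 *	@end: the address of the first byte after the FDE
 *
 *	Decode the initial location and address range of the FDE,
 *	resolve its CIE and record where its call frame instructions
 *	begin, then add it to the global FDE list. Return 0 on success
 *	or -ENOMEM if no memory is available.
 */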
static int dwarf_parse_fde(void *entry, u32 entry_type,
			   void *start, unsigned long len,
			   unsigned char *end)
{
	struct dwarf_fde *fde;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;
	void *p = start;

	fde = kzalloc(sizeof(*fde), GFP_KERNEL);
	if (!fde)
		return -ENOMEM;

	fde->length = len;

	/*
	 * In a .eh_frame section the CIE pointer is the delta between
	 * the address of the CIE pointer field within the FDE and the
	 * CIE it refers to. Convert it into the absolute address of
	 * the CIE so that it can be passed to dwarf_lookup_cie().
	 */
	fde->cie_pointer = (unsigned long)(p - entry_type - 4);

	cie = dwarf_lookup_cie(fde->cie_pointer);
	fde->cie = cie;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->initial_location,
						 cie->encoding);
	else
		count = dwarf_read_addr(p, &fde->initial_location);

	p += count;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->address_range,
						 cie->encoding & 0x0f);
	else
		count = dwarf_read_addr(p, &fde->address_range);

	p += count;

	if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
		unsigned int length;
		count = dwarf_read_uleb128(p, &length);
		p += count + length;
	}

	/* Call frame instructions. */
	fde->instructions = p;
	fde->end = end;

	/* Add to list. */
	spin_lock_irqsave(&dwarf_fde_lock, flags);
	list_add_tail(&fde->link, &dwarf_fde_list);
	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return 0;
}

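/**
 *	dwarf_unwinder_dump - dump a callstack via the unwinder core
 *	@task: the task whose stack to dump (unused)
 *	@regs: the register state at the time of the dump (unused)
 *	@sp: the stack pointer (unused)
 *	@ops: callbacks used to report each address
 *	@data: opaque cookie passed through to @ops
 *
 *	Repeatedly call dwarf_unwind_stack(), reporting every return
 *	address through ops->address() and freeing each frame once the
 *	next one has been generated.
 */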
static void dwarf_unwinder_dump(struct task_struct *task,
				struct pt_regs *regs,
				unsigned long *sp,
				const struct stacktrace_ops *ops,
				void *data)
{
	struct dwarf_frame *frame, *_frame;
	unsigned long return_addr;

	_frame = NULL;
	return_addr = 0;

	while (1) {
		frame = dwarf_unwind_stack(return_addr, _frame);

		if (_frame) {
			dwarf_frame_free_regs(_frame);
			mempool_free(_frame, dwarf_frame_pool);
		}

		_frame = frame;

		if (!frame || !frame->return_addr)
			break;

		return_addr = frame->return_addr;
		ops->address(data, return_addr, 1);
	}
}

static struct unwinder dwarf_unwinder = {
	.name = "dwarf-unwinder",
	.dump = dwarf_unwinder_dump,
	.rating = 150,
};

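/**
 *	dwarf_unwinder_cleanup - tear down the DWARF unwinder
 *
 *	Free every CIE and FDE that was parsed and destroy the frame
 *	and register caches. Called if initialisation fails.
 */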
static void dwarf_unwinder_cleanup(void)
{
	struct dwarf_cie *cie, *cie_tmp;
	struct dwarf_fde *fde, *fde_tmp;

	/*
	 * Deallocate all the memory allocated for the DWARF unwinder.
	 * Traverse all the FDE/CIE lists and remove and free all the
	 * memory associated with those data structures. Use the _safe
	 * iterators because the entries are freed as we walk the lists.
	 */
	list_for_each_entry_safe(cie, cie_tmp, &dwarf_cie_list, link)
		kfree(cie);

	list_for_each_entry_safe(fde, fde_tmp, &dwarf_fde_list, link)
		kfree(fde);

	kmem_cache_destroy(dwarf_reg_cachep);
	kmem_cache_destroy(dwarf_frame_cachep);
}

/**
 *	dwarf_unwinder_init - initialise the dwarf unwinder
 *
 *	Build the data structures describing the .eh_frame section to
 *	make it easier to look up CIE and FDE entries. Because the
 *	.eh_frame section is packed as tightly as possible it is not
 *	easy to look up the FDE for a given PC, so we build a list of
 *	FDE and CIE entries that make it easier.
 */
static int __init dwarf_unwinder_init(void)
{
	u32 entry_type;
	void *p, *entry;
	int count, err = 0;
	unsigned long len;
	unsigned int c_entries, f_entries;
	unsigned char *end;

	INIT_LIST_HEAD(&dwarf_cie_list);
	INIT_LIST_HEAD(&dwarf_fde_list);

	c_entries = 0;
	f_entries = 0;
	entry = &__start_eh_frame;

	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
			sizeof(struct dwarf_frame), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);

	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
			sizeof(struct dwarf_reg), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);

	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
					  mempool_alloc_slab,
					  mempool_free_slab,
					  dwarf_frame_cachep);

	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
					 mempool_alloc_slab,
					 mempool_free_slab,
					 dwarf_reg_cachep);

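	/*
	 * Walk every record in .eh_frame. Each record begins with a
	 * 4-byte initial length (which does not count itself),
	 * followed by a 4-byte ID field: DW_EH_FRAME_CIE for a CIE,
	 * anything else for an FDE.
	 */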
	while ((char *)entry < __stop_eh_frame) {
		p = entry;

		count = dwarf_entry_len(p, &len);
		if (count == 0) {
			/*
			 * We read a bogus length field value. There is
			 * nothing we can do here apart from disabling
			 * the DWARF unwinder. We can't even skip this
			 * entry and move to the next one because 'len'
			 * tells us where our next entry is.
			 */
			goto out;
		} else
			p += count;

		/* initial length does not include itself */
		end = p + len;

		entry_type = get_unaligned((u32 *)p);
		p += 4;

		if (entry_type == DW_EH_FRAME_CIE) {
			err = dwarf_parse_cie(entry, p, len, end);
			if (err < 0)
				goto out;
			else
				c_entries++;
		} else {
			err = dwarf_parse_fde(entry, entry_type, p, len, end);
			if (err < 0)
				goto out;
			else
				f_entries++;
		}

		entry = (char *)entry + len + 4;
	}

	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
	       c_entries, f_entries);

	err = unwinder_register(&dwarf_unwinder);
	if (err)
		goto out;

	return 0;

out:
	printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
	dwarf_unwinder_cleanup();
	return -EINVAL;
}
early_initcall(dwarf_unwinder_init);
973