xref: /openbmc/linux/arch/sh/kernel/dwarf.c (revision a09d2831)
1 /*
2  * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
3  *
4  * This file is subject to the terms and conditions of the GNU General Public
5  * License.  See the file "COPYING" in the main directory of this archive
6  * for more details.
7  *
8  * This is an implementation of a DWARF unwinder. Its main purpose is
9  * for generating stacktrace information. Based on the DWARF 3
10  * specification from http://www.dwarfstd.org.
11  *
12  * TODO:
13  *	- DWARF64 doesn't work.
14  *	- Registers with DWARF_VAL_OFFSET rules aren't handled properly.
15  */
16 
17 /* #define DEBUG */
18 #include <linux/kernel.h>
19 #include <linux/io.h>
20 #include <linux/list.h>
21 #include <linux/mempool.h>
22 #include <linux/mm.h>
23 #include <linux/elf.h>
24 #include <linux/ftrace.h>
25 #include <asm/dwarf.h>
26 #include <asm/unwinder.h>
27 #include <asm/sections.h>
28 #include <asm/unaligned.h>
29 #include <asm/stacktrace.h>
30 
31 /* Reserve enough memory for two stack frames */
32 #define DWARF_FRAME_MIN_REQ	2
33 /* ... with 4 registers per frame. */
34 #define DWARF_REG_MIN_REQ	(DWARF_FRAME_MIN_REQ * 4)
35 
36 static struct kmem_cache *dwarf_frame_cachep;
37 static mempool_t *dwarf_frame_pool;
38 
39 static struct kmem_cache *dwarf_reg_cachep;
40 static mempool_t *dwarf_reg_pool;
41 
42 static LIST_HEAD(dwarf_cie_list);
43 static DEFINE_SPINLOCK(dwarf_cie_lock);
44 
45 static LIST_HEAD(dwarf_fde_list);
46 static DEFINE_SPINLOCK(dwarf_fde_lock);
47 
48 static struct dwarf_cie *cached_cie;
49 
50 /**
51  *	dwarf_frame_alloc_reg - allocate memory for a DWARF register
52  *	@frame: the DWARF frame to add the new register to
53  *	@reg_num: the register number
54  *
55  *	Allocate space for, and initialise, a dwarf reg from
56  *	dwarf_reg_pool and insert it onto the (unsorted) linked-list of
57  *	dwarf registers for @frame.
58  *
59  *	Return the initialised DWARF reg.
60  */
61 static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
62 					       unsigned int reg_num)
63 {
64 	struct dwarf_reg *reg;
65 
66 	reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
67 	if (!reg) {
68 		printk(KERN_WARNING "Unable to allocate a DWARF register\n");
69 		/*
70 		 * Let's just bomb hard here, we have no way to
71 		 * gracefully recover.
72 		 */
73 		UNWINDER_BUG();
74 	}
75 
76 	reg->number = reg_num;
77 	reg->addr = 0;
78 	reg->flags = 0;
79 
80 	list_add(&reg->link, &frame->reg_list);
81 
82 	return reg;
83 }
84 
85 static void dwarf_frame_free_regs(struct dwarf_frame *frame)
86 {
87 	struct dwarf_reg *reg, *n;
88 
89 	list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
90 		list_del(&reg->link);
91 		mempool_free(reg, dwarf_reg_pool);
92 	}
93 }
94 
95 /**
96  *	dwarf_frame_reg - return a DWARF register
97  *	@frame: the DWARF frame to search in for @reg_num
98  *	@reg_num: the register number to search for
99  *
100  *	Look up and return the dwarf reg @reg_num for this frame. Return
101  *	NULL if @reg_num is an invalid register number.
102  */
103 static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
104 					 unsigned int reg_num)
105 {
106 	struct dwarf_reg *reg;
107 
108 	list_for_each_entry(reg, &frame->reg_list, link) {
109 		if (reg->number == reg_num)
110 			return reg;
111 	}
112 
113 	return NULL;
114 }
115 
116 /**
117  *	dwarf_read_addr - read dwarf data
118  *	@src: source address of data
119  *	@dst: destination address to store the data to
120  *
121  *	Read 'n' bytes from @src, where 'n' is the size of an address on
122  *	the native machine, and store them at @dst. We have to be careful
123  *	when reading from @src and writing to @dst because they can be
124  *	arbitrarily aligned. Return the number of bytes read, which
125  *	should always be 'n'.
126  */
127 static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
128 {
129 	u32 val = get_unaligned(src);
130 	put_unaligned(val, dst);
131 	return sizeof(unsigned long *);
132 }
133 
134 /**
135  *	dwarf_read_uleb128 - read unsigned LEB128 data
136  *	@addr: the address where the ULEB128 data is stored
137  *	@ret: address to store the result
138  *
139  *	Decode an unsigned LEB128 encoded datum. The algorithm is taken
140  *	from Appendix C of the DWARF 3 spec. For information on the
141  *	encodings refer to section "7.6 - Variable Length Data". Return
142  *	the number of bytes read.
143  */
144 static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
145 {
146 	unsigned int result;
147 	unsigned char byte;
148 	int shift, count;
149 
150 	result = 0;
151 	shift = 0;
152 	count = 0;
153 
154 	while (1) {
155 		byte = __raw_readb(addr);
156 		addr++;
157 		count++;
158 
159 		result |= (byte & 0x7f) << shift;
160 		shift += 7;
161 
162 		if (!(byte & 0x80))
163 			break;
164 	}
165 
166 	*ret = result;
167 
168 	return count;
169 }
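
    /*
     * For illustration (this is the example from section 7.6 of the
     * DWARF 3 spec): the byte sequence 0xe5 0x8e 0x26 decodes to
     * 624485, because 0x65 + (0x0e << 7) + (0x26 << 14) == 624485 and
     * only the first two bytes have the continuation bit (0x80) set.
     */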
170 
171 /**
172  *	dwarf_read_leb128 - read signed LEB128 data
173  *	@addr: the address of the LEB128 encoded data
174  *	@ret: address to store the result
175  *
176  *	Decode signed LEB128 data. The algorithm is taken from Appendix
177  *	C of the DWARF 3 spec. Return the number of bytes read.
178  */
179 static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
180 {
181 	unsigned char byte;
182 	int result, shift;
183 	int num_bits;
184 	int count;
185 
186 	result = 0;
187 	shift = 0;
188 	count = 0;
189 
190 	while (1) {
191 		byte = __raw_readb(addr);
192 		addr++;
193 		result |= (byte & 0x7f) << shift;
194 		shift += 7;
195 		count++;
196 
197 		if (!(byte & 0x80))
198 			break;
199 	}
200 
201 	/* The number of bits in a signed integer. */
202 	num_bits = 8 * sizeof(result);
203 
204 	if ((shift < num_bits) && (byte & 0x40))
205 		result |= (-1 << shift);
206 
207 	*ret = result;
208 
209 	return count;
210 }
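
    /*
     * For illustration: the bytes 0xc0 0xbb 0x78 decode to -123456.
     * The low seven bits of each byte give 0x40 + (0x3b << 7) +
     * (0x78 << 14) == 0x1e1dc0, and because bit 6 of the final byte
     * (0x78 & 0x40) is set, the top bits are filled with ones,
     * yielding -123456.
     */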
211 
212 /**
213  *	dwarf_read_encoded_value - return the decoded value at @addr
214  *	@addr: the address of the encoded value
215  *	@val: where to write the decoded value
216  *	@encoding: the encoding with which we can decode @addr
217  *
218  *	GCC emits encoded addresses in the .eh_frame FDE entries. Decode
219  *	the value at @addr using @encoding. The decoded value is written
220  *	to @val and the number of bytes read is returned.
221  */
222 static int dwarf_read_encoded_value(char *addr, unsigned long *val,
223 				    char encoding)
224 {
225 	unsigned long decoded_addr = 0;
226 	int count = 0;
227 
228 	switch (encoding & 0x70) {
229 	case DW_EH_PE_absptr:
230 		break;
231 	case DW_EH_PE_pcrel:
232 		decoded_addr = (unsigned long)addr;
233 		break;
234 	default:
235 		pr_debug("encoding=0x%x\n", (encoding & 0x70));
236 		UNWINDER_BUG();
237 	}
238 
239 	if ((encoding & 0x07) == 0x00)
240 		encoding |= DW_EH_PE_udata4;
241 
242 	switch (encoding & 0x0f) {
243 	case DW_EH_PE_sdata4:
244 	case DW_EH_PE_udata4:
245 		count += 4;
246 		decoded_addr += get_unaligned((u32 *)addr);
247 		__raw_writel(decoded_addr, val);
248 		break;
249 	default:
250 		pr_debug("encoding=0x%x\n", encoding);
251 		UNWINDER_BUG();
252 	}
253 
254 	return count;
255 }
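
    /*
     * A worked, purely hypothetical example: with an encoding of
     * DW_EH_PE_pcrel | DW_EH_PE_sdata4, a 4-byte value of 0xffffff00
     * (-256) stored at address 0x8c001000 decodes to
     * 0x8c001000 + 0xffffff00 == 0x8c000f00, the addition wrapping
     * because unsigned long is 32 bits wide on sh.
     */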
256 
257 /**
258  *	dwarf_entry_len - return the length of an FDE or CIE
259  *	@addr: the address of the entry
260  *	@len: the length of the entry
261  *
262  *	Read the initial_length field of the entry and store the size of
263  *	the entry in @len. We return the number of bytes read. Return a
264  *	count of 0 on error.
265  */
266 static inline int dwarf_entry_len(char *addr, unsigned long *len)
267 {
268 	u32 initial_len;
269 	int count;
270 
271 	initial_len = get_unaligned((u32 *)addr);
272 	count = 4;
273 
274 	/*
275 	 * An initial length field value in the range DW_EXT_LO -
276 	 * DW_EXT_HI indicates an extension, and should not be
277 	 * interpreted as a length. The only extension that we currently
278 	 * understand is the use of DWARF64 addresses.
279 	 */
280 	if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
281 		/*
282 		 * The 64-bit length field immediately follows the
283 		 * compulsory 32-bit length field.
284 		 */
285 		if (initial_len == DW_EXT_DWARF64) {
286 			*len = get_unaligned((u64 *)(addr + 4));
287 			count = 12;
288 		} else {
289 			printk(KERN_WARNING "Unknown DWARF extension\n");
290 			count = 0;
291 		}
292 	} else
293 		*len = initial_len;
294 
295 	return count;
296 }
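
    /*
     * For example, a 32-bit initial length of 0x00000014 means the
     * CIE/FDE occupies a further 0x14 bytes after the length field,
     * while the escape value 0xffffffff (DW_EXT_DWARF64 per the DWARF
     * spec) signals that the real length follows in the next eight
     * bytes.
     */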
297 
298 /**
299  *	dwarf_lookup_cie - locate the CIE for @cie_ptr
300  *	@cie_ptr: the address of the CIE within the .eh_frame section
301  */
302 static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
303 {
304 	struct dwarf_cie *cie;
305 	unsigned long flags;
306 
307 	spin_lock_irqsave(&dwarf_cie_lock, flags);
308 
309 	/*
310 	 * We've cached the last CIE we looked up because chances are
311 	 * that the FDE wants this CIE.
312 	 */
313 	if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
314 		cie = cached_cie;
315 		goto out;
316 	}
317 
318 	list_for_each_entry(cie, &dwarf_cie_list, link) {
319 		if (cie->cie_pointer == cie_ptr) {
320 			cached_cie = cie;
321 			break;
322 		}
323 	}
324 
325 	/* Couldn't find the entry in the list. */
326 	if (&cie->link == &dwarf_cie_list)
327 		cie = NULL;
328 out:
329 	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
330 	return cie;
331 }
332 
333 /**
334  *	dwarf_lookup_fde - locate the FDE that covers pc
335  *	@pc: the program counter
336  */
337 struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
338 {
339 	struct dwarf_fde *fde;
340 	unsigned long flags;
341 
342 	spin_lock_irqsave(&dwarf_fde_lock, flags);
343 
344 	list_for_each_entry(fde, &dwarf_fde_list, link) {
345 		unsigned long start, end;
346 
347 		start = fde->initial_location;
348 		end = fde->initial_location + fde->address_range;
349 
350 		if (pc >= start && pc < end)
351 			break;
352 	}
353 
354 	/* Couldn't find the entry in the list. */
355 	if (&fde->link == &dwarf_fde_list)
356 		fde = NULL;
357 
358 	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
359 
360 	return fde;
361 }
362 
363 /**
364  *	dwarf_cfa_execute_insns - execute instructions to calculate a CFA
365  *	@insn_start: address of the first instruction
366  *	@insn_end: address of the first byte past the last instruction
367  *	@cie: the CIE for this function
368  *	@fde: the FDE for this function
369  *	@frame: the frame for which the instructions calculate the CFA
370  *	@pc: the program counter of the address we're interested in
371  *
372  *	Execute the Call Frame instruction sequence starting at
373  *	@insn_start and ending at @insn_end. The instructions describe
374  *	how to calculate the Canonical Frame Address of a stackframe.
375  *	Store the results in @frame.
376  */
377 static int dwarf_cfa_execute_insns(unsigned char *insn_start,
378 				   unsigned char *insn_end,
379 				   struct dwarf_cie *cie,
380 				   struct dwarf_fde *fde,
381 				   struct dwarf_frame *frame,
382 				   unsigned long pc)
383 {
384 	unsigned char insn;
385 	unsigned char *current_insn;
386 	unsigned int count, delta, reg, expr_len, offset;
387 	struct dwarf_reg *regp;
388 
389 	current_insn = insn_start;
390 
391 	while (current_insn < insn_end && frame->pc <= pc) {
392 		insn = __raw_readb(current_insn++);
393 
394 		/*
395 		 * Firstly, handle the opcodes that embed their operands
396 		 * in the instructions.
397 		 */
398 		switch (DW_CFA_opcode(insn)) {
399 		case DW_CFA_advance_loc:
400 			delta = DW_CFA_operand(insn);
401 			delta *= cie->code_alignment_factor;
402 			frame->pc += delta;
403 			continue;
404 			/* NOTREACHED */
405 		case DW_CFA_offset:
406 			reg = DW_CFA_operand(insn);
407 			count = dwarf_read_uleb128(current_insn, &offset);
408 			current_insn += count;
409 			offset *= cie->data_alignment_factor;
410 			regp = dwarf_frame_alloc_reg(frame, reg);
411 			regp->addr = offset;
412 			regp->flags |= DWARF_REG_OFFSET;
413 			continue;
414 			/* NOTREACHED */
415 		case DW_CFA_restore:
416 			reg = DW_CFA_operand(insn);
417 			continue;
418 			/* NOTREACHED */
419 		}
420 
421 		/*
422 		 * Secondly, handle the opcodes that don't embed their
423 		 * operands in the instruction.
424 		 */
425 		switch (insn) {
426 		case DW_CFA_nop:
427 			continue;
428 		case DW_CFA_advance_loc1:
429 			delta = *current_insn++;
430 			frame->pc += delta * cie->code_alignment_factor;
431 			break;
432 		case DW_CFA_advance_loc2:
433 			delta = get_unaligned((u16 *)current_insn);
434 			current_insn += 2;
435 			frame->pc += delta * cie->code_alignment_factor;
436 			break;
437 		case DW_CFA_advance_loc4:
438 			delta = get_unaligned((u32 *)current_insn);
439 			current_insn += 4;
440 			frame->pc += delta * cie->code_alignment_factor;
441 			break;
442 		case DW_CFA_offset_extended:
443 			count = dwarf_read_uleb128(current_insn, &reg);
444 			current_insn += count;
445 			count = dwarf_read_uleb128(current_insn, &offset);
446 			current_insn += count;
447 			offset *= cie->data_alignment_factor;
448 			break;
449 		case DW_CFA_restore_extended:
450 			count = dwarf_read_uleb128(current_insn, &reg);
451 			current_insn += count;
452 			break;
453 		case DW_CFA_undefined:
454 			count = dwarf_read_uleb128(current_insn, &reg);
455 			current_insn += count;
456 			regp = dwarf_frame_alloc_reg(frame, reg);
457 			regp->flags |= DWARF_UNDEFINED;
458 			break;
459 		case DW_CFA_def_cfa:
460 			count = dwarf_read_uleb128(current_insn,
461 						   &frame->cfa_register);
462 			current_insn += count;
463 			count = dwarf_read_uleb128(current_insn,
464 						   &frame->cfa_offset);
465 			current_insn += count;
466 
467 			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
468 			break;
469 		case DW_CFA_def_cfa_register:
470 			count = dwarf_read_uleb128(current_insn,
471 						   &frame->cfa_register);
472 			current_insn += count;
473 			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
474 			break;
475 		case DW_CFA_def_cfa_offset:
476 			count = dwarf_read_uleb128(current_insn, &offset);
477 			current_insn += count;
478 			frame->cfa_offset = offset;
479 			break;
480 		case DW_CFA_def_cfa_expression:
481 			count = dwarf_read_uleb128(current_insn, &expr_len);
482 			current_insn += count;
483 
484 			frame->cfa_expr = current_insn;
485 			frame->cfa_expr_len = expr_len;
486 			current_insn += expr_len;
487 
488 			frame->flags |= DWARF_FRAME_CFA_REG_EXP;
489 			break;
490 		case DW_CFA_offset_extended_sf:
491 			count = dwarf_read_uleb128(current_insn, &reg);
492 			current_insn += count;
493 			count = dwarf_read_leb128(current_insn, &offset);
494 			current_insn += count;
495 			offset *= cie->data_alignment_factor;
496 			regp = dwarf_frame_alloc_reg(frame, reg);
497 			regp->flags |= DWARF_REG_OFFSET;
498 			regp->addr = offset;
499 			break;
500 		case DW_CFA_val_offset:
501 			count = dwarf_read_uleb128(current_insn, &reg);
502 			current_insn += count;
503 			count = dwarf_read_leb128(current_insn, &offset);
    			current_insn += count;
504 			offset *= cie->data_alignment_factor;
505 			regp = dwarf_frame_alloc_reg(frame, reg);
506 			regp->flags |= DWARF_VAL_OFFSET;
507 			regp->addr = offset;
508 			break;
509 		case DW_CFA_GNU_args_size:
510 			count = dwarf_read_uleb128(current_insn, &offset);
511 			current_insn += count;
512 			break;
513 		case DW_CFA_GNU_negative_offset_extended:
514 			count = dwarf_read_uleb128(current_insn, &reg);
515 			current_insn += count;
516 			count = dwarf_read_uleb128(current_insn, &offset);
    			current_insn += count;
517 			offset *= cie->data_alignment_factor;
518 
519 			regp = dwarf_frame_alloc_reg(frame, reg);
520 			regp->flags |= DWARF_REG_OFFSET;
521 			regp->addr = -offset;
522 			break;
523 		default:
524 			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
525 			UNWINDER_BUG();
526 			break;
527 		}
528 	}
529 
530 	return 0;
531 }
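
    /*
     * A minimal, hypothetical example of what the loop above computes:
     * for an FDE whose instruction stream (roughly as readelf would
     * print it) is
     *
     *	DW_CFA_def_cfa: r15 ofs 0
     *	DW_CFA_advance_loc: 2
     *	DW_CFA_def_cfa_offset: 4
     *	DW_CFA_offset: r14 at cfa-4
     *
     * and a pc inside that range, the frame ends up with
     * cfa_register == 15, cfa_offset == 4 and a register rule saying
     * that r14 was saved at CFA - 4 (DWARF_REG_OFFSET with addr == -4,
     * assuming the usual data alignment factor of -4).
     */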
532 
533 /**
534  *	dwarf_free_frame - free the memory allocated for @frame
535  *	@frame: the frame to free
536  */
537 void dwarf_free_frame(struct dwarf_frame *frame)
538 {
539 	dwarf_frame_free_regs(frame);
540 	mempool_free(frame, dwarf_frame_pool);
541 }
542 
543 /**
544  *	dwarf_unwind_stack - unwind the stack
545  *
546  *	@pc: address of the function to unwind
547  *	@prev: struct dwarf_frame of the previous stackframe on the callstack
548  *
549  *	Return a struct dwarf_frame representing the most recent frame
550  *	on the callstack. Each of the lower (older) stack frames is
551  *	linked via the "prev" member.
552  */
553 struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
554 					struct dwarf_frame *prev)
555 {
556 	struct dwarf_frame *frame;
557 	struct dwarf_cie *cie;
558 	struct dwarf_fde *fde;
559 	struct dwarf_reg *reg;
560 	unsigned long addr;
561 
562 	/*
563 	 * If we're starting at the top of the stack we need to get the
564 	 * contents of a physical register to get the CFA in order to
565 	 * begin the virtual unwinding of the stack.
566 	 *
567 	 * NOTE: the return address is guaranteed to be set up by the
568 	 * time this function makes its first function call.
569 	 */
570 	if (!pc || !prev)
571 		pc = (unsigned long)current_text_addr();
572 
573 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
574 	/*
575 	 * If our stack has been patched by the function graph tracer
576 	 * then we might see the address of return_to_handler() where we
577 	 * expected to find the real return address.
578 	 */
579 	if (pc == (unsigned long)&return_to_handler) {
580 		int index = current->curr_ret_stack;
581 
582 		/*
583 		 * We currently have no way of tracking how many
584 		 * return_to_handler()'s we've seen. If there is more
585 		 * than one patched return address on our stack,
586 		 * complain loudly.
587 		 */
588 		WARN_ON(index > 0);
589 
590 		pc = current->ret_stack[index].ret;
591 	}
592 #endif
593 
594 	frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
595 	if (!frame) {
596 		printk(KERN_ERR "Unable to allocate a dwarf frame\n");
597 		UNWINDER_BUG();
598 	}
599 
600 	INIT_LIST_HEAD(&frame->reg_list);
601 	frame->flags = 0;
602 	frame->prev = prev;
603 	frame->return_addr = 0;
604 
605 	fde = dwarf_lookup_fde(pc);
606 	if (!fde) {
607 		/*
608 		 * This is our normal exit path. There are two reasons
609 		 * why we might exit here,
610 		 *
611 		 *	a) pc has no associated DWARF frame info and so
612 		 *	we don't know how to unwind this frame. This is
613 		 *	usually the case when we're trying to unwind a
614 		 *	frame that was called from some assembly code
615 		 *	that has no DWARF info, e.g. syscalls.
616 		 *
617 		 *	b) the DWARF info for pc is bogus. There's
618 		 *	really no way to distinguish this case from the
619 		 *	case above, which sucks because we could print a
620 		 *	warning here.
621 		 */
622 		goto bail;
623 	}
624 
625 	cie = dwarf_lookup_cie(fde->cie_pointer);
626 
627 	frame->pc = fde->initial_location;
628 
629 	/* CIE initial instructions */
630 	dwarf_cfa_execute_insns(cie->initial_instructions,
631 				cie->instructions_end, cie, fde,
632 				frame, pc);
633 
634 	/* FDE instructions */
635 	dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
636 				fde, frame, pc);
637 
638 	/* Calculate the CFA */
639 	switch (frame->flags) {
640 	case DWARF_FRAME_CFA_REG_OFFSET:
641 		if (prev) {
642 			reg = dwarf_frame_reg(prev, frame->cfa_register);
643 			UNWINDER_BUG_ON(!reg);
644 			UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
645 
646 			addr = prev->cfa + reg->addr;
647 			frame->cfa = __raw_readl(addr);
648 
649 		} else {
650 			/*
651 			 * Again, we're starting from the top of the
652 			 * stack. We need to physically read
653 			 * the contents of a register in order to get
654 			 * the Canonical Frame Address for this
655 			 * function.
656 			 */
657 			frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
658 		}
659 
660 		frame->cfa += frame->cfa_offset;
661 		break;
662 	default:
663 		UNWINDER_BUG();
664 	}
665 
666 	reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);
667 
668 	/*
669 	 * If we haven't seen the return address register or the return
670 	 * address column is undefined then we must assume that this is
671 	 * the end of the callstack.
672 	 */
673 	if (!reg || reg->flags == DWARF_UNDEFINED)
674 		goto bail;
675 
676 	UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
677 
678 	addr = frame->cfa + reg->addr;
679 	frame->return_addr = __raw_readl(addr);
680 
681 	return frame;
682 
683 bail:
684 	dwarf_free_frame(frame);
685 	return NULL;
686 }
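
    /*
     * A typical caller (see dwarf_unwinder_dump() below) walks the
     * whole callstack roughly like this, freeing each frame once the
     * next one has been computed from it:
     *
     *	struct dwarf_frame *frame, *prev = NULL;
     *
     *	frame = dwarf_unwind_stack(0, NULL);
     *	while (frame && frame->return_addr) {
     *		prev = frame;
     *		frame = dwarf_unwind_stack(prev->return_addr, prev);
     *		dwarf_free_frame(prev);
     *	}
     *	if (frame)
     *		dwarf_free_frame(frame);
     */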
687 
688 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
689 			   unsigned char *end, struct module *mod)
690 {
691 	struct dwarf_cie *cie;
692 	unsigned long flags;
693 	int count;
694 
695 	cie = kzalloc(sizeof(*cie), GFP_KERNEL);
696 	if (!cie)
697 		return -ENOMEM;
698 
699 	cie->length = len;
700 
701 	/*
702 	 * Record the address of this CIE within the
703 	 * .eh_frame section. It allows this CIE to be
704 	 * quickly and easily looked up from the
705 	 * corresponding FDE.
706 	 */
707 	cie->cie_pointer = (unsigned long)entry;
708 
709 	cie->version = *(char *)p++;
710 	UNWINDER_BUG_ON(cie->version != 1);
711 
712 	cie->augmentation = p;
713 	p += strlen(cie->augmentation) + 1;
714 
715 	count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
716 	p += count;
717 
718 	count = dwarf_read_leb128(p, &cie->data_alignment_factor);
719 	p += count;
720 
721 	/*
722 	 * Which column in the rule table contains the
723 	 * return address?
724 	 */
725 	if (cie->version == 1) {
726 		cie->return_address_reg = __raw_readb(p);
727 		p++;
728 	} else {
729 		count = dwarf_read_uleb128(p, &cie->return_address_reg);
730 		p += count;
731 	}
732 
733 	if (cie->augmentation[0] == 'z') {
734 		unsigned int length, count;
735 		cie->flags |= DWARF_CIE_Z_AUGMENTATION;
736 
737 		count = dwarf_read_uleb128(p, &length);
738 		p += count;
739 
740 		UNWINDER_BUG_ON((unsigned char *)p > end);
741 
742 		cie->initial_instructions = p + length;
743 		cie->augmentation++;
744 	}
745 
746 	while (*cie->augmentation) {
747 		/*
748 		 * "L" indicates a byte showing how the
749 		 * LSDA pointer is encoded. Skip it.
750 		 */
751 		if (*cie->augmentation == 'L') {
752 			p++;
753 			cie->augmentation++;
754 		} else if (*cie->augmentation == 'R') {
755 			/*
756 			 * "R" indicates a byte showing
757 			 * how FDE addresses are
758 			 * encoded.
759 			 */
760 			cie->encoding = *(char *)p++;
761 			cie->augmentation++;
762 		} else if (*cie->augmentation == 'P') {
763 			/*
764 			 * "R" indicates a personality
765 			 * routine in the CIE
766 			 * augmentation.
767 			 */
768 			UNWINDER_BUG();
769 		} else if (*cie->augmentation == 'S') {
770 			UNWINDER_BUG();
771 		} else {
772 			/*
773 			 * Unknown augmentation. Assume
774 			 * 'z' augmentation.
775 			 */
776 			p = cie->initial_instructions;
777 			UNWINDER_BUG_ON(!p);
778 			break;
779 		}
780 	}
781 
782 	cie->initial_instructions = p;
783 	cie->instructions_end = end;
784 
785 	cie->mod = mod;
786 
787 	/* Add to list */
788 	spin_lock_irqsave(&dwarf_cie_lock, flags);
789 	list_add_tail(&cie->link, &dwarf_cie_list);
790 	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
791 
792 	return 0;
793 }
794 
795 static int dwarf_parse_fde(void *entry, u32 entry_type,
796 			   void *start, unsigned long len,
797 			   unsigned char *end, struct module *mod)
798 {
799 	struct dwarf_fde *fde;
800 	struct dwarf_cie *cie;
801 	unsigned long flags;
802 	int count;
803 	void *p = start;
804 
805 	fde = kzalloc(sizeof(*fde), GFP_KERNEL);
806 	if (!fde)
807 		return -ENOMEM;
808 
809 	fde->length = len;
810 
811 	/*
812 	 * In a .eh_frame section the CIE pointer is the delta between the
813 	 * address of the CIE pointer field and the CIE it refers to.
814 	 */
815 	fde->cie_pointer = (unsigned long)(p - entry_type - 4);
816 
817 	cie = dwarf_lookup_cie(fde->cie_pointer);
818 	fde->cie = cie;
819 
820 	if (cie->encoding)
821 		count = dwarf_read_encoded_value(p, &fde->initial_location,
822 						 cie->encoding);
823 	else
824 		count = dwarf_read_addr(p, &fde->initial_location);
825 
826 	p += count;
827 
828 	if (cie->encoding)
829 		count = dwarf_read_encoded_value(p, &fde->address_range,
830 						 cie->encoding & 0x0f);
831 	else
832 		count = dwarf_read_addr(p, &fde->address_range);
833 
834 	p += count;
835 
836 	if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
837 		unsigned int length;
838 		count = dwarf_read_uleb128(p, &length);
839 		p += count + length;
840 	}
841 
842 	/* Call frame instructions. */
843 	fde->instructions = p;
844 	fde->end = end;
845 
846 	fde->mod = mod;
847 
848 	/* Add to list. */
849 	spin_lock_irqsave(&dwarf_fde_lock, flags);
850 	list_add_tail(&fde->link, &dwarf_fde_list);
851 	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
852 
853 	return 0;
854 }
855 
856 static void dwarf_unwinder_dump(struct task_struct *task,
857 				struct pt_regs *regs,
858 				unsigned long *sp,
859 				const struct stacktrace_ops *ops,
860 				void *data)
861 {
862 	struct dwarf_frame *frame, *_frame;
863 	unsigned long return_addr;
864 
865 	_frame = NULL;
866 	return_addr = 0;
867 
868 	while (1) {
869 		frame = dwarf_unwind_stack(return_addr, _frame);
870 
871 		if (_frame)
872 			dwarf_free_frame(_frame);
873 
874 		_frame = frame;
875 
876 		if (!frame || !frame->return_addr)
877 			break;
878 
879 		return_addr = frame->return_addr;
880 		ops->address(data, return_addr, 1);
881 	}
882 
883 	if (frame)
884 		dwarf_free_frame(frame);
885 }
886 
887 static struct unwinder dwarf_unwinder = {
888 	.name = "dwarf-unwinder",
889 	.dump = dwarf_unwinder_dump,
890 	.rating = 150,
891 };
892 
893 static void dwarf_unwinder_cleanup(void)
894 {
895 	struct dwarf_cie *cie, *next_cie;
896 	struct dwarf_fde *fde, *next_fde;
897 
898 	/*
899 	 * Deallocate all the memory allocated for the DWARF unwinder.
900 	 * Traverse all the FDE/CIE lists and remove and free all the
901 	 * memory associated with those data structures.
902 	 */
903 	list_for_each_entry_safe(cie, next_cie, &dwarf_cie_list, link)
904 		kfree(cie);
905 
906 	list_for_each_entry_safe(fde, next_fde, &dwarf_fde_list, link)
907 		kfree(fde);
908 
909 	kmem_cache_destroy(dwarf_reg_cachep);
910 	kmem_cache_destroy(dwarf_frame_cachep);
911 }
912 
913 /**
914  *	dwarf_parse_section - parse DWARF section
915  *	@eh_frame_start: start address of the .eh_frame section
916  *	@eh_frame_end: end address of the .eh_frame section
917  *	@mod: the module containing the section, or NULL for the kernel itself
918  *
919  *	Parse the information in a .eh_frame section.
920  */
921 static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
922 			       struct module *mod)
923 {
924 	u32 entry_type;
925 	void *p, *entry;
926 	int count, err = 0;
927 	unsigned long len = 0;
928 	unsigned int c_entries, f_entries;
929 	unsigned char *end;
930 
931 	c_entries = 0;
932 	f_entries = 0;
933 	entry = eh_frame_start;
934 
935 	while ((char *)entry < eh_frame_end) {
936 		p = entry;
937 
938 		count = dwarf_entry_len(p, &len);
939 		if (count == 0) {
940 			/*
941 			 * We read a bogus length field value. There is
942 			 * nothing we can do here apart from disabling
943 			 * the DWARF unwinder. We can't even skip this
944 			 * entry and move to the next one because 'len'
945 			 * tells us where our next entry is.
946 			 */
947 			err = -EINVAL;
948 			goto out;
949 		} else
950 			p += count;
951 
952 		/* initial length does not include itself */
953 		end = p + len;
954 
955 		entry_type = get_unaligned((u32 *)p);
956 		p += 4;
957 
958 		if (entry_type == DW_EH_FRAME_CIE) {
959 			err = dwarf_parse_cie(entry, p, len, end, mod);
960 			if (err < 0)
961 				goto out;
962 			else
963 				c_entries++;
964 		} else {
965 			err = dwarf_parse_fde(entry, entry_type, p, len,
966 					      end, mod);
967 			if (err < 0)
968 				goto out;
969 			else
970 				f_entries++;
971 		}
972 
973 		entry = (char *)entry + len + 4;
974 	}
975 
976 	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
977 	       c_entries, f_entries);
978 
979 	return 0;
980 
981 out:
982 	return err;
983 }
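
    /*
     * For reference, the .eh_frame data parsed above is a packed
     * sequence of length-prefixed entries:
     *
     *	[ initial length | CIE id      | version, augmentation, ... ]
     *	[ initial length | CIE pointer | initial location, range, ... ]
     *	...
     *
     * An entry whose 4-byte field after the length equals
     * DW_EH_FRAME_CIE (zero in .eh_frame) is a CIE; any other value
     * marks an FDE and locates the CIE it refers to.
     */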
984 
985 #ifdef CONFIG_MODULES
986 int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
987 			  struct module *me)
988 {
989 	unsigned int i, err;
990 	unsigned long start, end;
991 	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
992 
993 	start = end = 0;
994 
995 	for (i = 1; i < hdr->e_shnum; i++) {
996 		/* Alloc bit cleared means "ignore it." */
997 		if ((sechdrs[i].sh_flags & SHF_ALLOC)
998 		    && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
999 			start = sechdrs[i].sh_addr;
1000 			end = start + sechdrs[i].sh_size;
1001 			break;
1002 		}
1003 	}
1004 
1005 	/* Did we find the .eh_frame section? */
1006 	if (i != hdr->e_shnum) {
1007 		err = dwarf_parse_section((char *)start, (char *)end, me);
1008 		if (err) {
1009 			printk(KERN_WARNING "%s: failed to parse DWARF info\n",
1010 			       me->name);
1011 			return err;
1012 		}
1013 	}
1014 
1015 	return 0;
1016 }
1017 
1018 /**
1019  *	module_dwarf_cleanup - remove FDE/CIEs associated with @mod
1020  *	@mod: the module that is being unloaded
1021  *
1022  *	Remove any FDEs and CIEs from the global lists that came from
1023  *	@mod's .eh_frame section because @mod is being unloaded.
1024  */
1025 void module_dwarf_cleanup(struct module *mod)
1026 {
1027 	struct dwarf_fde *fde;
1028 	struct dwarf_cie *cie;
1029 	unsigned long flags;
1030 
1031 	spin_lock_irqsave(&dwarf_cie_lock, flags);
1032 
1033 again_cie:
1034 	list_for_each_entry(cie, &dwarf_cie_list, link) {
1035 		if (cie->mod == mod)
1036 			break;
1037 	}
1038 
1039 	if (&cie->link != &dwarf_cie_list) {
1040 		list_del(&cie->link);
1041 		kfree(cie);
1042 		goto again_cie;
1043 	}
1044 
1045 	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
1046 
1047 	spin_lock_irqsave(&dwarf_fde_lock, flags);
1048 
1049 again_fde:
1050 	list_for_each_entry(fde, &dwarf_fde_list, link) {
1051 		if (fde->mod == mod)
1052 			break;
1053 	}
1054 
1055 	if (&fde->link != &dwarf_fde_list) {
1056 		list_del(&fde->link);
1057 		kfree(fde);
1058 		goto again_fde;
1059 	}
1060 
1061 	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
1062 }
1063 #endif /* CONFIG_MODULES */
1064 
1065 /**
1066  *	dwarf_unwinder_init - initialise the dwarf unwinder
1067  *
1068  *	Build the data structures describing the .eh_frame section to
1069  *	make it easier to look up CIE and FDE entries. Because the
1070  *	.eh_frame section is packed as tightly as possible it is not
1071  *	easy to look up the FDE for a given PC, so we build lists of
1072  *	FDE and CIE entries to speed up the search.
1073  */
1074 static int __init dwarf_unwinder_init(void)
1075 {
1076 	int err;
1077 	INIT_LIST_HEAD(&dwarf_cie_list);
1078 	INIT_LIST_HEAD(&dwarf_fde_list);
1079 
1080 	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
1081 			sizeof(struct dwarf_frame), 0,
1082 			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
1083 
1084 	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
1085 			sizeof(struct dwarf_reg), 0,
1086 			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
1087 
1088 	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
1089 					  mempool_alloc_slab,
1090 					  mempool_free_slab,
1091 					  dwarf_frame_cachep);
1092 
1093 	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
1094 					 mempool_alloc_slab,
1095 					 mempool_free_slab,
1096 					 dwarf_reg_cachep);
1097 
1098 	err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
1099 	if (err)
1100 		goto out;
1101 
1102 	err = unwinder_register(&dwarf_unwinder);
1103 	if (err)
1104 		goto out;
1105 
1106 	return 0;
1107 
1108 out:
1109 	printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
1110 	dwarf_unwinder_cleanup();
1111 	return -EINVAL;
1112 }
1113 early_initcall(dwarf_unwinder_init);
1114