/*
 * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This is an implementation of a DWARF unwinder. Its main purpose is
 * for generating stacktrace information. Based on the DWARF 3
 * specification from http://www.dwarfstd.org.
 *
 * TODO:
 *	- DWARF64 doesn't work.
 *	- Registers with DWARF_VAL_OFFSET rules aren't handled properly.
 */

/* #define DEBUG */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <asm/dwarf.h>
#include <asm/unwinder.h>
#include <asm/sections.h>
#include <asm/unaligned.h>
#include <asm/stacktrace.h>

/* Reserve enough memory for two stack frames */
#define DWARF_FRAME_MIN_REQ	2
/* ... with 4 registers per frame. */
#define DWARF_REG_MIN_REQ	(DWARF_FRAME_MIN_REQ * 4)

static struct kmem_cache *dwarf_frame_cachep;
static mempool_t *dwarf_frame_pool;

static struct kmem_cache *dwarf_reg_cachep;
static mempool_t *dwarf_reg_pool;

static LIST_HEAD(dwarf_cie_list);
static DEFINE_SPINLOCK(dwarf_cie_lock);

static LIST_HEAD(dwarf_fde_list);
static DEFINE_SPINLOCK(dwarf_fde_lock);

static struct dwarf_cie *cached_cie;

/**
 * dwarf_frame_alloc_reg - allocate memory for a DWARF register
 * @frame: the DWARF frame whose register list we insert onto
 * @reg_num: the register number
 *
 * Allocate space for, and initialise, a dwarf reg from
 * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
 * dwarf registers for @frame.
 *
 * Return the initialised DWARF reg.
 */
static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
					       unsigned int reg_num)
{
	struct dwarf_reg *reg;

	reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
	if (!reg) {
		printk(KERN_WARNING "Unable to allocate a DWARF register\n");
		/*
		 * Let's just bomb hard here, we have no way to
		 * gracefully recover.
		 */
		UNWINDER_BUG();
	}

	reg->number = reg_num;
	reg->addr = 0;
	reg->flags = 0;

	list_add(&reg->link, &frame->reg_list);

	return reg;
}

/* Release every register rule on @frame's list back to the mempool. */
static void dwarf_frame_free_regs(struct dwarf_frame *frame)
{
	struct dwarf_reg *reg, *n;

	list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
		list_del(&reg->link);
		mempool_free(reg, dwarf_reg_pool);
	}
}

/**
 * dwarf_frame_reg - return a DWARF register
 * @frame: the DWARF frame to search in for @reg_num
 * @reg_num: the register number to search for
 *
 * Lookup and return the dwarf reg @reg_num for this frame. Return
 * NULL if @reg_num is an invalid register number.
 */
static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
					 unsigned int reg_num)
{
	struct dwarf_reg *reg;

	list_for_each_entry(reg, &frame->reg_list, link) {
		if (reg->number == reg_num)
			return reg;
	}

	return NULL;
}

/**
 * dwarf_read_addr - read dwarf data
 * @src: source address of data
 * @dst: destination address to store the data to
 *
 * Read 'n' bytes from @src, where 'n' is the size of an address on
 * the native machine. We have to be careful when reading from @src
 * and writing to @dst because they can be arbitrarily aligned.
 * Return 'n', the number of bytes read.
 */
static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
{
	u32 val = get_unaligned(src);
	put_unaligned(val, dst);
	return sizeof(unsigned long *);
}

/**
 * dwarf_read_uleb128 - read unsigned LEB128 data
 * @addr: the address where the ULEB128 data is stored
 * @ret: address to store the result
 *
 * Decode an unsigned LEB128 encoded datum. The algorithm is taken
 * from Appendix C of the DWARF 3 spec. For information on the
 * encodings refer to section "7.6 - Variable Length Data". Return
 * the number of bytes read.
 */
static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
{
	unsigned int result;
	unsigned char byte;
	int shift, count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		count++;

		result |= (byte & 0x7f) << shift;
		shift += 7;

		if (!(byte & 0x80))
			break;
	}

	*ret = result;

	return count;
}

/**
 * dwarf_read_leb128 - read signed LEB128 data
 * @addr: the address of the LEB128 encoded data
 * @ret: address to store the result
 *
 * Decode signed LEB128 data. The algorithm is taken from Appendix
 * C of the DWARF 3 spec. Return the number of bytes read.
 */
static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
{
	unsigned char byte;
	int result, shift;
	int num_bits;
	int count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		result |= (byte & 0x7f) << shift;
		shift += 7;
		count++;

		if (!(byte & 0x80))
			break;
	}

	/* The number of bits in a signed integer. */
	num_bits = 8 * sizeof(result);

	/* Sign-extend if the value is negative and didn't fill the word. */
	if ((shift < num_bits) && (byte & 0x40))
		result |= (-1 << shift);

	*ret = result;

	return count;
}

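/*
 * Worked example for the two decoders above (illustrative only; the
 * byte values come from the LEB128 examples in the DWARF 3 spec):
 *
 *	624485 = 0b 0100110 0001110 1100101	(7-bit groups, MSB group first)
 *	       -> 0xe5, 0x8e, 0x26		(low group first, top bit set
 *						 on every byte but the last)
 *
 * so dwarf_read_uleb128() consumes 3 bytes and stores 624485. For the
 * signed variant, -2 fits in a single group with the sign bit (0x40)
 * set, so it encodes as the lone byte 0x7e and dwarf_read_leb128()
 * sign-extends the result to a full int.
 */
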
/**
 * dwarf_read_encoded_value - return the decoded value at @addr
 * @addr: the address of the encoded value
 * @val: where to write the decoded value
 * @encoding: the encoding with which we can decode @addr
 *
 * GCC emits encoded addresses in the .eh_frame FDE entries. Decode
 * the value at @addr using @encoding. The decoded value is written
 * to @val and the number of bytes read is returned.
 */
static int dwarf_read_encoded_value(char *addr, unsigned long *val,
				    char encoding)
{
	unsigned long decoded_addr = 0;
	int count = 0;

	switch (encoding & 0x70) {
	case DW_EH_PE_absptr:
		break;
	case DW_EH_PE_pcrel:
		decoded_addr = (unsigned long)addr;
		break;
	default:
		pr_debug("encoding=0x%x\n", (encoding & 0x70));
		UNWINDER_BUG();
	}

	if ((encoding & 0x07) == 0x00)
		encoding |= DW_EH_PE_udata4;

	switch (encoding & 0x0f) {
	case DW_EH_PE_sdata4:
	case DW_EH_PE_udata4:
		count += 4;
		decoded_addr += get_unaligned((u32 *)addr);
		__raw_writel(decoded_addr, val);
		break;
	default:
		pr_debug("encoding=0x%x\n", encoding);
		UNWINDER_BUG();
	}

	return count;
}

/**
 * dwarf_entry_len - return the length of an FDE or CIE
 * @addr: the address of the entry
 * @len: the length of the entry
 *
 * Read the initial_length field of the entry and store the size of
 * the entry in @len. We return the number of bytes read. Return a
 * count of 0 on error.
 */
static inline int dwarf_entry_len(char *addr, unsigned long *len)
{
	u32 initial_len;
	int count;

	initial_len = get_unaligned((u32 *)addr);
	count = 4;

	/*
	 * An initial length field value in the range DW_EXT_LO -
	 * DW_EXT_HI indicates an extension, and should not be
	 * interpreted as a length. The only extension that we currently
	 * understand is the use of DWARF64 addresses.
	 */
	if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
		/*
		 * The 64-bit length field immediately follows the
		 * compulsory 32-bit length field.
		 */
		if (initial_len == DW_EXT_DWARF64) {
			*len = get_unaligned((u64 *)(addr + 4));
			count = 12;
		} else {
			printk(KERN_WARNING "Unknown DWARF extension\n");
			count = 0;
		}
	} else
		*len = initial_len;

	return count;
}

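/*
 * The entry layouts handled by dwarf_entry_len(), sketched from the
 * DWARF 3 spec (see the DW_EXT_* definitions for the escape values):
 *
 *	32-bit format:	[ u32 length      ][ entry bytes ... ]
 *	64-bit format:	[ u32 escape code ][ u64 length ][ entry bytes ... ]
 *
 * A 32-bit length in the reserved range DW_EXT_LO - DW_EXT_HI is an
 * extension escape rather than a real length, which is why the
 * function returns a count of either 4 or 12 bytes.
 */
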
/**
 * dwarf_lookup_cie - locate the cie
 * @cie_ptr: the CIE address recorded in an FDE, used as the lookup key
 */
static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
{
	struct dwarf_cie *cie;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

	/*
	 * We've cached the last CIE we looked up because chances are
	 * that the FDE wants this CIE.
	 */
	if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
		cie = cached_cie;
		goto out;
	}

	list_for_each_entry(cie, &dwarf_cie_list, link) {
		if (cie->cie_pointer == cie_ptr) {
			cached_cie = cie;
			break;
		}
	}

	/* Couldn't find the entry in the list. */
	if (&cie->link == &dwarf_cie_list)
		cie = NULL;
out:
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
	return cie;
}

/**
 * dwarf_lookup_fde - locate the FDE that covers pc
 * @pc: the program counter
 */
struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
	struct dwarf_fde *fde;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_fde_lock, flags);

	list_for_each_entry(fde, &dwarf_fde_list, link) {
		unsigned long start, end;

		start = fde->initial_location;
		end = fde->initial_location + fde->address_range;

		if (pc >= start && pc < end)
			break;
	}

	/* Couldn't find the entry in the list. */
	if (&fde->link == &dwarf_fde_list)
		fde = NULL;

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return fde;
}

/**
 * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
 * @insn_start: address of the first instruction
 * @insn_end: address of the last instruction
 * @cie: the CIE for this function
 * @fde: the FDE for this function
 * @frame: the instructions calculate the CFA for this frame
 * @pc: the program counter of the address we're interested in
 *
 * Execute the Call Frame instruction sequence starting at
 * @insn_start and ending at @insn_end. The instructions describe
 * how to calculate the Canonical Frame Address of a stackframe.
 * Store the results in @frame.
 */
static int dwarf_cfa_execute_insns(unsigned char *insn_start,
				   unsigned char *insn_end,
				   struct dwarf_cie *cie,
				   struct dwarf_fde *fde,
				   struct dwarf_frame *frame,
				   unsigned long pc)
{
	unsigned char insn;
	unsigned char *current_insn;
	unsigned int count, delta, reg, expr_len, offset;
	struct dwarf_reg *regp;

	current_insn = insn_start;

	while (current_insn < insn_end && frame->pc <= pc) {
		insn = __raw_readb(current_insn++);

		/*
		 * Firstly, handle the opcodes that embed their operands
		 * in the instructions.
		 */
		switch (DW_CFA_opcode(insn)) {
		case DW_CFA_advance_loc:
			delta = DW_CFA_operand(insn);
			delta *= cie->code_alignment_factor;
			frame->pc += delta;
			continue;
			/* NOTREACHED */
		case DW_CFA_offset:
			reg = DW_CFA_operand(insn);
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->addr = offset;
			regp->flags |= DWARF_REG_OFFSET;
			continue;
			/* NOTREACHED */
		case DW_CFA_restore:
			reg = DW_CFA_operand(insn);
			continue;
			/* NOTREACHED */
		}

		/*
		 * Secondly, handle the opcodes that don't embed their
		 * operands in the instruction.
		 */
		switch (insn) {
		case DW_CFA_nop:
			continue;
		case DW_CFA_advance_loc1:
			delta = *current_insn++;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc2:
			delta = get_unaligned((u16 *)current_insn);
			current_insn += 2;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc4:
			delta = get_unaligned((u32 *)current_insn);
			current_insn += 4;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			break;
		case DW_CFA_restore_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			break;
		case DW_CFA_undefined:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_UNDEFINED;
			break;
		case DW_CFA_def_cfa:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_offset);
			current_insn += count;

			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_register:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_offset:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			frame->cfa_offset = offset;
			break;
		case DW_CFA_def_cfa_expression:
			count = dwarf_read_uleb128(current_insn, &expr_len);
			current_insn += count;

			frame->cfa_expr = current_insn;
			frame->cfa_expr_len = expr_len;
			current_insn += expr_len;

			frame->flags |= DWARF_FRAME_CFA_REG_EXP;
			break;
		case DW_CFA_offset_extended_sf:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_val_offset:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_VAL_OFFSET;
			regp->addr = offset;
			break;
		case DW_CFA_GNU_args_size:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			break;
		case DW_CFA_GNU_negative_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;

			regp = dwarf_frame_alloc_reg(frame, reg);
			regp->flags |= DWARF_REG_OFFSET;
			regp->addr = -offset;
			break;
		default:
			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
			UNWINDER_BUG();
			break;
		}
	}

	return 0;
}

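/*
 * Illustrative only: for a small function, the FDE instructions fed to
 * dwarf_cfa_execute_insns() might describe something like the
 * following (assuming the usual SH setup of r15 as the stack pointer,
 * r17/pr as the return-address column and data_alignment_factor = -4):
 *
 *	DW_CFA_def_cfa: r15, 0		-> CFA = r15 + 0
 *	DW_CFA_advance_loc: 4
 *	DW_CFA_def_cfa_offset: 4	-> CFA = r15 + 4
 *	DW_CFA_offset: r17, 1		-> pr saved at CFA + (1 * -4)
 *
 * Once the loop has advanced frame->pc up to the pc of interest,
 * frame->cfa_register/cfa_offset hold the CFA rule and the frame's reg
 * list holds a DWARF_REG_OFFSET entry per saved register, with ->addr
 * holding the CFA-relative offset (-4 in the example above).
 */
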
/**
 * dwarf_unwind_stack - recursively unwind the stack
 * @pc: address of the function to unwind
 * @prev: struct dwarf_frame of the previous stackframe on the callstack
 *
 * Return a struct dwarf_frame representing the most recent frame
 * on the callstack. Each of the lower (older) stack frames is
 * linked via the "prev" member.
 */
struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
				       struct dwarf_frame *prev)
{
	struct dwarf_frame *frame;
	struct dwarf_cie *cie;
	struct dwarf_fde *fde;
	struct dwarf_reg *reg;
	unsigned long addr;

	/*
	 * If this is the first invocation of this recursive function we
	 * need to get the contents of a physical register to get the CFA
	 * in order to begin the virtual unwinding of the stack.
	 *
	 * NOTE: the return address is guaranteed to be set up by the
	 * time this function makes its first function call.
	 */
	if (!pc && !prev)
		pc = (unsigned long)current_text_addr();

	frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
	if (!frame) {
		printk(KERN_ERR "Unable to allocate a dwarf frame\n");
		UNWINDER_BUG();
	}

	INIT_LIST_HEAD(&frame->reg_list);
	frame->flags = 0;
	frame->prev = prev;
	frame->return_addr = 0;

	fde = dwarf_lookup_fde(pc);
	if (!fde) {
		/*
		 * This is our normal exit path - the one that stops the
		 * recursion. There are two reasons why we might exit
		 * here,
		 *
		 *	a) pc has no associated DWARF frame info and so
		 *	we don't know how to unwind this frame. This is
		 *	usually the case when we're trying to unwind a
		 *	frame that was called from some assembly code
		 *	that has no DWARF info, e.g. syscalls.
		 *
		 *	b) the DWARF debug info for pc is bogus. There's
		 *	really no way to distinguish this case from the
		 *	case above, which sucks because we could print a
		 *	warning here.
		 */
		goto bail;
	}

	cie = dwarf_lookup_cie(fde->cie_pointer);

	frame->pc = fde->initial_location;

	/* CIE initial instructions */
	dwarf_cfa_execute_insns(cie->initial_instructions,
				cie->instructions_end, cie, fde,
				frame, pc);

	/* FDE instructions */
	dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
				fde, frame, pc);

	/* Calculate the CFA */
	switch (frame->flags) {
	case DWARF_FRAME_CFA_REG_OFFSET:
		if (prev) {
			reg = dwarf_frame_reg(prev, frame->cfa_register);
			UNWINDER_BUG_ON(!reg);
			UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

			addr = prev->cfa + reg->addr;
			frame->cfa = __raw_readl(addr);

		} else {
			/*
			 * Again, this is the first invocation of this
			 * recursive function. We need to physically
			 * read the contents of a register in order to
			 * get the Canonical Frame Address for this
			 * function.
			 */
			frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
		}

		frame->cfa += frame->cfa_offset;
		break;
	default:
		UNWINDER_BUG();
	}

	reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);

	/*
	 * If we haven't seen the return address register or the return
	 * address column is undefined then we must assume that this is
	 * the end of the callstack.
	 */
	if (!reg || reg->flags == DWARF_UNDEFINED)
		goto bail;

	UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);

	addr = frame->cfa + reg->addr;
	frame->return_addr = __raw_readl(addr);

	return frame;

bail:
	dwarf_frame_free_regs(frame);
	mempool_free(frame, dwarf_frame_pool);
	return NULL;
}

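/*
 * dwarf_parse_cie() below walks a CIE laid out roughly as follows (a
 * sketch of the common GCC-emitted .eh_frame case; the 'P' and 'S'
 * augmentations make us bail with UNWINDER_BUG()):
 *
 *	u32		length		(already consumed by our caller)
 *	u32		CIE id		(DW_EH_FRAME_CIE)
 *	u8		version		(1)
 *	string		augmentation	(e.g. "zR")
 *	uleb128		code alignment factor
 *	sleb128		data alignment factor
 *	u8/uleb128	return address register
 *	uleb128		augmentation data length	(if 'z')
 *	u8		FDE pointer encoding		(if 'R')
 *	...		initial CFI instructions, up to 'end'
 */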
static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
			   unsigned char *end)
{
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;

	cie = kzalloc(sizeof(*cie), GFP_KERNEL);
	if (!cie)
		return -ENOMEM;

	cie->length = len;

	/*
	 * Record the offset into the .eh_frame section
	 * for this CIE. It allows this CIE to be
	 * quickly and easily looked up from the
	 * corresponding FDE.
	 */
	cie->cie_pointer = (unsigned long)entry;

	cie->version = *(char *)p++;
	UNWINDER_BUG_ON(cie->version != 1);

	cie->augmentation = p;
	p += strlen(cie->augmentation) + 1;

	count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
	p += count;

	count = dwarf_read_leb128(p, &cie->data_alignment_factor);
	p += count;

	/*
	 * Which column in the rule table contains the
	 * return address?
	 */
	if (cie->version == 1) {
		cie->return_address_reg = __raw_readb(p);
		p++;
	} else {
		count = dwarf_read_uleb128(p, &cie->return_address_reg);
		p += count;
	}

	if (cie->augmentation[0] == 'z') {
		unsigned int length, count;
		cie->flags |= DWARF_CIE_Z_AUGMENTATION;

		count = dwarf_read_uleb128(p, &length);
		p += count;

		UNWINDER_BUG_ON((unsigned char *)p > end);

		cie->initial_instructions = p + length;
		cie->augmentation++;
	}

	while (*cie->augmentation) {
		/*
		 * "L" indicates a byte showing how the
		 * LSDA pointer is encoded. Skip it.
		 */
		if (*cie->augmentation == 'L') {
			p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'R') {
			/*
			 * "R" indicates a byte showing
			 * how FDE addresses are
			 * encoded.
			 */
			cie->encoding = *(char *)p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'P') {
			/*
			 * "P" indicates a personality
			 * routine in the CIE
			 * augmentation.
			 */
			UNWINDER_BUG();
		} else if (*cie->augmentation == 'S') {
			UNWINDER_BUG();
		} else {
			/*
			 * Unknown augmentation. Assume
			 * 'z' augmentation.
			 */
			p = cie->initial_instructions;
			UNWINDER_BUG_ON(!p);
			break;
		}
	}

	cie->initial_instructions = p;
	cie->instructions_end = end;

	/* Add to list */
	spin_lock_irqsave(&dwarf_cie_lock, flags);
	list_add_tail(&cie->link, &dwarf_cie_list);
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	return 0;
}

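/*
 * Companion sketch for dwarf_parse_fde() below; a typical .eh_frame
 * FDE looks roughly like this:
 *
 *	u32	length			(already consumed by our caller)
 *	u32	CIE pointer		(byte offset back to the owning CIE)
 *	addr	initial location	(encoded per the CIE's 'R' byte)
 *	addr	address range		(bytes of text covered by this FDE)
 *	uleb128	augmentation data length	(if the CIE had 'z')
 *	...	augmentation data, then CFI instructions up to 'end'
 */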
static int dwarf_parse_fde(void *entry, u32 entry_type,
			   void *start, unsigned long len,
			   unsigned char *end)
{
	struct dwarf_fde *fde;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;
	void *p = start;

	fde = kzalloc(sizeof(*fde), GFP_KERNEL);
	if (!fde)
		return -ENOMEM;

	fde->length = len;

	/*
	 * In a .eh_frame section the CIE pointer is the
	 * delta between the address of the CIE pointer field
	 * within the FDE and the address of the CIE itself.
	 * Convert it into the absolute address of the CIE so
	 * that it matches the value recorded by dwarf_parse_cie().
	 */
	fde->cie_pointer = (unsigned long)(p - entry_type - 4);

	cie = dwarf_lookup_cie(fde->cie_pointer);
	fde->cie = cie;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->initial_location,
						 cie->encoding);
	else
		count = dwarf_read_addr(p, &fde->initial_location);

	p += count;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->address_range,
						 cie->encoding & 0x0f);
	else
		count = dwarf_read_addr(p, &fde->address_range);

	p += count;

	if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
		unsigned int length;
		count = dwarf_read_uleb128(p, &length);
		p += count + length;
	}

	/* Call frame instructions. */
	fde->instructions = p;
	fde->end = end;

	/* Add to list. */
	spin_lock_irqsave(&dwarf_fde_lock, flags);
	list_add_tail(&fde->link, &dwarf_fde_list);
	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return 0;
}

/*
 * Walk the callstack from the current context, handing every return
 * address we find to the stacktrace callback in @ops and freeing each
 * frame once the next one has been generated.
 */
static void dwarf_unwinder_dump(struct task_struct *task,
				struct pt_regs *regs,
				unsigned long *sp,
				const struct stacktrace_ops *ops,
				void *data)
{
	struct dwarf_frame *frame, *_frame;
	unsigned long return_addr;

	_frame = NULL;
	return_addr = 0;

	while (1) {
		frame = dwarf_unwind_stack(return_addr, _frame);

		if (_frame) {
			dwarf_frame_free_regs(_frame);
			mempool_free(_frame, dwarf_frame_pool);
		}

		_frame = frame;

		if (!frame || !frame->return_addr)
			break;

		return_addr = frame->return_addr;
		ops->address(data, return_addr, 1);
	}
}

static struct unwinder dwarf_unwinder = {
	.name = "dwarf-unwinder",
	.dump = dwarf_unwinder_dump,
	.rating = 150,
};

static void dwarf_unwinder_cleanup(void)
{
	struct dwarf_cie *cie;
	struct dwarf_fde *fde;

	/*
	 * Deallocate all the memory allocated for the DWARF unwinder.
	 * Traverse all the FDE/CIE lists and remove and free all the
	 * memory associated with those data structures.
	 */
	list_for_each_entry(cie, &dwarf_cie_list, link)
		kfree(cie);

	list_for_each_entry(fde, &dwarf_fde_list, link)
		kfree(fde);

	kmem_cache_destroy(dwarf_reg_cachep);
	kmem_cache_destroy(dwarf_frame_cachep);
}

/**
 * dwarf_unwinder_init - initialise the dwarf unwinder
 *
 * Build the data structures describing the .eh_frame section to
 * make it easier to lookup CIE and FDE entries. Because the
 * .eh_frame section is packed as tightly as possible it is not
 * easy to lookup the FDE for a given PC, so we build a list of FDE
 * and CIE entries that make it easier.
 */
static int __init dwarf_unwinder_init(void)
{
	u32 entry_type;
	void *p, *entry;
	int count, err = 0;
	unsigned long len;
	unsigned int c_entries, f_entries;
	unsigned char *end;

	INIT_LIST_HEAD(&dwarf_cie_list);
	INIT_LIST_HEAD(&dwarf_fde_list);

	c_entries = 0;
	f_entries = 0;
	entry = &__start_eh_frame;

	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
			sizeof(struct dwarf_frame), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);

	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
			sizeof(struct dwarf_reg), 0,
			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);

	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
					  mempool_alloc_slab,
					  mempool_free_slab,
					  dwarf_frame_cachep);

	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
					mempool_alloc_slab,
					mempool_free_slab,
					dwarf_reg_cachep);

	while ((char *)entry < __stop_eh_frame) {
		p = entry;

		count = dwarf_entry_len(p, &len);
		if (count == 0) {
			/*
			 * We read a bogus length field value. There is
			 * nothing we can do here apart from disabling
			 * the DWARF unwinder. We can't even skip this
			 * entry and move to the next one because 'len'
			 * tells us where our next entry is.
			 */
			goto out;
		} else
			p += count;

		/* initial length does not include itself */
		end = p + len;

		entry_type = get_unaligned((u32 *)p);
		p += 4;

		if (entry_type == DW_EH_FRAME_CIE) {
			err = dwarf_parse_cie(entry, p, len, end);
			if (err < 0)
				goto out;
			else
				c_entries++;
		} else {
			err = dwarf_parse_fde(entry, entry_type, p, len, end);
			if (err < 0)
				goto out;
			else
				f_entries++;
		}

		entry = (char *)entry + len + 4;
	}

	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
	       c_entries, f_entries);

	err = unwinder_register(&dwarf_unwinder);
	if (err)
		goto out;

	return 0;

out:
	printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
	dwarf_unwinder_cleanup();
	return -EINVAL;
}
early_initcall(dwarf_unwinder_init);