// SPDX-License-Identifier: GPL-2.0-only
/*
 * kallsyms.c: in-kernel printing of symbolic oopses and stack traces.
 *
 * Rewritten and vastly simplified by Rusty Russell for in-kernel
 * module loader:
 *   Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * ChangeLog:
 *
 * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com>
 *      Changed the compression method from stem compression to "table lookup"
 *      compression (see scripts/kallsyms.c for a more complete description)
 */
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/kdb.h>
#include <linux/err.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>	/* for cond_resched */
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/compiler.h>

/*
 * These will be re-linked against their real values
 * during the second link stage.
 */
extern const unsigned long kallsyms_addresses[] __weak;
extern const int kallsyms_offsets[] __weak;
extern const u8 kallsyms_names[] __weak;

/*
 * Tell the compiler that the count isn't in the small data section if the arch
 * has one (eg: FRV).
 */
extern const unsigned int kallsyms_num_syms
__attribute__((weak, section(".rodata")));

extern const unsigned long kallsyms_relative_base
__attribute__((weak, section(".rodata")));

extern const char kallsyms_token_table[] __weak;
extern const u16 kallsyms_token_index[] __weak;

extern const unsigned int kallsyms_markers[] __weak;

/*
 * Expand compressed symbol data into the resulting uncompressed string,
 * given the offset to where the symbol is in the compressed stream.
 * If the uncompressed string is too long (>= maxlen), it will be truncated.
 */
static unsigned int kallsyms_expand_symbol(unsigned int off,
					   char *result, size_t maxlen)
{
	int len, skipped_first = 0;
	const char *tptr;
	const u8 *data;

	/* Get the compressed symbol length from the first symbol byte. */
	data = &kallsyms_names[off];
	len = *data;
	data++;

	/*
	 * Update the offset to return the offset for the next symbol on
	 * the compressed stream.
	 */
	off += len + 1;

	/*
	 * For every byte on the compressed symbol data, copy the table
	 * entry for that byte.
	 */
	while (len) {
		tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
		data++;
		len--;

		while (*tptr) {
			if (skipped_first) {
				if (maxlen <= 1)
					goto tail;
				*result = *tptr;
				result++;
				maxlen--;
			} else
				skipped_first = 1;
			tptr++;
		}
	}

tail:
	if (maxlen)
		*result = '\0';

	/* Return the offset to the next symbol. */
	return off;
}

/*
 * Get symbol type information. This is encoded as a single char at the
 * beginning of the symbol name.
 */
static char kallsyms_get_symbol_type(unsigned int off)
{
	/*
	 * Get just the first code, look it up in the token table,
	 * and return the first char from this token.
	 */
	return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]];
}
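/*
 * Layout of the compressed name stream, for reference:
 *
 *	kallsyms_names[] = { <len>, <len token indices>, <len>, ... }
 *
 * Each entry is one length byte followed by <len> token indices. Every
 * index selects a string from kallsyms_token_table[] (located via
 * kallsyms_token_index[]); concatenating those strings yields the symbol
 * type character followed by the symbol name. kallsyms_expand_symbol()
 * skips the type character, while kallsyms_get_symbol_type() reads only it.
 */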
/*
 * Find the offset in the compressed stream given an index in the
 * kallsyms array.
 */
static unsigned int get_symbol_offset(unsigned long pos)
{
	const u8 *name;
	int i;

	/*
	 * Use the closest marker we have. We have markers every 256 positions,
	 * so that should be close enough.
	 */
	name = &kallsyms_names[kallsyms_markers[pos >> 8]];

	/*
	 * Sequentially scan all the symbols up to the point we're searching
	 * for. Every symbol is stored in a [<len>][<len> bytes of data] format,
	 * so we just need to add the len to the current pointer for every
	 * symbol we wish to skip.
	 */
	for (i = 0; i < (pos & 0xFF); i++)
		name = name + (*name) + 1;

	return name - kallsyms_names;
}

static unsigned long kallsyms_sym_address(int idx)
{
	if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
		return kallsyms_addresses[idx];

	/* values are unsigned offsets if --absolute-percpu is not in effect */
	if (!IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU))
		return kallsyms_relative_base + (u32)kallsyms_offsets[idx];

	/* ...otherwise, positive offsets are absolute values */
	if (kallsyms_offsets[idx] >= 0)
		return kallsyms_offsets[idx];

	/* ...and negative offsets are relative to kallsyms_relative_base - 1 */
	return kallsyms_relative_base - 1 - kallsyms_offsets[idx];
}

/* Lookup the address for this symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name)
{
	char namebuf[KSYM_NAME_LEN];
	unsigned long i;
	unsigned int off;

	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
		off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));

		if (strcmp(namebuf, name) == 0)
			return kallsyms_sym_address(i);
	}
	return module_kallsyms_lookup_name(name);
}
EXPORT_SYMBOL_GPL(kallsyms_lookup_name);

int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
				      unsigned long),
			    void *data)
{
	char namebuf[KSYM_NAME_LEN];
	unsigned long i;
	unsigned int off;
	int ret;

	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
		off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
		ret = fn(data, namebuf, NULL, kallsyms_sym_address(i));
		if (ret != 0)
			return ret;
	}
	return module_kallsyms_on_each_symbol(fn, data);
}
EXPORT_SYMBOL_GPL(kallsyms_on_each_symbol);
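/*
 * Note: the name lookups above expand every compressed name in turn and are
 * therefore linear in the number of symbols. The address lookups below can
 * instead binary-search kallsyms_addresses/kallsyms_offsets, which are
 * sorted by address at build time.
 */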
static unsigned long get_symbol_pos(unsigned long addr,
				    unsigned long *symbolsize,
				    unsigned long *offset)
{
	unsigned long symbol_start = 0, symbol_end = 0;
	unsigned long i, low, high, mid;

	/* This kernel should never have been booted. */
	if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
		BUG_ON(!kallsyms_addresses);
	else
		BUG_ON(!kallsyms_offsets);

	/* Do a binary search on the sorted kallsyms_addresses array. */
	low = 0;
	high = kallsyms_num_syms;

	while (high - low > 1) {
		mid = low + (high - low) / 2;
		if (kallsyms_sym_address(mid) <= addr)
			low = mid;
		else
			high = mid;
	}

	/*
	 * Search for the first aliased symbol. Aliased
	 * symbols are symbols with the same address.
	 */
	while (low && kallsyms_sym_address(low-1) == kallsyms_sym_address(low))
		--low;

	symbol_start = kallsyms_sym_address(low);

	/* Search for next non-aliased symbol. */
	for (i = low + 1; i < kallsyms_num_syms; i++) {
		if (kallsyms_sym_address(i) > symbol_start) {
			symbol_end = kallsyms_sym_address(i);
			break;
		}
	}

	/* If we found no next symbol, we use the end of the section. */
	if (!symbol_end) {
		if (is_kernel_inittext(addr))
			symbol_end = (unsigned long)_einittext;
		else if (IS_ENABLED(CONFIG_KALLSYMS_ALL))
			symbol_end = (unsigned long)_end;
		else
			symbol_end = (unsigned long)_etext;
	}

	if (symbolsize)
		*symbolsize = symbol_end - symbol_start;
	if (offset)
		*offset = addr - symbol_start;

	return low;
}

/*
 * Lookup an address but don't bother to find any names.
 */
int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
				unsigned long *offset)
{
	char namebuf[KSYM_NAME_LEN];

	if (is_ksym_addr(addr)) {
		get_symbol_pos(addr, symbolsize, offset);
		return 1;
	}
	return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
	       !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
}

/*
 * Lookup an address
 * - modname is set to NULL if it's in the kernel.
 * - We guarantee that the returned name is valid until we reschedule,
 *   even if it resides in a module.
 * - We also guarantee that modname will be valid until rescheduled.
 */
const char *kallsyms_lookup(unsigned long addr,
			    unsigned long *symbolsize,
			    unsigned long *offset,
			    char **modname, char *namebuf)
{
	const char *ret;

	namebuf[KSYM_NAME_LEN - 1] = 0;
	namebuf[0] = 0;

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, symbolsize, offset);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       namebuf, KSYM_NAME_LEN);
		if (modname)
			*modname = NULL;
		return namebuf;
	}

	/* See if it's in a module or a BPF JITed image. */
	ret = module_address_lookup(addr, symbolsize, offset,
				    modname, namebuf);
	if (!ret)
		ret = bpf_address_lookup(addr, symbolsize,
					 offset, modname, namebuf);

	if (!ret)
		ret = ftrace_mod_address_lookup(addr, symbolsize,
						offset, modname, namebuf);
	return ret;
}

int lookup_symbol_name(unsigned long addr, char *symname)
{
	symname[0] = '\0';
	symname[KSYM_NAME_LEN - 1] = '\0';

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, NULL, NULL);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       symname, KSYM_NAME_LEN);
		return 0;
	}
	/* See if it's in a module. */
	return lookup_module_symbol_name(addr, symname);
}

int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
			unsigned long *offset, char *modname, char *name)
{
	name[0] = '\0';
	name[KSYM_NAME_LEN - 1] = '\0';

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, size, offset);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       name, KSYM_NAME_LEN);
		modname[0] = '\0';
		return 0;
	}
	/* See if it's in a module. */
	return lookup_module_symbol_attrs(addr, size, offset, modname, name);
}
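/*
 * The formatted output produced below is "name+offset/size [module]":
 * the "+offset/size" part is controlled by add_offset and the "[module]"
 * part is present only for module symbols. A plain "0x%lx" address is
 * emitted when no symbol matches.
 */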
/* Look up a kernel symbol and return it in a text buffer. */
static int __sprint_symbol(char *buffer, unsigned long address,
			   int symbol_offset, int add_offset)
{
	char *modname;
	const char *name;
	unsigned long offset, size;
	int len;

	address += symbol_offset;
	name = kallsyms_lookup(address, &size, &offset, &modname, buffer);
	if (!name)
		return sprintf(buffer, "0x%lx", address - symbol_offset);

	if (name != buffer)
		strcpy(buffer, name);
	len = strlen(buffer);
	offset -= symbol_offset;

	if (add_offset)
		len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);

	if (modname)
		len += sprintf(buffer + len, " [%s]", modname);

	return len;
}

/**
 * sprint_symbol - Look up a kernel symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function looks up a kernel symbol with @address and stores its name,
 * offset, size and module name to @buffer if possible. If no symbol was found,
 * just saves its @address as is.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_symbol(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, 0, 1);
}
EXPORT_SYMBOL_GPL(sprint_symbol);

/**
 * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function looks up a kernel symbol with @address and stores its name
 * and module name to @buffer if possible. If no symbol was found, just saves
 * its @address as is.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_symbol_no_offset(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, 0, 0);
}
EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);

/**
 * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function is for stack backtraces and does the same thing as
 * sprint_symbol() but with a modified/decreased @address. If there is a
 * tail-call to a function marked "noreturn", gcc optimizes out the code after
 * the call, so the stack-saved return address could point outside of the
 * caller. This function ensures that kallsyms will find the original caller
 * by decreasing @address.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_backtrace(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, -1, 1);
}
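/*
 * /proc/kallsyms iteration order: core kernel symbols come first (positions
 * 0 .. kallsyms_num_syms - 1), followed by arch-specific symbols, module
 * symbols, ftrace trampolines and finally BPF JITed symbols. The end of each
 * extra section is recorded in the pos_*_end fields below as it is reached.
 */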
/*
 * To avoid using get_symbol_offset for every symbol, we carry the name
 * offset along.
 */
struct kallsym_iter {
	loff_t pos;
	loff_t pos_arch_end;
	loff_t pos_mod_end;
	loff_t pos_ftrace_mod_end;
	unsigned long value;
	unsigned int nameoff; /* If iterating in core kernel symbols. */
	char type;
	char name[KSYM_NAME_LEN];
	char module_name[MODULE_NAME_LEN];
	int exported;
	int show_value;
};

int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
			    char *type, char *name)
{
	return -EINVAL;
}

static int get_ksymbol_arch(struct kallsym_iter *iter)
{
	int ret = arch_get_kallsym(iter->pos - kallsyms_num_syms,
				   &iter->value, &iter->type,
				   iter->name);

	if (ret < 0) {
		iter->pos_arch_end = iter->pos;
		return 0;
	}

	return 1;
}

static int get_ksymbol_mod(struct kallsym_iter *iter)
{
	int ret = module_get_kallsym(iter->pos - iter->pos_arch_end,
				     &iter->value, &iter->type,
				     iter->name, iter->module_name,
				     &iter->exported);
	if (ret < 0) {
		iter->pos_mod_end = iter->pos;
		return 0;
	}

	return 1;
}

static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
{
	int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end,
					 &iter->value, &iter->type,
					 iter->name, iter->module_name,
					 &iter->exported);
	if (ret < 0) {
		iter->pos_ftrace_mod_end = iter->pos;
		return 0;
	}

	return 1;
}

static int get_ksymbol_bpf(struct kallsym_iter *iter)
{
	strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN);
	iter->exported = 0;
	return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
			       &iter->value, &iter->type,
			       iter->name) < 0 ? 0 : 1;
}

/* Returns the number of bytes to advance to the next compressed name. */
static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
{
	unsigned off = iter->nameoff;

	iter->module_name[0] = '\0';
	iter->value = kallsyms_sym_address(iter->pos);

	iter->type = kallsyms_get_symbol_type(off);

	off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name));

	return off - iter->nameoff;
}

static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
{
	iter->name[0] = '\0';
	iter->nameoff = get_symbol_offset(new_pos);
	iter->pos = new_pos;
	if (new_pos == 0) {
		iter->pos_arch_end = 0;
		iter->pos_mod_end = 0;
		iter->pos_ftrace_mod_end = 0;
	}
}

/*
 * The end position (last + 1) of each additional kallsyms section is recorded
 * in iter->pos_..._end as each section is added, and so can be used to
 * determine which get_ksymbol_...() function to call next.
 */
static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
{
	iter->pos = pos;

	if ((!iter->pos_arch_end || iter->pos_arch_end > pos) &&
	    get_ksymbol_arch(iter))
		return 1;

	if ((!iter->pos_mod_end || iter->pos_mod_end > pos) &&
	    get_ksymbol_mod(iter))
		return 1;

	if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) &&
	    get_ksymbol_ftrace_mod(iter))
		return 1;

	return get_ksymbol_bpf(iter);
}
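/*
 * For core kernel symbols, update_iter() advances iter->nameoff by the
 * compressed size of the current entry, so a sequential walk only needs
 * get_symbol_offset() when reset_iter() seeks to a new position.
 */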
/* Returns false if pos at or past end of file. */
static int update_iter(struct kallsym_iter *iter, loff_t pos)
{
	/* Module symbols can be accessed randomly. */
	if (pos >= kallsyms_num_syms)
		return update_iter_mod(iter, pos);

	/* If we're not on the desired position, reset to new position. */
	if (pos != iter->pos)
		reset_iter(iter, pos);

	iter->nameoff += get_ksymbol_core(iter);
	iter->pos++;

	return 1;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	(*pos)++;

	if (!update_iter(m->private, *pos))
		return NULL;
	return p;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	if (!update_iter(m->private, *pos))
		return NULL;
	return m->private;
}

static void s_stop(struct seq_file *m, void *p)
{
}

static int s_show(struct seq_file *m, void *p)
{
	void *value;
	struct kallsym_iter *iter = m->private;

	/* Some debugging symbols have no name. Ignore them. */
	if (!iter->name[0])
		return 0;

	value = iter->show_value ? (void *)iter->value : NULL;

	if (iter->module_name[0]) {
		char type;

		/*
		 * Label it "global" if it is exported,
		 * "local" if not exported.
		 */
		type = iter->exported ? toupper(iter->type) :
					tolower(iter->type);
		seq_printf(m, "%px %c %s\t[%s]\n", value,
			   type, iter->name, iter->module_name);
	} else
		seq_printf(m, "%px %c %s\n", value,
			   iter->type, iter->name);
	return 0;
}

static const struct seq_operations kallsyms_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show
};

static inline int kallsyms_for_perf(void)
{
#ifdef CONFIG_PERF_EVENTS
	extern int sysctl_perf_event_paranoid;
	if (sysctl_perf_event_paranoid <= 1)
		return 1;
#endif
	return 0;
}

/*
 * We show kallsyms information even to normal users if we've enabled
 * kernel profiling and are explicitly not paranoid (so kptr_restrict
 * is clear, and sysctl_perf_event_paranoid isn't set).
 *
 * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
 * block even that).
 */
int kallsyms_show_value(void)
{
	switch (kptr_restrict) {
	case 0:
		if (kallsyms_for_perf())
			return 1;
	/* fallthrough */
	case 1:
		if (has_capability_noaudit(current, CAP_SYSLOG))
			return 1;
	/* fallthrough */
	default:
		return 0;
	}
}

static int kallsyms_open(struct inode *inode, struct file *file)
{
	/*
	 * We keep the iterator in m->private, since the normal case is to
	 * s_start from where we left off, so we avoid calling
	 * get_symbol_offset for every symbol.
	 */
	struct kallsym_iter *iter;
	iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter));
	if (!iter)
		return -ENOMEM;
	reset_iter(iter, 0);

	iter->show_value = kallsyms_show_value();
	return 0;
}
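/*
 * Note that kallsyms_show_value() is evaluated once, at open time, so the
 * decision to print real addresses or NULL is fixed for the lifetime of the
 * open file, regardless of later changes to kptr_restrict.
 */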
#ifdef CONFIG_KGDB_KDB
const char *kdb_walk_kallsyms(loff_t *pos)
{
	static struct kallsym_iter kdb_walk_kallsyms_iter;
	if (*pos == 0) {
		memset(&kdb_walk_kallsyms_iter, 0,
		       sizeof(kdb_walk_kallsyms_iter));
		reset_iter(&kdb_walk_kallsyms_iter, 0);
	}
	while (1) {
		if (!update_iter(&kdb_walk_kallsyms_iter, *pos))
			return NULL;
		++*pos;
		/* Some debugging symbols have no name. Ignore them. */
		if (kdb_walk_kallsyms_iter.name[0])
			return kdb_walk_kallsyms_iter.name;
	}
}
#endif	/* CONFIG_KGDB_KDB */

static const struct proc_ops kallsyms_proc_ops = {
	.proc_open	= kallsyms_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release_private,
};

static int __init kallsyms_init(void)
{
	proc_create("kallsyms", 0444, NULL, &kallsyms_proc_ops);
	return 0;
}
device_initcall(kallsyms_init);