// SPDX-License-Identifier: GPL-2.0-only
/*
 * kallsyms.c: in-kernel printing of symbolic oopses and stack traces.
 *
 * Rewritten and vastly simplified by Rusty Russell for in-kernel
 * module loader:
 *   Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * ChangeLog:
 *
 * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com>
 *      Changed the compression method from stem compression to "table lookup"
 *      compression (see scripts/kallsyms.c for a more complete description)
 */
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/kdb.h>
#include <linux/err.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>	/* for cond_resched */
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/compiler.h>

/*
 * These will be re-linked against their real values
 * during the second link stage.
 */
extern const unsigned long kallsyms_addresses[] __weak;
extern const int kallsyms_offsets[] __weak;
extern const u8 kallsyms_names[] __weak;

/*
 * Tell the compiler that the count isn't in the small data section if the arch
 * has one (eg: FRV).
 */
extern const unsigned int kallsyms_num_syms
__attribute__((weak, section(".rodata")));

extern const unsigned long kallsyms_relative_base
__attribute__((weak, section(".rodata")));

extern const u8 kallsyms_token_table[] __weak;
extern const u16 kallsyms_token_index[] __weak;

extern const unsigned int kallsyms_markers[] __weak;

/*
 * Expand compressed symbol data into the resulting uncompressed string,
 * given the offset to where the symbol is in the compressed stream.
 * If the uncompressed string is too long (>= maxlen), it is truncated.
 */
static unsigned int kallsyms_expand_symbol(unsigned int off,
					   char *result, size_t maxlen)
{
	int len, skipped_first = 0;
	const u8 *tptr, *data;

	/* Get the compressed symbol length from the first symbol byte. */
	data = &kallsyms_names[off];
	len = *data;
	data++;

	/*
	 * Update the offset to return the offset for the next symbol on
	 * the compressed stream.
	 */
	off += len + 1;

	/*
	 * For every byte on the compressed symbol data, copy the table
	 * entry for that byte.
	 */
	while (len) {
		tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
		data++;
		len--;

		while (*tptr) {
			if (skipped_first) {
				if (maxlen <= 1)
					goto tail;
				*result = *tptr;
				result++;
				maxlen--;
			} else
				skipped_first = 1;
			tptr++;
		}
	}

tail:
	if (maxlen)
		*result = '\0';

	/* Return the offset to the next symbol. */
	return off;
}

/*
 * Get symbol type information. This is encoded as a single char at the
 * beginning of the symbol name.
 */
static char kallsyms_get_symbol_type(unsigned int off)
{
	/*
	 * Get just the first code, look it up in the token table,
	 * and return the first char from this token.
	 */
	return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]];
}


/*
 * Find the offset on the compressed stream given an index in the
 * kallsyms array.
 */
static unsigned int get_symbol_offset(unsigned long pos)
{
	const u8 *name;
	int i;

	/*
	 * Use the closest marker we have. We have markers every 256 positions,
	 * so that should be close enough.
	 */
	name = &kallsyms_names[kallsyms_markers[pos >> 8]];

	/*
	 * Sequentially scan all the symbols up to the point we're searching
	 * for. Every symbol is stored in a [<len>][<len> bytes of data] format,
	 * so we just need to add the len to the current pointer for every
	 * symbol we wish to skip.
	 */
	for (i = 0; i < (pos & 0xFF); i++)
		name = name + (*name) + 1;

	return name - kallsyms_names;
}
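/*
 * Illustrative sketch (not part of the original file; the byte values are
 * made up): scripts/kallsyms.c emits each name as one length byte followed
 * by <length> token indices, so a hypothetical entry such as
 *
 *	{ 0x03, 0x54, 0x10, 0x2a }
 *
 * is expanded by looking up indices 0x54, 0x10 and 0x2a through
 * kallsyms_token_index[] into kallsyms_token_table[] and concatenating the
 * resulting token strings.  The first character of the expansion is the
 * symbol type ('T', 't', 'd', ...), which kallsyms_expand_symbol() skips
 * and kallsyms_get_symbol_type() returns directly.  get_symbol_offset()
 * relies on the same [<len>][<data>] layout: kallsyms_markers[] records
 * the stream offset of every 256th symbol, and the remaining (pos & 0xFF)
 * entries are skipped by adding <len> + 1 per entry.
 */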
static unsigned long kallsyms_sym_address(int idx)
{
	if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
		return kallsyms_addresses[idx];

	/* values are unsigned offsets if --absolute-percpu is not in effect */
	if (!IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU))
		return kallsyms_relative_base + (u32)kallsyms_offsets[idx];

	/* ...otherwise, positive offsets are absolute values */
	if (kallsyms_offsets[idx] >= 0)
		return kallsyms_offsets[idx];

	/* ...and negative offsets are relative to kallsyms_relative_base - 1 */
	return kallsyms_relative_base - 1 - kallsyms_offsets[idx];
}

/* Lookup the address for this symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name)
{
	char namebuf[KSYM_NAME_LEN];
	unsigned long i;
	unsigned int off;

	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
		off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));

		if (strcmp(namebuf, name) == 0)
			return kallsyms_sym_address(i);
	}
	return module_kallsyms_lookup_name(name);
}
EXPORT_SYMBOL_GPL(kallsyms_lookup_name);

int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
				      unsigned long),
			    void *data)
{
	char namebuf[KSYM_NAME_LEN];
	unsigned long i;
	unsigned int off;
	int ret;

	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
		off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
		ret = fn(data, namebuf, NULL, kallsyms_sym_address(i));
		if (ret != 0)
			return ret;
	}
	return module_kallsyms_on_each_symbol(fn, data);
}
EXPORT_SYMBOL_GPL(kallsyms_on_each_symbol);

static unsigned long get_symbol_pos(unsigned long addr,
				    unsigned long *symbolsize,
				    unsigned long *offset)
{
	unsigned long symbol_start = 0, symbol_end = 0;
	unsigned long i, low, high, mid;

	/* This kernel should never have been booted. */
	if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
		BUG_ON(!kallsyms_addresses);
	else
		BUG_ON(!kallsyms_offsets);

	/* Do a binary search on the sorted kallsyms_addresses array. */
	low = 0;
	high = kallsyms_num_syms;

	while (high - low > 1) {
		mid = low + (high - low) / 2;
		if (kallsyms_sym_address(mid) <= addr)
			low = mid;
		else
			high = mid;
	}

	/*
	 * Search for the first aliased symbol. Aliased
	 * symbols are symbols with the same address.
	 */
	while (low && kallsyms_sym_address(low-1) == kallsyms_sym_address(low))
		--low;

	symbol_start = kallsyms_sym_address(low);

	/* Search for the next non-aliased symbol. */
	for (i = low + 1; i < kallsyms_num_syms; i++) {
		if (kallsyms_sym_address(i) > symbol_start) {
			symbol_end = kallsyms_sym_address(i);
			break;
		}
	}

	/* If we found no next symbol, we use the end of the section. */
	if (!symbol_end) {
		if (is_kernel_inittext(addr))
			symbol_end = (unsigned long)_einittext;
		else if (IS_ENABLED(CONFIG_KALLSYMS_ALL))
			symbol_end = (unsigned long)_end;
		else
			symbol_end = (unsigned long)_etext;
	}

	if (symbolsize)
		*symbolsize = symbol_end - symbol_start;
	if (offset)
		*offset = addr - symbol_start;

	return low;
}
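/*
 * Worked example (assumed values for illustration, not from the original
 * file): with CONFIG_KALLSYMS_BASE_RELATIVE and
 * CONFIG_KALLSYMS_ABSOLUTE_PERCPU both enabled, kallsyms_sym_address()
 * decodes a per-CPU symbol stored as the positive offset 0x4000 to the
 * absolute value 0x4000, while an ordinary text symbol stored as, say,
 * -0x1001 decodes to kallsyms_relative_base - 1 - (-0x1001), i.e.
 * kallsyms_relative_base + 0x1000.  get_symbol_pos() then binary-searches
 * these decoded, sorted addresses, walks back over aliases that share the
 * same address, and reports the distance to the next higher address as the
 * symbol size.
 */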
/*
 * Lookup an address but don't bother to find any names.
 */
int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
				unsigned long *offset)
{
	char namebuf[KSYM_NAME_LEN];

	if (is_ksym_addr(addr)) {
		get_symbol_pos(addr, symbolsize, offset);
		return 1;
	}
	return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
	       !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
}

/*
 * Lookup an address
 * - modname is set to NULL if it's in the kernel.
 * - We guarantee that the returned name is valid until we reschedule even if
 *   it resides in a module.
 * - We also guarantee that modname will be valid until rescheduled.
 */
const char *kallsyms_lookup(unsigned long addr,
			    unsigned long *symbolsize,
			    unsigned long *offset,
			    char **modname, char *namebuf)
{
	const char *ret;

	namebuf[KSYM_NAME_LEN - 1] = 0;
	namebuf[0] = 0;

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, symbolsize, offset);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       namebuf, KSYM_NAME_LEN);
		if (modname)
			*modname = NULL;
		return namebuf;
	}

	/* See if it's in a module or a BPF JITed image. */
	ret = module_address_lookup(addr, symbolsize, offset,
				    modname, namebuf);
	if (!ret)
		ret = bpf_address_lookup(addr, symbolsize,
					 offset, modname, namebuf);

	if (!ret)
		ret = ftrace_mod_address_lookup(addr, symbolsize,
						offset, modname, namebuf);
	return ret;
}

int lookup_symbol_name(unsigned long addr, char *symname)
{
	symname[0] = '\0';
	symname[KSYM_NAME_LEN - 1] = '\0';

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, NULL, NULL);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       symname, KSYM_NAME_LEN);
		return 0;
	}
	/* See if it's in a module. */
	return lookup_module_symbol_name(addr, symname);
}

int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
			unsigned long *offset, char *modname, char *name)
{
	name[0] = '\0';
	name[KSYM_NAME_LEN - 1] = '\0';

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, size, offset);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       name, KSYM_NAME_LEN);
		modname[0] = '\0';
		return 0;
	}
	/* See if it's in a module. */
	return lookup_module_symbol_attrs(addr, size, offset, modname, name);
}
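/*
 * Usage sketch (hypothetical caller, not part of this file): a typical
 * consumer of kallsyms_lookup() passes its own KSYM_NAME_LEN buffer and
 * treats a NULL return as "no symbol covers this address":
 *
 *	char namebuf[KSYM_NAME_LEN];
 *	unsigned long size, offset;
 *	char *modname;
 *	const char *name;
 *
 *	name = kallsyms_lookup(addr, &size, &offset, &modname, namebuf);
 *	if (name)
 *		pr_debug("%s+%#lx/%#lx%s%s\n", name, offset, size,
 *			 modname ? " in " : "", modname ?: "");
 */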
/* Look up a kernel symbol and return it in a text buffer. */
static int __sprint_symbol(char *buffer, unsigned long address,
			   int symbol_offset, int add_offset)
{
	char *modname;
	const char *name;
	unsigned long offset, size;
	int len;

	address += symbol_offset;
	name = kallsyms_lookup(address, &size, &offset, &modname, buffer);
	if (!name)
		return sprintf(buffer, "0x%lx", address - symbol_offset);

	if (name != buffer)
		strcpy(buffer, name);
	len = strlen(buffer);
	offset -= symbol_offset;

	if (add_offset)
		len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);

	if (modname)
		len += sprintf(buffer + len, " [%s]", modname);

	return len;
}

/**
 * sprint_symbol - Look up a kernel symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function looks up a kernel symbol with @address and stores its name,
 * offset, size and module name to @buffer if possible. If no symbol was
 * found, it just saves @address as is.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_symbol(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, 0, 1);
}
EXPORT_SYMBOL_GPL(sprint_symbol);

/**
 * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function looks up a kernel symbol with @address and stores its name
 * and module name to @buffer if possible. If no symbol was found, it just
 * saves @address as is.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_symbol_no_offset(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, 0, 0);
}
EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);

/**
 * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function is for stack backtraces and does the same thing as
 * sprint_symbol() but with a modified/decreased @address. If there is a
 * tail-call to a function marked "noreturn", gcc optimizes out the code
 * after the call, so the stack-saved return address could point outside of
 * the caller. This function ensures that kallsyms will find the original
 * caller by decreasing @address.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_backtrace(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, -1, 1);
}
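/*
 * Example output (illustrative values only, not taken from the original
 * file): for an address 0x10 bytes into a 0x80-byte module function,
 * sprint_symbol() produces something like
 * "usb_submit_urb+0x10/0x80 [usbcore]", sprint_symbol_no_offset() just
 * "usb_submit_urb [usbcore]", and an unresolvable address falls back to
 * the raw "0xffffffffc0123456" form.  sprint_backtrace() subtracts one
 * from the address first, so a return address that lands exactly on the
 * function following a tail call to a noreturn function is still
 * attributed to the caller.
 */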
/* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
struct kallsym_iter {
	loff_t pos;
	loff_t pos_arch_end;
	loff_t pos_mod_end;
	loff_t pos_ftrace_mod_end;
	unsigned long value;
	unsigned int nameoff; /* If iterating in core kernel symbols. */
	char type;
	char name[KSYM_NAME_LEN];
	char module_name[MODULE_NAME_LEN];
	int exported;
	int show_value;
};

int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
			    char *type, char *name)
{
	return -EINVAL;
}

static int get_ksymbol_arch(struct kallsym_iter *iter)
{
	int ret = arch_get_kallsym(iter->pos - kallsyms_num_syms,
				   &iter->value, &iter->type,
				   iter->name);

	if (ret < 0) {
		iter->pos_arch_end = iter->pos;
		return 0;
	}

	return 1;
}

static int get_ksymbol_mod(struct kallsym_iter *iter)
{
	int ret = module_get_kallsym(iter->pos - iter->pos_arch_end,
				     &iter->value, &iter->type,
				     iter->name, iter->module_name,
				     &iter->exported);
	if (ret < 0) {
		iter->pos_mod_end = iter->pos;
		return 0;
	}

	return 1;
}

static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
{
	int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end,
					 &iter->value, &iter->type,
					 iter->name, iter->module_name,
					 &iter->exported);
	if (ret < 0) {
		iter->pos_ftrace_mod_end = iter->pos;
		return 0;
	}

	return 1;
}

static int get_ksymbol_bpf(struct kallsym_iter *iter)
{
	strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN);
	iter->exported = 0;
	return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
			       &iter->value, &iter->type,
			       iter->name) < 0 ? 0 : 1;
}

/* Returns space to next name. */
static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
{
	unsigned int off = iter->nameoff;

	iter->module_name[0] = '\0';
	iter->value = kallsyms_sym_address(iter->pos);

	iter->type = kallsyms_get_symbol_type(off);

	off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name));

	return off - iter->nameoff;
}

static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
{
	iter->name[0] = '\0';
	iter->nameoff = get_symbol_offset(new_pos);
	iter->pos = new_pos;
	if (new_pos == 0) {
		iter->pos_arch_end = 0;
		iter->pos_mod_end = 0;
		iter->pos_ftrace_mod_end = 0;
	}
}

/*
 * The end position (last + 1) of each additional kallsyms section is recorded
 * in iter->pos_..._end as each section is added, and so can be used to
 * determine which get_ksymbol_...() function to call next.
 */
static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
{
	iter->pos = pos;

	if ((!iter->pos_arch_end || iter->pos_arch_end > pos) &&
	    get_ksymbol_arch(iter))
		return 1;

	if ((!iter->pos_mod_end || iter->pos_mod_end > pos) &&
	    get_ksymbol_mod(iter))
		return 1;

	if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) &&
	    get_ksymbol_ftrace_mod(iter))
		return 1;

	return get_ksymbol_bpf(iter);
}
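/*
 * Illustrative layout of the iterator position space (the counts are
 * hypothetical): positions 0 .. kallsyms_num_syms-1 come from the core
 * image, and everything above is handed to update_iter_mod(), which tries
 * the arch, module, ftrace-trampoline and BPF providers in that order.
 * With, say, 100000 core symbols, no arch symbols and 3000 module symbols,
 * positions 100000..102999 would be served by get_ksymbol_mod(); the first
 * failing position records pos_mod_end = 103000, so later reads skip
 * straight to get_ksymbol_ftrace_mod() and then get_ksymbol_bpf().
 */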
/* Returns false if pos is at or past the end of file. */
static int update_iter(struct kallsym_iter *iter, loff_t pos)
{
	/* Module symbols can be accessed randomly. */
	if (pos >= kallsyms_num_syms)
		return update_iter_mod(iter, pos);

	/* If we're not at the desired position, reset to the new position. */
	if (pos != iter->pos)
		reset_iter(iter, pos);

	iter->nameoff += get_ksymbol_core(iter);
	iter->pos++;

	return 1;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	(*pos)++;

	if (!update_iter(m->private, *pos))
		return NULL;
	return p;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	if (!update_iter(m->private, *pos))
		return NULL;
	return m->private;
}

static void s_stop(struct seq_file *m, void *p)
{
}

static int s_show(struct seq_file *m, void *p)
{
	void *value;
	struct kallsym_iter *iter = m->private;

	/* Some debugging symbols have no name. Ignore them. */
	if (!iter->name[0])
		return 0;

	value = iter->show_value ? (void *)iter->value : NULL;

	if (iter->module_name[0]) {
		char type;

		/*
		 * Label it "global" if it is exported,
		 * "local" if not exported.
		 */
		type = iter->exported ? toupper(iter->type) :
					tolower(iter->type);
		seq_printf(m, "%px %c %s\t[%s]\n", value,
			   type, iter->name, iter->module_name);
	} else
		seq_printf(m, "%px %c %s\n", value,
			   iter->type, iter->name);
	return 0;
}

static const struct seq_operations kallsyms_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show
};

static inline int kallsyms_for_perf(void)
{
#ifdef CONFIG_PERF_EVENTS
	extern int sysctl_perf_event_paranoid;
	if (sysctl_perf_event_paranoid <= 1)
		return 1;
#endif
	return 0;
}

/*
 * We show kallsyms information even to normal users if we've enabled
 * kernel profiling and are explicitly not paranoid (so kptr_restrict
 * is clear, and sysctl_perf_event_paranoid isn't set).
 *
 * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
 * block even that).
 */
int kallsyms_show_value(void)
{
	switch (kptr_restrict) {
	case 0:
		if (kallsyms_for_perf())
			return 1;
	/* fallthrough */
	case 1:
		if (has_capability_noaudit(current, CAP_SYSLOG))
			return 1;
	/* fallthrough */
	default:
		return 0;
	}
}

static int kallsyms_open(struct inode *inode, struct file *file)
{
	/*
	 * We keep the iterator in m->private, since the normal case is to
	 * s_start from where we left off, so we avoid calling
	 * get_symbol_offset for every symbol.
	 */
	struct kallsym_iter *iter;
	iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter));
	if (!iter)
		return -ENOMEM;
	reset_iter(iter, 0);

	iter->show_value = kallsyms_show_value();
	return 0;
}
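/*
 * Example /proc/kallsyms output (addresses and names are made up for
 * illustration): a reader that passes the kallsyms_show_value() check sees
 * lines such as
 *
 *	ffffffff81000000 T _text
 *	ffffffffc05d1000 t helper_fn	[some_module]
 *
 * while a reader that fails the check gets the same lines with the value
 * column rendered as all zeroes, because s_show() substitutes NULL for the
 * real address before printing it with "%px".
 */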
#ifdef CONFIG_KGDB_KDB
const char *kdb_walk_kallsyms(loff_t *pos)
{
	static struct kallsym_iter kdb_walk_kallsyms_iter;

	if (*pos == 0) {
		memset(&kdb_walk_kallsyms_iter, 0,
		       sizeof(kdb_walk_kallsyms_iter));
		reset_iter(&kdb_walk_kallsyms_iter, 0);
	}
	while (1) {
		if (!update_iter(&kdb_walk_kallsyms_iter, *pos))
			return NULL;
		++*pos;
		/* Some debugging symbols have no name. Ignore them. */
		if (kdb_walk_kallsyms_iter.name[0])
			return kdb_walk_kallsyms_iter.name;
	}
}
#endif	/* CONFIG_KGDB_KDB */

static const struct file_operations kallsyms_operations = {
	.open = kallsyms_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static int __init kallsyms_init(void)
{
	proc_create("kallsyms", 0444, NULL, &kallsyms_operations);
	return 0;
}
device_initcall(kallsyms_init);
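/*
 * Hypothetical caller sketch (illustrative, not part of this file): a kdb
 * helper can walk every symbol name by repeatedly calling
 * kdb_walk_kallsyms() with a cursor that starts at zero, e.g.
 *
 *	loff_t pos = 0;
 *	const char *name;
 *
 *	while ((name = kdb_walk_kallsyms(&pos)) != NULL)
 *		if (strstr(name, pattern))
 *			kdb_printf("%s\n", name);
 *
 * kdb_walk_kallsyms() advances *pos itself, so the loop body only needs to
 * consume the returned name.
 */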