// SPDX-License-Identifier: GPL-2.0-only
/*
 * kallsyms.c: in-kernel printing of symbolic oopses and stack traces.
 *
 * Rewritten and vastly simplified by Rusty Russell for in-kernel
 * module loader:
 *   Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * ChangeLog:
 *
 * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com>
 *      Changed the compression method from stem compression to "table lookup"
 *      compression (see scripts/kallsyms.c for a more complete description)
 */
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/kdb.h>
#include <linux/err.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>	/* for cond_resched */
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/compiler.h>

/*
 * These will be re-linked against their real values
 * during the second link stage.
 */
extern const unsigned long kallsyms_addresses[] __weak;
extern const int kallsyms_offsets[] __weak;
extern const u8 kallsyms_names[] __weak;

/*
 * Tell the compiler that the count isn't in the small data section if the arch
 * has one (eg: FRV).
 */
extern const unsigned int kallsyms_num_syms
	__attribute__((weak, section(".rodata")));

extern const unsigned long kallsyms_relative_base
	__attribute__((weak, section(".rodata")));

extern const char kallsyms_token_table[] __weak;
extern const u16 kallsyms_token_index[] __weak;

extern const unsigned int kallsyms_markers[] __weak;

/*
 * Expand a compressed symbol's data into the resulting uncompressed string,
 * given the offset to where the symbol is in the compressed stream.
 * If the uncompressed string is too long (>= maxlen), it will be truncated.
 */
static unsigned int kallsyms_expand_symbol(unsigned int off,
					   char *result, size_t maxlen)
{
	int len, skipped_first = 0;
	const char *tptr;
	const u8 *data;

	/* Get the compressed symbol length from the first symbol byte. */
	data = &kallsyms_names[off];
	len = *data;
	data++;

	/*
	 * Update the offset to return the offset for the next symbol on
	 * the compressed stream.
	 */
	off += len + 1;

	/*
	 * For every byte on the compressed symbol data, copy the table
	 * entry for that byte.
	 */
	while (len) {
		tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
		data++;
		len--;

		while (*tptr) {
			if (skipped_first) {
				if (maxlen <= 1)
					goto tail;
				*result = *tptr;
				result++;
				maxlen--;
			} else
				skipped_first = 1;
			tptr++;
		}
	}

tail:
	if (maxlen)
		*result = '\0';

	/* Return the offset to the next symbol. */
	return off;
}

/*
 * Get symbol type information. This is encoded as a single char at the
 * beginning of the symbol name.
 */
static char kallsyms_get_symbol_type(unsigned int off)
{
	/*
	 * Get just the first code, look it up in the token table,
	 * and return the first char from this token.
	 */
	return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]];
}

/*
 * Find the offset on the compressed stream given an index into the
 * kallsyms array.
 */
static unsigned int get_symbol_offset(unsigned long pos)
{
	const u8 *name;
	int i;

	/*
	 * Use the closest marker we have. We have markers every 256 positions,
	 * so that should be close enough.
	 */
	name = &kallsyms_names[kallsyms_markers[pos >> 8]];

	/*
	 * Sequentially scan all the symbols up to the point we're searching
	 * for. Every symbol is stored in a [<len>][<len> bytes of data] format,
	 * so we just need to add the len to the current pointer for every
	 * symbol we wish to skip.
	 */
	for (i = 0; i < (pos & 0xFF); i++)
		name = name + (*name) + 1;

	return name - kallsyms_names;
}
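
/*
 * Layout sketch (illustrative, with made-up byte values; the real stream is
 * generated by scripts/kallsyms): kallsyms_names is a packed sequence of
 * [length byte][<length> token indices] records.  A hypothetical record
 *
 *	0x03 0x54 0x9c 0x17
 *
 * is three tokens long; each index selects a string from
 * kallsyms_token_table via kallsyms_token_index, and the very first
 * character of the expansion is the nm(1)-style type character (see
 * kallsyms_get_symbol_type()), not part of the name itself.
 * kallsyms_markers[] records the byte offset of every 256th record, so
 * get_symbol_offset() never has to skip more than 255 records linearly.
 */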

static unsigned long kallsyms_sym_address(int idx)
{
	if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
		return kallsyms_addresses[idx];

	/* values are unsigned offsets if --absolute-percpu is not in effect */
	if (!IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU))
		return kallsyms_relative_base + (u32)kallsyms_offsets[idx];

	/* ...otherwise, positive offsets are absolute values */
	if (kallsyms_offsets[idx] >= 0)
		return kallsyms_offsets[idx];

	/* ...and negative offsets are relative to kallsyms_relative_base - 1 */
	return kallsyms_relative_base - 1 - kallsyms_offsets[idx];
}

/* Lookup the address for this symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name)
{
	char namebuf[KSYM_NAME_LEN];
	unsigned long i;
	unsigned int off;

	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
		off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));

		if (strcmp(namebuf, name) == 0)
			return kallsyms_sym_address(i);
	}
	return module_kallsyms_lookup_name(name);
}

int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
				      unsigned long),
			    void *data)
{
	char namebuf[KSYM_NAME_LEN];
	unsigned long i;
	unsigned int off;
	int ret;

	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
		off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
		ret = fn(data, namebuf, NULL, kallsyms_sym_address(i));
		if (ret != 0)
			return ret;
	}
	return module_kallsyms_on_each_symbol(fn, data);
}

static unsigned long get_symbol_pos(unsigned long addr,
				    unsigned long *symbolsize,
				    unsigned long *offset)
{
	unsigned long symbol_start = 0, symbol_end = 0;
	unsigned long i, low, high, mid;

	/* This kernel should never have been booted. */
	if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
		BUG_ON(!kallsyms_addresses);
	else
		BUG_ON(!kallsyms_offsets);

	/* Do a binary search on the sorted kallsyms_addresses array. */
	low = 0;
	high = kallsyms_num_syms;

	while (high - low > 1) {
		mid = low + (high - low) / 2;
		if (kallsyms_sym_address(mid) <= addr)
			low = mid;
		else
			high = mid;
	}

	/*
	 * Search for the first aliased symbol. Aliased
	 * symbols are symbols with the same address.
	 */
	while (low && kallsyms_sym_address(low-1) == kallsyms_sym_address(low))
		--low;

	symbol_start = kallsyms_sym_address(low);

	/* Search for next non-aliased symbol. */
	for (i = low + 1; i < kallsyms_num_syms; i++) {
		if (kallsyms_sym_address(i) > symbol_start) {
			symbol_end = kallsyms_sym_address(i);
			break;
		}
	}

	/* If we found no next symbol, we use the end of the section. */
	if (!symbol_end) {
		if (is_kernel_inittext(addr))
			symbol_end = (unsigned long)_einittext;
		else if (IS_ENABLED(CONFIG_KALLSYMS_ALL))
			symbol_end = (unsigned long)_end;
		else
			symbol_end = (unsigned long)_etext;
	}

	if (symbolsize)
		*symbolsize = symbol_end - symbol_start;
	if (offset)
		*offset = addr - symbol_start;

	return low;
}
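
/*
 * Decoding sketch for kallsyms_sym_address(), which the binary search above
 * relies on (values below are made up): with CONFIG_KALLSYMS_BASE_RELATIVE
 * and CONFIG_KALLSYMS_ABSOLUTE_PERCPU, a percpu symbol stored as the
 * positive offset 0x2000 decodes to the absolute address 0x2000, while a
 * text symbol stored as the negative offset -0x1001 decodes to
 * kallsyms_relative_base - 1 - (-0x1001) = kallsyms_relative_base + 0x1000.
 * This keeps kallsyms_offsets[] at 32 bits per entry even on 64-bit kernels,
 * provided the non-percpu symbols lie close enough to the relative base to
 * fit in a 32-bit offset.
 */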

/*
 * Lookup an address but don't bother to find any names.
 */
int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
				unsigned long *offset)
{
	char namebuf[KSYM_NAME_LEN];

	if (is_ksym_addr(addr)) {
		get_symbol_pos(addr, symbolsize, offset);
		return 1;
	}
	return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
	       !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
}

/*
 * Lookup an address
 * - modname is set to NULL if it's in the kernel.
 * - We guarantee that the returned name is valid until we reschedule even if
 *   it resides in a module.
 * - We also guarantee that modname will be valid until rescheduled.
 */
const char *kallsyms_lookup(unsigned long addr,
			    unsigned long *symbolsize,
			    unsigned long *offset,
			    char **modname, char *namebuf)
{
	const char *ret;

	namebuf[KSYM_NAME_LEN - 1] = 0;
	namebuf[0] = 0;

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, symbolsize, offset);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       namebuf, KSYM_NAME_LEN);
		if (modname)
			*modname = NULL;
		return namebuf;
	}

	/* See if it's in a module or a BPF JITed image. */
	ret = module_address_lookup(addr, symbolsize, offset,
				    modname, namebuf);
	if (!ret)
		ret = bpf_address_lookup(addr, symbolsize,
					 offset, modname, namebuf);

	if (!ret)
		ret = ftrace_mod_address_lookup(addr, symbolsize,
						offset, modname, namebuf);
	return ret;
}
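
/*
 * Illustrative caller (not part of this file): an oops/backtrace printer
 * that wants the textual form of an address can do
 *
 *	char buf[KSYM_NAME_LEN];
 *	unsigned long size, off;
 *	char *mod;
 *	const char *name;
 *
 *	name = kallsyms_lookup(addr, &size, &off, &mod, buf);
 *
 * and, when name is non-NULL, print it as "name+off/size [mod]";
 * __sprint_symbol() below wraps exactly this pattern.
 */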

int lookup_symbol_name(unsigned long addr, char *symname)
{
	symname[0] = '\0';
	symname[KSYM_NAME_LEN - 1] = '\0';

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, NULL, NULL);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       symname, KSYM_NAME_LEN);
		return 0;
	}
	/* See if it's in a module. */
	return lookup_module_symbol_name(addr, symname);
}

int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
			unsigned long *offset, char *modname, char *name)
{
	name[0] = '\0';
	name[KSYM_NAME_LEN - 1] = '\0';

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, size, offset);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       name, KSYM_NAME_LEN);
		modname[0] = '\0';
		return 0;
	}
	/* See if it's in a module. */
	return lookup_module_symbol_attrs(addr, size, offset, modname, name);
}

/* Look up a kernel symbol and return it in a text buffer. */
static int __sprint_symbol(char *buffer, unsigned long address,
			   int symbol_offset, int add_offset)
{
	char *modname;
	const char *name;
	unsigned long offset, size;
	int len;

	address += symbol_offset;
	name = kallsyms_lookup(address, &size, &offset, &modname, buffer);
	if (!name)
		return sprintf(buffer, "0x%lx", address - symbol_offset);

	if (name != buffer)
		strcpy(buffer, name);
	len = strlen(buffer);
	offset -= symbol_offset;

	if (add_offset)
		len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);

	if (modname)
		len += sprintf(buffer + len, " [%s]", modname);

	return len;
}

/**
 * sprint_symbol - Look up a kernel symbol and return it in a text buffer
 * @buffer: buffer to store the symbol text in
 * @address: address to lookup
 *
 * This function looks up a kernel symbol with @address and stores its name,
 * offset, size and module name to @buffer if possible. If no symbol was
 * found, it just saves @address as is.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_symbol(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, 0, 1);
}
EXPORT_SYMBOL_GPL(sprint_symbol);

/**
 * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer
 * @buffer: buffer to store the symbol text in
 * @address: address to lookup
 *
 * This function looks up a kernel symbol with @address and stores its name
 * and module name to @buffer if possible. If no symbol was found, it just
 * saves @address as is.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_symbol_no_offset(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, 0, 0);
}
EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);

/**
 * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer
 * @buffer: buffer to store the symbol text in
 * @address: address to lookup
 *
 * This function is for stack backtraces and does the same thing as
 * sprint_symbol() but with a modified/decreased @address. If there is a
 * tail-call to a function marked "noreturn", gcc may optimize out the code
 * after the call, so the stack-saved return address could point outside of
 * the caller. This function ensures that kallsyms will find the original
 * caller by decreasing @address.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_backtrace(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, -1, 1);
}
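
/*
 * Output sketch for the helpers above (symbol, sizes and module name are
 * made up): for an address 0x1c bytes into a 0x30-byte function of a module,
 * sprint_symbol() fills @buffer with something like
 *
 *	"some_mod_func+0x1c/0x30 [some_module]"
 *
 * sprint_symbol_no_offset() drops the "+0x1c/0x30" part, and a failed lookup
 * falls back to the bare "0x<address>" form produced by __sprint_symbol().
 */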

/* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
struct kallsym_iter {
	loff_t pos;
	loff_t pos_arch_end;
	loff_t pos_mod_end;
	loff_t pos_ftrace_mod_end;
	unsigned long value;
	unsigned int nameoff; /* If iterating in core kernel symbols. */
	char type;
	char name[KSYM_NAME_LEN];
	char module_name[MODULE_NAME_LEN];
	int exported;
	int show_value;
};

int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
			    char *type, char *name)
{
	return -EINVAL;
}

static int get_ksymbol_arch(struct kallsym_iter *iter)
{
	int ret = arch_get_kallsym(iter->pos - kallsyms_num_syms,
				   &iter->value, &iter->type,
				   iter->name);

	if (ret < 0) {
		iter->pos_arch_end = iter->pos;
		return 0;
	}

	return 1;
}

static int get_ksymbol_mod(struct kallsym_iter *iter)
{
	int ret = module_get_kallsym(iter->pos - iter->pos_arch_end,
				     &iter->value, &iter->type,
				     iter->name, iter->module_name,
				     &iter->exported);
	if (ret < 0) {
		iter->pos_mod_end = iter->pos;
		return 0;
	}

	return 1;
}

static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
{
	int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end,
					 &iter->value, &iter->type,
					 iter->name, iter->module_name,
					 &iter->exported);
	if (ret < 0) {
		iter->pos_ftrace_mod_end = iter->pos;
		return 0;
	}

	return 1;
}

static int get_ksymbol_bpf(struct kallsym_iter *iter)
{
	strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN);
	iter->exported = 0;
	return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
			       &iter->value, &iter->type,
			       iter->name) < 0 ? 0 : 1;
}

/* Returns space to next name. */
static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
{
	unsigned off = iter->nameoff;

	iter->module_name[0] = '\0';
	iter->value = kallsyms_sym_address(iter->pos);

	iter->type = kallsyms_get_symbol_type(off);

	off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name));

	return off - iter->nameoff;
}

static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
{
	iter->name[0] = '\0';
	iter->nameoff = get_symbol_offset(new_pos);
	iter->pos = new_pos;
	if (new_pos == 0) {
		iter->pos_arch_end = 0;
		iter->pos_mod_end = 0;
		iter->pos_ftrace_mod_end = 0;
	}
}

/*
 * The end position (last + 1) of each additional kallsyms section is recorded
 * in iter->pos_..._end as each section is added, and so can be used to
 * determine which get_ksymbol_...() function to call next.
 */
static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
{
	iter->pos = pos;

	if ((!iter->pos_arch_end || iter->pos_arch_end > pos) &&
	    get_ksymbol_arch(iter))
		return 1;

	if ((!iter->pos_mod_end || iter->pos_mod_end > pos) &&
	    get_ksymbol_mod(iter))
		return 1;

	if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) &&
	    get_ksymbol_ftrace_mod(iter))
		return 1;

	return get_ksymbol_bpf(iter);
}
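
/*
 * Position layout sketch (counts are illustrative): if the core table holds
 * N = kallsyms_num_syms entries and the weak arch_get_kallsym() reports
 * nothing, then positions 0..N-1 are served by get_ksymbol_core(), position
 * N becomes module_get_kallsym(0, ...), and once the module symbols run out
 * the ftrace and then BPF providers are queried, each indexed from the
 * pos_*_end boundary recorded when the previous provider was exhausted.
 */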

/* Returns false if pos is at or past the end of file. */
static int update_iter(struct kallsym_iter *iter, loff_t pos)
{
	/* Module symbols can be accessed randomly. */
	if (pos >= kallsyms_num_syms)
		return update_iter_mod(iter, pos);

	/* If we're not on the desired position, reset to the new position. */
	if (pos != iter->pos)
		reset_iter(iter, pos);

	iter->nameoff += get_ksymbol_core(iter);
	iter->pos++;

	return 1;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	(*pos)++;

	if (!update_iter(m->private, *pos))
		return NULL;
	return p;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	if (!update_iter(m->private, *pos))
		return NULL;
	return m->private;
}

static void s_stop(struct seq_file *m, void *p)
{
}

static int s_show(struct seq_file *m, void *p)
{
	void *value;
	struct kallsym_iter *iter = m->private;

	/* Some debugging symbols have no name. Ignore them. */
	if (!iter->name[0])
		return 0;

	value = iter->show_value ? (void *)iter->value : NULL;

	if (iter->module_name[0]) {
		char type;

		/*
		 * Label it "global" if it is exported,
		 * "local" if not exported.
		 */
		type = iter->exported ? toupper(iter->type) :
					tolower(iter->type);
		seq_printf(m, "%px %c %s\t[%s]\n", value,
			   type, iter->name, iter->module_name);
	} else
		seq_printf(m, "%px %c %s\n", value,
			   iter->type, iter->name);
	return 0;
}

static const struct seq_operations kallsyms_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show
};

static inline int kallsyms_for_perf(void)
{
#ifdef CONFIG_PERF_EVENTS
	extern int sysctl_perf_event_paranoid;
	if (sysctl_perf_event_paranoid <= 1)
		return 1;
#endif
	return 0;
}

/*
 * We show kallsyms information even to normal users if we've enabled
 * kernel profiling and are explicitly not paranoid (so kptr_restrict
 * is clear, and sysctl_perf_event_paranoid isn't set).
 *
 * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
 * block even that).
 */
bool kallsyms_show_value(const struct cred *cred)
{
	switch (kptr_restrict) {
	case 0:
		if (kallsyms_for_perf())
			return true;
		/* fallthrough */
	case 1:
		if (security_capable(cred, &init_user_ns, CAP_SYSLOG,
				     CAP_OPT_NOAUDIT) == 0)
			return true;
		/* fallthrough */
	default:
		return false;
	}
}

static int kallsyms_open(struct inode *inode, struct file *file)
{
	/*
	 * We keep the iterator in m->private, since the normal case is to
	 * s_start from where we left off, so we avoid calling
	 * get_symbol_offset() for every symbol.
	 */
	struct kallsym_iter *iter;
	iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter));
	if (!iter)
		return -ENOMEM;
	reset_iter(iter, 0);

	/*
	 * Instead of checking this on every s_show() call, cache
	 * the result here at open time.
	 */
	iter->show_value = kallsyms_show_value(file->f_cred);
	return 0;
}
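
/*
 * What a /proc/kallsyms reader sees (addresses and names are illustrative):
 *
 *	ffffffff81000000 T _text
 *	ffffffffc0005000 t some_module_func	[some_module]
 *
 * Module symbols are printed with an upper-case type when exported and a
 * lower-case one otherwise (see s_show()).  A reader that fails the
 * kallsyms_show_value() check gets the same lines with the address column
 * rendered as all zeroes, since s_show() then passes a NULL value to %px.
 */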

#ifdef CONFIG_KGDB_KDB
const char *kdb_walk_kallsyms(loff_t *pos)
{
	static struct kallsym_iter kdb_walk_kallsyms_iter;
	if (*pos == 0) {
		memset(&kdb_walk_kallsyms_iter, 0,
		       sizeof(kdb_walk_kallsyms_iter));
		reset_iter(&kdb_walk_kallsyms_iter, 0);
	}
	while (1) {
		if (!update_iter(&kdb_walk_kallsyms_iter, *pos))
			return NULL;
		++*pos;
		/* Some debugging symbols have no name. Ignore them. */
		if (kdb_walk_kallsyms_iter.name[0])
			return kdb_walk_kallsyms_iter.name;
	}
}
#endif	/* CONFIG_KGDB_KDB */

static const struct proc_ops kallsyms_proc_ops = {
	.proc_open	= kallsyms_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release_private,
};

static int __init kallsyms_init(void)
{
	proc_create("kallsyms", 0444, NULL, &kallsyms_proc_ops);
	return 0;
}
device_initcall(kallsyms_init);