1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * kallsyms.c: in-kernel printing of symbolic oopses and stack traces. 4 * 5 * Rewritten and vastly simplified by Rusty Russell for in-kernel 6 * module loader: 7 * Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation 8 * 9 * ChangeLog: 10 * 11 * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com> 12 * Changed the compression method from stem compression to "table lookup" 13 * compression (see scripts/kallsyms.c for a more complete description) 14 */ 15 #include <linux/kallsyms.h> 16 #include <linux/init.h> 17 #include <linux/seq_file.h> 18 #include <linux/fs.h> 19 #include <linux/kdb.h> 20 #include <linux/err.h> 21 #include <linux/proc_fs.h> 22 #include <linux/sched.h> /* for cond_resched */ 23 #include <linux/ctype.h> 24 #include <linux/slab.h> 25 #include <linux/filter.h> 26 #include <linux/ftrace.h> 27 #include <linux/kprobes.h> 28 #include <linux/build_bug.h> 29 #include <linux/compiler.h> 30 #include <linux/module.h> 31 #include <linux/kernel.h> 32 #include <linux/bsearch.h> 33 34 #include "kallsyms_internal.h" 35 36 /* 37 * Expand a compressed symbol data into the resulting uncompressed string, 38 * if uncompressed string is too long (>= maxlen), it will be truncated, 39 * given the offset to where the symbol is in the compressed stream. 40 */ 41 static unsigned int kallsyms_expand_symbol(unsigned int off, 42 char *result, size_t maxlen) 43 { 44 int len, skipped_first = 0; 45 const char *tptr; 46 const u8 *data; 47 48 /* Get the compressed symbol length from the first symbol byte. */ 49 data = &kallsyms_names[off]; 50 len = *data; 51 data++; 52 53 /* 54 * Update the offset to return the offset for the next symbol on 55 * the compressed stream. 56 */ 57 off += len + 1; 58 59 /* 60 * For every byte on the compressed symbol data, copy the table 61 * entry for that byte. 62 */ 63 while (len) { 64 tptr = &kallsyms_token_table[kallsyms_token_index[*data]]; 65 data++; 66 len--; 67 68 while (*tptr) { 69 if (skipped_first) { 70 if (maxlen <= 1) 71 goto tail; 72 *result = *tptr; 73 result++; 74 maxlen--; 75 } else 76 skipped_first = 1; 77 tptr++; 78 } 79 } 80 81 tail: 82 if (maxlen) 83 *result = '\0'; 84 85 /* Return to offset to the next symbol. */ 86 return off; 87 } 88 89 /* 90 * Get symbol type information. This is encoded as a single char at the 91 * beginning of the symbol name. 92 */ 93 static char kallsyms_get_symbol_type(unsigned int off) 94 { 95 /* 96 * Get just the first code, look it up in the token table, 97 * and return the first char from this token. 98 */ 99 return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]]; 100 } 101 102 103 /* 104 * Find the offset on the compressed stream given and index in the 105 * kallsyms array. 106 */ 107 static unsigned int get_symbol_offset(unsigned long pos) 108 { 109 const u8 *name; 110 int i; 111 112 /* 113 * Use the closest marker we have. We have markers every 256 positions, 114 * so that should be close enough. 115 */ 116 name = &kallsyms_names[kallsyms_markers[pos >> 8]]; 117 118 /* 119 * Sequentially scan all the symbols up to the point we're searching 120 * for. Every symbol is stored in a [<len>][<len> bytes of data] format, 121 * so we just need to add the len to the current pointer for every 122 * symbol we wish to skip. 123 */ 124 for (i = 0; i < (pos & 0xFF); i++) 125 name = name + (*name) + 1; 126 127 return name - kallsyms_names; 128 } 129 130 static unsigned long kallsyms_sym_address(int idx) 131 { 132 if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE)) 133 return kallsyms_addresses[idx]; 134 135 /* values are unsigned offsets if --absolute-percpu is not in effect */ 136 if (!IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU)) 137 return kallsyms_relative_base + (u32)kallsyms_offsets[idx]; 138 139 /* ...otherwise, positive offsets are absolute values */ 140 if (kallsyms_offsets[idx] >= 0) 141 return kallsyms_offsets[idx]; 142 143 /* ...and negative offsets are relative to kallsyms_relative_base - 1 */ 144 return kallsyms_relative_base - 1 - kallsyms_offsets[idx]; 145 } 146 147 static bool cleanup_symbol_name(char *s) 148 { 149 char *res; 150 151 if (!IS_ENABLED(CONFIG_LTO_CLANG)) 152 return false; 153 154 /* 155 * LLVM appends various suffixes for local functions and variables that 156 * must be promoted to global scope as part of LTO. This can break 157 * hooking of static functions with kprobes. '.' is not a valid 158 * character in an identifier in C. Suffixes observed: 159 * - foo.llvm.[0-9a-f]+ 160 * - foo.[0-9a-f]+ 161 * - foo.[0-9a-f]+.cfi_jt 162 */ 163 res = strchr(s, '.'); 164 if (res) { 165 *res = '\0'; 166 return true; 167 } 168 169 if (!IS_ENABLED(CONFIG_CFI_CLANG) || 170 !IS_ENABLED(CONFIG_LTO_CLANG_THIN) || 171 CONFIG_CLANG_VERSION >= 130000) 172 return false; 173 174 /* 175 * Prior to LLVM 13, the following suffixes were observed when thinLTO 176 * and CFI are both enabled: 177 * - foo$[0-9]+ 178 */ 179 res = strrchr(s, '$'); 180 if (res) { 181 *res = '\0'; 182 return true; 183 } 184 185 return false; 186 } 187 188 /* Lookup the address for this symbol. Returns 0 if not found. */ 189 unsigned long kallsyms_lookup_name(const char *name) 190 { 191 char namebuf[KSYM_NAME_LEN]; 192 unsigned long i; 193 unsigned int off; 194 195 /* Skip the search for empty string. */ 196 if (!*name) 197 return 0; 198 199 for (i = 0, off = 0; i < kallsyms_num_syms; i++) { 200 off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); 201 202 if (strcmp(namebuf, name) == 0) 203 return kallsyms_sym_address(i); 204 205 if (cleanup_symbol_name(namebuf) && strcmp(namebuf, name) == 0) 206 return kallsyms_sym_address(i); 207 } 208 return module_kallsyms_lookup_name(name); 209 } 210 211 /* 212 * Iterate over all symbols in vmlinux. For symbols from modules use 213 * module_kallsyms_on_each_symbol instead. 214 */ 215 int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, 216 unsigned long), 217 void *data) 218 { 219 char namebuf[KSYM_NAME_LEN]; 220 unsigned long i; 221 unsigned int off; 222 int ret; 223 224 for (i = 0, off = 0; i < kallsyms_num_syms; i++) { 225 off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); 226 ret = fn(data, namebuf, NULL, kallsyms_sym_address(i)); 227 if (ret != 0) 228 return ret; 229 cond_resched(); 230 } 231 return 0; 232 } 233 234 static unsigned long get_symbol_pos(unsigned long addr, 235 unsigned long *symbolsize, 236 unsigned long *offset) 237 { 238 unsigned long symbol_start = 0, symbol_end = 0; 239 unsigned long i, low, high, mid; 240 241 /* This kernel should never had been booted. */ 242 if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE)) 243 BUG_ON(!kallsyms_addresses); 244 else 245 BUG_ON(!kallsyms_offsets); 246 247 /* Do a binary search on the sorted kallsyms_addresses array. */ 248 low = 0; 249 high = kallsyms_num_syms; 250 251 while (high - low > 1) { 252 mid = low + (high - low) / 2; 253 if (kallsyms_sym_address(mid) <= addr) 254 low = mid; 255 else 256 high = mid; 257 } 258 259 /* 260 * Search for the first aliased symbol. Aliased 261 * symbols are symbols with the same address. 262 */ 263 while (low && kallsyms_sym_address(low-1) == kallsyms_sym_address(low)) 264 --low; 265 266 symbol_start = kallsyms_sym_address(low); 267 268 /* Search for next non-aliased symbol. */ 269 for (i = low + 1; i < kallsyms_num_syms; i++) { 270 if (kallsyms_sym_address(i) > symbol_start) { 271 symbol_end = kallsyms_sym_address(i); 272 break; 273 } 274 } 275 276 /* If we found no next symbol, we use the end of the section. */ 277 if (!symbol_end) { 278 if (is_kernel_inittext(addr)) 279 symbol_end = (unsigned long)_einittext; 280 else if (IS_ENABLED(CONFIG_KALLSYMS_ALL)) 281 symbol_end = (unsigned long)_end; 282 else 283 symbol_end = (unsigned long)_etext; 284 } 285 286 if (symbolsize) 287 *symbolsize = symbol_end - symbol_start; 288 if (offset) 289 *offset = addr - symbol_start; 290 291 return low; 292 } 293 294 /* 295 * Lookup an address but don't bother to find any names. 296 */ 297 int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, 298 unsigned long *offset) 299 { 300 char namebuf[KSYM_NAME_LEN]; 301 302 if (is_ksym_addr(addr)) { 303 get_symbol_pos(addr, symbolsize, offset); 304 return 1; 305 } 306 return !!module_address_lookup(addr, symbolsize, offset, NULL, NULL, namebuf) || 307 !!__bpf_address_lookup(addr, symbolsize, offset, namebuf); 308 } 309 310 static const char *kallsyms_lookup_buildid(unsigned long addr, 311 unsigned long *symbolsize, 312 unsigned long *offset, char **modname, 313 const unsigned char **modbuildid, char *namebuf) 314 { 315 const char *ret; 316 317 namebuf[KSYM_NAME_LEN - 1] = 0; 318 namebuf[0] = 0; 319 320 if (is_ksym_addr(addr)) { 321 unsigned long pos; 322 323 pos = get_symbol_pos(addr, symbolsize, offset); 324 /* Grab name */ 325 kallsyms_expand_symbol(get_symbol_offset(pos), 326 namebuf, KSYM_NAME_LEN); 327 if (modname) 328 *modname = NULL; 329 if (modbuildid) 330 *modbuildid = NULL; 331 332 ret = namebuf; 333 goto found; 334 } 335 336 /* See if it's in a module or a BPF JITed image. */ 337 ret = module_address_lookup(addr, symbolsize, offset, 338 modname, modbuildid, namebuf); 339 if (!ret) 340 ret = bpf_address_lookup(addr, symbolsize, 341 offset, modname, namebuf); 342 343 if (!ret) 344 ret = ftrace_mod_address_lookup(addr, symbolsize, 345 offset, modname, namebuf); 346 347 found: 348 cleanup_symbol_name(namebuf); 349 return ret; 350 } 351 352 /* 353 * Lookup an address 354 * - modname is set to NULL if it's in the kernel. 355 * - We guarantee that the returned name is valid until we reschedule even if. 356 * It resides in a module. 357 * - We also guarantee that modname will be valid until rescheduled. 358 */ 359 const char *kallsyms_lookup(unsigned long addr, 360 unsigned long *symbolsize, 361 unsigned long *offset, 362 char **modname, char *namebuf) 363 { 364 return kallsyms_lookup_buildid(addr, symbolsize, offset, modname, 365 NULL, namebuf); 366 } 367 368 int lookup_symbol_name(unsigned long addr, char *symname) 369 { 370 int res; 371 372 symname[0] = '\0'; 373 symname[KSYM_NAME_LEN - 1] = '\0'; 374 375 if (is_ksym_addr(addr)) { 376 unsigned long pos; 377 378 pos = get_symbol_pos(addr, NULL, NULL); 379 /* Grab name */ 380 kallsyms_expand_symbol(get_symbol_offset(pos), 381 symname, KSYM_NAME_LEN); 382 goto found; 383 } 384 /* See if it's in a module. */ 385 res = lookup_module_symbol_name(addr, symname); 386 if (res) 387 return res; 388 389 found: 390 cleanup_symbol_name(symname); 391 return 0; 392 } 393 394 int lookup_symbol_attrs(unsigned long addr, unsigned long *size, 395 unsigned long *offset, char *modname, char *name) 396 { 397 int res; 398 399 name[0] = '\0'; 400 name[KSYM_NAME_LEN - 1] = '\0'; 401 402 if (is_ksym_addr(addr)) { 403 unsigned long pos; 404 405 pos = get_symbol_pos(addr, size, offset); 406 /* Grab name */ 407 kallsyms_expand_symbol(get_symbol_offset(pos), 408 name, KSYM_NAME_LEN); 409 modname[0] = '\0'; 410 goto found; 411 } 412 /* See if it's in a module. */ 413 res = lookup_module_symbol_attrs(addr, size, offset, modname, name); 414 if (res) 415 return res; 416 417 found: 418 cleanup_symbol_name(name); 419 return 0; 420 } 421 422 /* Look up a kernel symbol and return it in a text buffer. */ 423 static int __sprint_symbol(char *buffer, unsigned long address, 424 int symbol_offset, int add_offset, int add_buildid) 425 { 426 char *modname; 427 const unsigned char *buildid; 428 const char *name; 429 unsigned long offset, size; 430 int len; 431 432 address += symbol_offset; 433 name = kallsyms_lookup_buildid(address, &size, &offset, &modname, &buildid, 434 buffer); 435 if (!name) 436 return sprintf(buffer, "0x%lx", address - symbol_offset); 437 438 if (name != buffer) 439 strcpy(buffer, name); 440 len = strlen(buffer); 441 offset -= symbol_offset; 442 443 if (add_offset) 444 len += sprintf(buffer + len, "+%#lx/%#lx", offset, size); 445 446 if (modname) { 447 len += sprintf(buffer + len, " [%s", modname); 448 #if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) 449 if (add_buildid && buildid) { 450 /* build ID should match length of sprintf */ 451 #if IS_ENABLED(CONFIG_MODULES) 452 static_assert(sizeof(typeof_member(struct module, build_id)) == 20); 453 #endif 454 len += sprintf(buffer + len, " %20phN", buildid); 455 } 456 #endif 457 len += sprintf(buffer + len, "]"); 458 } 459 460 return len; 461 } 462 463 /** 464 * sprint_symbol - Look up a kernel symbol and return it in a text buffer 465 * @buffer: buffer to be stored 466 * @address: address to lookup 467 * 468 * This function looks up a kernel symbol with @address and stores its name, 469 * offset, size and module name to @buffer if possible. If no symbol was found, 470 * just saves its @address as is. 471 * 472 * This function returns the number of bytes stored in @buffer. 473 */ 474 int sprint_symbol(char *buffer, unsigned long address) 475 { 476 return __sprint_symbol(buffer, address, 0, 1, 0); 477 } 478 EXPORT_SYMBOL_GPL(sprint_symbol); 479 480 /** 481 * sprint_symbol_build_id - Look up a kernel symbol and return it in a text buffer 482 * @buffer: buffer to be stored 483 * @address: address to lookup 484 * 485 * This function looks up a kernel symbol with @address and stores its name, 486 * offset, size, module name and module build ID to @buffer if possible. If no 487 * symbol was found, just saves its @address as is. 488 * 489 * This function returns the number of bytes stored in @buffer. 490 */ 491 int sprint_symbol_build_id(char *buffer, unsigned long address) 492 { 493 return __sprint_symbol(buffer, address, 0, 1, 1); 494 } 495 EXPORT_SYMBOL_GPL(sprint_symbol_build_id); 496 497 /** 498 * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer 499 * @buffer: buffer to be stored 500 * @address: address to lookup 501 * 502 * This function looks up a kernel symbol with @address and stores its name 503 * and module name to @buffer if possible. If no symbol was found, just saves 504 * its @address as is. 505 * 506 * This function returns the number of bytes stored in @buffer. 507 */ 508 int sprint_symbol_no_offset(char *buffer, unsigned long address) 509 { 510 return __sprint_symbol(buffer, address, 0, 0, 0); 511 } 512 EXPORT_SYMBOL_GPL(sprint_symbol_no_offset); 513 514 /** 515 * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer 516 * @buffer: buffer to be stored 517 * @address: address to lookup 518 * 519 * This function is for stack backtrace and does the same thing as 520 * sprint_symbol() but with modified/decreased @address. If there is a 521 * tail-call to the function marked "noreturn", gcc optimized out code after 522 * the call so that the stack-saved return address could point outside of the 523 * caller. This function ensures that kallsyms will find the original caller 524 * by decreasing @address. 525 * 526 * This function returns the number of bytes stored in @buffer. 527 */ 528 int sprint_backtrace(char *buffer, unsigned long address) 529 { 530 return __sprint_symbol(buffer, address, -1, 1, 0); 531 } 532 533 /** 534 * sprint_backtrace_build_id - Look up a backtrace symbol and return it in a text buffer 535 * @buffer: buffer to be stored 536 * @address: address to lookup 537 * 538 * This function is for stack backtrace and does the same thing as 539 * sprint_symbol() but with modified/decreased @address. If there is a 540 * tail-call to the function marked "noreturn", gcc optimized out code after 541 * the call so that the stack-saved return address could point outside of the 542 * caller. This function ensures that kallsyms will find the original caller 543 * by decreasing @address. This function also appends the module build ID to 544 * the @buffer if @address is within a kernel module. 545 * 546 * This function returns the number of bytes stored in @buffer. 547 */ 548 int sprint_backtrace_build_id(char *buffer, unsigned long address) 549 { 550 return __sprint_symbol(buffer, address, -1, 1, 1); 551 } 552 553 /* To avoid using get_symbol_offset for every symbol, we carry prefix along. */ 554 struct kallsym_iter { 555 loff_t pos; 556 loff_t pos_arch_end; 557 loff_t pos_mod_end; 558 loff_t pos_ftrace_mod_end; 559 loff_t pos_bpf_end; 560 unsigned long value; 561 unsigned int nameoff; /* If iterating in core kernel symbols. */ 562 char type; 563 char name[KSYM_NAME_LEN]; 564 char module_name[MODULE_NAME_LEN]; 565 int exported; 566 int show_value; 567 }; 568 569 int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value, 570 char *type, char *name) 571 { 572 return -EINVAL; 573 } 574 575 static int get_ksymbol_arch(struct kallsym_iter *iter) 576 { 577 int ret = arch_get_kallsym(iter->pos - kallsyms_num_syms, 578 &iter->value, &iter->type, 579 iter->name); 580 581 if (ret < 0) { 582 iter->pos_arch_end = iter->pos; 583 return 0; 584 } 585 586 return 1; 587 } 588 589 static int get_ksymbol_mod(struct kallsym_iter *iter) 590 { 591 int ret = module_get_kallsym(iter->pos - iter->pos_arch_end, 592 &iter->value, &iter->type, 593 iter->name, iter->module_name, 594 &iter->exported); 595 if (ret < 0) { 596 iter->pos_mod_end = iter->pos; 597 return 0; 598 } 599 600 return 1; 601 } 602 603 /* 604 * ftrace_mod_get_kallsym() may also get symbols for pages allocated for ftrace 605 * purposes. In that case "__builtin__ftrace" is used as a module name, even 606 * though "__builtin__ftrace" is not a module. 607 */ 608 static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter) 609 { 610 int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end, 611 &iter->value, &iter->type, 612 iter->name, iter->module_name, 613 &iter->exported); 614 if (ret < 0) { 615 iter->pos_ftrace_mod_end = iter->pos; 616 return 0; 617 } 618 619 return 1; 620 } 621 622 static int get_ksymbol_bpf(struct kallsym_iter *iter) 623 { 624 int ret; 625 626 strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN); 627 iter->exported = 0; 628 ret = bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end, 629 &iter->value, &iter->type, 630 iter->name); 631 if (ret < 0) { 632 iter->pos_bpf_end = iter->pos; 633 return 0; 634 } 635 636 return 1; 637 } 638 639 /* 640 * This uses "__builtin__kprobes" as a module name for symbols for pages 641 * allocated for kprobes' purposes, even though "__builtin__kprobes" is not a 642 * module. 643 */ 644 static int get_ksymbol_kprobe(struct kallsym_iter *iter) 645 { 646 strlcpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN); 647 iter->exported = 0; 648 return kprobe_get_kallsym(iter->pos - iter->pos_bpf_end, 649 &iter->value, &iter->type, 650 iter->name) < 0 ? 0 : 1; 651 } 652 653 /* Returns space to next name. */ 654 static unsigned long get_ksymbol_core(struct kallsym_iter *iter) 655 { 656 unsigned off = iter->nameoff; 657 658 iter->module_name[0] = '\0'; 659 iter->value = kallsyms_sym_address(iter->pos); 660 661 iter->type = kallsyms_get_symbol_type(off); 662 663 off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name)); 664 665 return off - iter->nameoff; 666 } 667 668 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos) 669 { 670 iter->name[0] = '\0'; 671 iter->nameoff = get_symbol_offset(new_pos); 672 iter->pos = new_pos; 673 if (new_pos == 0) { 674 iter->pos_arch_end = 0; 675 iter->pos_mod_end = 0; 676 iter->pos_ftrace_mod_end = 0; 677 iter->pos_bpf_end = 0; 678 } 679 } 680 681 /* 682 * The end position (last + 1) of each additional kallsyms section is recorded 683 * in iter->pos_..._end as each section is added, and so can be used to 684 * determine which get_ksymbol_...() function to call next. 685 */ 686 static int update_iter_mod(struct kallsym_iter *iter, loff_t pos) 687 { 688 iter->pos = pos; 689 690 if ((!iter->pos_arch_end || iter->pos_arch_end > pos) && 691 get_ksymbol_arch(iter)) 692 return 1; 693 694 if ((!iter->pos_mod_end || iter->pos_mod_end > pos) && 695 get_ksymbol_mod(iter)) 696 return 1; 697 698 if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) && 699 get_ksymbol_ftrace_mod(iter)) 700 return 1; 701 702 if ((!iter->pos_bpf_end || iter->pos_bpf_end > pos) && 703 get_ksymbol_bpf(iter)) 704 return 1; 705 706 return get_ksymbol_kprobe(iter); 707 } 708 709 /* Returns false if pos at or past end of file. */ 710 static int update_iter(struct kallsym_iter *iter, loff_t pos) 711 { 712 /* Module symbols can be accessed randomly. */ 713 if (pos >= kallsyms_num_syms) 714 return update_iter_mod(iter, pos); 715 716 /* If we're not on the desired position, reset to new position. */ 717 if (pos != iter->pos) 718 reset_iter(iter, pos); 719 720 iter->nameoff += get_ksymbol_core(iter); 721 iter->pos++; 722 723 return 1; 724 } 725 726 static void *s_next(struct seq_file *m, void *p, loff_t *pos) 727 { 728 (*pos)++; 729 730 if (!update_iter(m->private, *pos)) 731 return NULL; 732 return p; 733 } 734 735 static void *s_start(struct seq_file *m, loff_t *pos) 736 { 737 if (!update_iter(m->private, *pos)) 738 return NULL; 739 return m->private; 740 } 741 742 static void s_stop(struct seq_file *m, void *p) 743 { 744 } 745 746 static int s_show(struct seq_file *m, void *p) 747 { 748 void *value; 749 struct kallsym_iter *iter = m->private; 750 751 /* Some debugging symbols have no name. Ignore them. */ 752 if (!iter->name[0]) 753 return 0; 754 755 value = iter->show_value ? (void *)iter->value : NULL; 756 757 if (iter->module_name[0]) { 758 char type; 759 760 /* 761 * Label it "global" if it is exported, 762 * "local" if not exported. 763 */ 764 type = iter->exported ? toupper(iter->type) : 765 tolower(iter->type); 766 seq_printf(m, "%px %c %s\t[%s]\n", value, 767 type, iter->name, iter->module_name); 768 } else 769 seq_printf(m, "%px %c %s\n", value, 770 iter->type, iter->name); 771 return 0; 772 } 773 774 static const struct seq_operations kallsyms_op = { 775 .start = s_start, 776 .next = s_next, 777 .stop = s_stop, 778 .show = s_show 779 }; 780 781 static inline int kallsyms_for_perf(void) 782 { 783 #ifdef CONFIG_PERF_EVENTS 784 extern int sysctl_perf_event_paranoid; 785 if (sysctl_perf_event_paranoid <= 1) 786 return 1; 787 #endif 788 return 0; 789 } 790 791 /* 792 * We show kallsyms information even to normal users if we've enabled 793 * kernel profiling and are explicitly not paranoid (so kptr_restrict 794 * is clear, and sysctl_perf_event_paranoid isn't set). 795 * 796 * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to 797 * block even that). 798 */ 799 bool kallsyms_show_value(const struct cred *cred) 800 { 801 switch (kptr_restrict) { 802 case 0: 803 if (kallsyms_for_perf()) 804 return true; 805 fallthrough; 806 case 1: 807 if (security_capable(cred, &init_user_ns, CAP_SYSLOG, 808 CAP_OPT_NOAUDIT) == 0) 809 return true; 810 fallthrough; 811 default: 812 return false; 813 } 814 } 815 816 static int kallsyms_open(struct inode *inode, struct file *file) 817 { 818 /* 819 * We keep iterator in m->private, since normal case is to 820 * s_start from where we left off, so we avoid doing 821 * using get_symbol_offset for every symbol. 822 */ 823 struct kallsym_iter *iter; 824 iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter)); 825 if (!iter) 826 return -ENOMEM; 827 reset_iter(iter, 0); 828 829 /* 830 * Instead of checking this on every s_show() call, cache 831 * the result here at open time. 832 */ 833 iter->show_value = kallsyms_show_value(file->f_cred); 834 return 0; 835 } 836 837 #ifdef CONFIG_KGDB_KDB 838 const char *kdb_walk_kallsyms(loff_t *pos) 839 { 840 static struct kallsym_iter kdb_walk_kallsyms_iter; 841 if (*pos == 0) { 842 memset(&kdb_walk_kallsyms_iter, 0, 843 sizeof(kdb_walk_kallsyms_iter)); 844 reset_iter(&kdb_walk_kallsyms_iter, 0); 845 } 846 while (1) { 847 if (!update_iter(&kdb_walk_kallsyms_iter, *pos)) 848 return NULL; 849 ++*pos; 850 /* Some debugging symbols have no name. Ignore them. */ 851 if (kdb_walk_kallsyms_iter.name[0]) 852 return kdb_walk_kallsyms_iter.name; 853 } 854 } 855 #endif /* CONFIG_KGDB_KDB */ 856 857 static const struct proc_ops kallsyms_proc_ops = { 858 .proc_open = kallsyms_open, 859 .proc_read = seq_read, 860 .proc_lseek = seq_lseek, 861 .proc_release = seq_release_private, 862 }; 863 864 static int __init kallsyms_init(void) 865 { 866 proc_create("kallsyms", 0444, NULL, &kallsyms_proc_ops); 867 return 0; 868 } 869 device_initcall(kallsyms_init); 870