1 #include "builtin.h" 2 #include "perf.h" 3 4 #include "util/evlist.h" 5 #include "util/evsel.h" 6 #include "util/util.h" 7 #include "util/config.h" 8 #include "util/symbol.h" 9 #include "util/thread.h" 10 #include "util/header.h" 11 #include "util/session.h" 12 #include "util/tool.h" 13 #include "util/callchain.h" 14 15 #include <subcmd/parse-options.h> 16 #include "util/trace-event.h" 17 #include "util/data.h" 18 #include "util/cpumap.h" 19 20 #include "util/debug.h" 21 22 #include <linux/rbtree.h> 23 #include <linux/string.h> 24 #include <locale.h> 25 #include <regex.h> 26 27 static int kmem_slab; 28 static int kmem_page; 29 30 static long kmem_page_size; 31 static enum { 32 KMEM_SLAB, 33 KMEM_PAGE, 34 } kmem_default = KMEM_SLAB; /* for backward compatibility */ 35 36 struct alloc_stat; 37 typedef int (*sort_fn_t)(void *, void *); 38 39 static int alloc_flag; 40 static int caller_flag; 41 42 static int alloc_lines = -1; 43 static int caller_lines = -1; 44 45 static bool raw_ip; 46 47 struct alloc_stat { 48 u64 call_site; 49 u64 ptr; 50 u64 bytes_req; 51 u64 bytes_alloc; 52 u32 hit; 53 u32 pingpong; 54 55 short alloc_cpu; 56 57 struct rb_node node; 58 }; 59 60 static struct rb_root root_alloc_stat; 61 static struct rb_root root_alloc_sorted; 62 static struct rb_root root_caller_stat; 63 static struct rb_root root_caller_sorted; 64 65 static unsigned long total_requested, total_allocated; 66 static unsigned long nr_allocs, nr_cross_allocs; 67 68 static int insert_alloc_stat(unsigned long call_site, unsigned long ptr, 69 int bytes_req, int bytes_alloc, int cpu) 70 { 71 struct rb_node **node = &root_alloc_stat.rb_node; 72 struct rb_node *parent = NULL; 73 struct alloc_stat *data = NULL; 74 75 while (*node) { 76 parent = *node; 77 data = rb_entry(*node, struct alloc_stat, node); 78 79 if (ptr > data->ptr) 80 node = &(*node)->rb_right; 81 else if (ptr < data->ptr) 82 node = &(*node)->rb_left; 83 else 84 break; 85 } 86 87 if (data && data->ptr == ptr) { 88 data->hit++; 89 data->bytes_req += bytes_req; 90 data->bytes_alloc += bytes_alloc; 91 } else { 92 data = malloc(sizeof(*data)); 93 if (!data) { 94 pr_err("%s: malloc failed\n", __func__); 95 return -1; 96 } 97 data->ptr = ptr; 98 data->pingpong = 0; 99 data->hit = 1; 100 data->bytes_req = bytes_req; 101 data->bytes_alloc = bytes_alloc; 102 103 rb_link_node(&data->node, parent, node); 104 rb_insert_color(&data->node, &root_alloc_stat); 105 } 106 data->call_site = call_site; 107 data->alloc_cpu = cpu; 108 return 0; 109 } 110 111 static int insert_caller_stat(unsigned long call_site, 112 int bytes_req, int bytes_alloc) 113 { 114 struct rb_node **node = &root_caller_stat.rb_node; 115 struct rb_node *parent = NULL; 116 struct alloc_stat *data = NULL; 117 118 while (*node) { 119 parent = *node; 120 data = rb_entry(*node, struct alloc_stat, node); 121 122 if (call_site > data->call_site) 123 node = &(*node)->rb_right; 124 else if (call_site < data->call_site) 125 node = &(*node)->rb_left; 126 else 127 break; 128 } 129 130 if (data && data->call_site == call_site) { 131 data->hit++; 132 data->bytes_req += bytes_req; 133 data->bytes_alloc += bytes_alloc; 134 } else { 135 data = malloc(sizeof(*data)); 136 if (!data) { 137 pr_err("%s: malloc failed\n", __func__); 138 return -1; 139 } 140 data->call_site = call_site; 141 data->pingpong = 0; 142 data->hit = 1; 143 data->bytes_req = bytes_req; 144 data->bytes_alloc = bytes_alloc; 145 146 rb_link_node(&data->node, parent, node); 147 rb_insert_color(&data->node, &root_caller_stat); 148 } 149 150 return 0; 151 } 152 153 static int perf_evsel__process_alloc_event(struct perf_evsel *evsel, 154 struct perf_sample *sample) 155 { 156 unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"), 157 call_site = perf_evsel__intval(evsel, sample, "call_site"); 158 int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"), 159 bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc"); 160 161 if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) || 162 insert_caller_stat(call_site, bytes_req, bytes_alloc)) 163 return -1; 164 165 total_requested += bytes_req; 166 total_allocated += bytes_alloc; 167 168 nr_allocs++; 169 return 0; 170 } 171 172 static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel, 173 struct perf_sample *sample) 174 { 175 int ret = perf_evsel__process_alloc_event(evsel, sample); 176 177 if (!ret) { 178 int node1 = cpu__get_node(sample->cpu), 179 node2 = perf_evsel__intval(evsel, sample, "node"); 180 181 if (node1 != node2) 182 nr_cross_allocs++; 183 } 184 185 return ret; 186 } 187 188 static int ptr_cmp(void *, void *); 189 static int slab_callsite_cmp(void *, void *); 190 191 static struct alloc_stat *search_alloc_stat(unsigned long ptr, 192 unsigned long call_site, 193 struct rb_root *root, 194 sort_fn_t sort_fn) 195 { 196 struct rb_node *node = root->rb_node; 197 struct alloc_stat key = { .ptr = ptr, .call_site = call_site }; 198 199 while (node) { 200 struct alloc_stat *data; 201 int cmp; 202 203 data = rb_entry(node, struct alloc_stat, node); 204 205 cmp = sort_fn(&key, data); 206 if (cmp < 0) 207 node = node->rb_left; 208 else if (cmp > 0) 209 node = node->rb_right; 210 else 211 return data; 212 } 213 return NULL; 214 } 215 216 static int perf_evsel__process_free_event(struct perf_evsel *evsel, 217 struct perf_sample *sample) 218 { 219 unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"); 220 struct alloc_stat *s_alloc, *s_caller; 221 222 s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp); 223 if (!s_alloc) 224 return 0; 225 226 if ((short)sample->cpu != s_alloc->alloc_cpu) { 227 s_alloc->pingpong++; 228 229 s_caller = search_alloc_stat(0, s_alloc->call_site, 230 &root_caller_stat, 231 slab_callsite_cmp); 232 if (!s_caller) 233 return -1; 234 s_caller->pingpong++; 235 } 236 s_alloc->alloc_cpu = -1; 237 238 return 0; 239 } 240 241 static u64 total_page_alloc_bytes; 242 static u64 total_page_free_bytes; 243 static u64 total_page_nomatch_bytes; 244 static u64 total_page_fail_bytes; 245 static unsigned long nr_page_allocs; 246 static unsigned long nr_page_frees; 247 static unsigned long nr_page_fails; 248 static unsigned long nr_page_nomatch; 249 250 static bool use_pfn; 251 static bool live_page; 252 static struct perf_session *kmem_session; 253 254 #define MAX_MIGRATE_TYPES 6 255 #define MAX_PAGE_ORDER 11 256 257 static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES]; 258 259 struct page_stat { 260 struct rb_node node; 261 u64 page; 262 u64 callsite; 263 int order; 264 unsigned gfp_flags; 265 unsigned migrate_type; 266 u64 alloc_bytes; 267 u64 free_bytes; 268 int nr_alloc; 269 int nr_free; 270 }; 271 272 static struct rb_root page_live_tree; 273 static struct rb_root page_alloc_tree; 274 static struct rb_root page_alloc_sorted; 275 static struct rb_root page_caller_tree; 276 static struct rb_root page_caller_sorted; 277 278 struct alloc_func { 279 u64 start; 280 u64 end; 281 char *name; 282 }; 283 284 static int nr_alloc_funcs; 285 static struct alloc_func *alloc_func_list; 286 287 static int funcmp(const void *a, const void *b) 288 { 289 const struct alloc_func *fa = a; 290 const struct alloc_func *fb = b; 291 292 if (fa->start > fb->start) 293 return 1; 294 else 295 return -1; 296 } 297 298 static int callcmp(const void *a, const void *b) 299 { 300 const struct alloc_func *fa = a; 301 const struct alloc_func *fb = b; 302 303 if (fb->start <= fa->start && fa->end < fb->end) 304 return 0; 305 306 if (fa->start > fb->start) 307 return 1; 308 else 309 return -1; 310 } 311 312 static int build_alloc_func_list(void) 313 { 314 int ret; 315 struct map *kernel_map; 316 struct symbol *sym; 317 struct rb_node *node; 318 struct alloc_func *func; 319 struct machine *machine = &kmem_session->machines.host; 320 regex_t alloc_func_regex; 321 const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?"; 322 323 ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED); 324 if (ret) { 325 char err[BUFSIZ]; 326 327 regerror(ret, &alloc_func_regex, err, sizeof(err)); 328 pr_err("Invalid regex: %s\n%s", pattern, err); 329 return -EINVAL; 330 } 331 332 kernel_map = machine__kernel_map(machine); 333 if (map__load(kernel_map, NULL) < 0) { 334 pr_err("cannot load kernel map\n"); 335 return -ENOENT; 336 } 337 338 map__for_each_symbol(kernel_map, sym, node) { 339 if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0)) 340 continue; 341 342 func = realloc(alloc_func_list, 343 (nr_alloc_funcs + 1) * sizeof(*func)); 344 if (func == NULL) 345 return -ENOMEM; 346 347 pr_debug("alloc func: %s\n", sym->name); 348 func[nr_alloc_funcs].start = sym->start; 349 func[nr_alloc_funcs].end = sym->end; 350 func[nr_alloc_funcs].name = sym->name; 351 352 alloc_func_list = func; 353 nr_alloc_funcs++; 354 } 355 356 qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp); 357 358 regfree(&alloc_func_regex); 359 return 0; 360 } 361 362 /* 363 * Find first non-memory allocation function from callchain. 364 * The allocation functions are in the 'alloc_func_list'. 365 */ 366 static u64 find_callsite(struct perf_evsel *evsel, struct perf_sample *sample) 367 { 368 struct addr_location al; 369 struct machine *machine = &kmem_session->machines.host; 370 struct callchain_cursor_node *node; 371 372 if (alloc_func_list == NULL) { 373 if (build_alloc_func_list() < 0) 374 goto out; 375 } 376 377 al.thread = machine__findnew_thread(machine, sample->pid, sample->tid); 378 sample__resolve_callchain(sample, &callchain_cursor, NULL, evsel, &al, 16); 379 380 callchain_cursor_commit(&callchain_cursor); 381 while (true) { 382 struct alloc_func key, *caller; 383 u64 addr; 384 385 node = callchain_cursor_current(&callchain_cursor); 386 if (node == NULL) 387 break; 388 389 key.start = key.end = node->ip; 390 caller = bsearch(&key, alloc_func_list, nr_alloc_funcs, 391 sizeof(key), callcmp); 392 if (!caller) { 393 /* found */ 394 if (node->map) 395 addr = map__unmap_ip(node->map, node->ip); 396 else 397 addr = node->ip; 398 399 return addr; 400 } else 401 pr_debug3("skipping alloc function: %s\n", caller->name); 402 403 callchain_cursor_advance(&callchain_cursor); 404 } 405 406 out: 407 pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip); 408 return sample->ip; 409 } 410 411 struct sort_dimension { 412 const char name[20]; 413 sort_fn_t cmp; 414 struct list_head list; 415 }; 416 417 static LIST_HEAD(page_alloc_sort_input); 418 static LIST_HEAD(page_caller_sort_input); 419 420 static struct page_stat * 421 __page_stat__findnew_page(struct page_stat *pstat, bool create) 422 { 423 struct rb_node **node = &page_live_tree.rb_node; 424 struct rb_node *parent = NULL; 425 struct page_stat *data; 426 427 while (*node) { 428 s64 cmp; 429 430 parent = *node; 431 data = rb_entry(*node, struct page_stat, node); 432 433 cmp = data->page - pstat->page; 434 if (cmp < 0) 435 node = &parent->rb_left; 436 else if (cmp > 0) 437 node = &parent->rb_right; 438 else 439 return data; 440 } 441 442 if (!create) 443 return NULL; 444 445 data = zalloc(sizeof(*data)); 446 if (data != NULL) { 447 data->page = pstat->page; 448 data->order = pstat->order; 449 data->gfp_flags = pstat->gfp_flags; 450 data->migrate_type = pstat->migrate_type; 451 452 rb_link_node(&data->node, parent, node); 453 rb_insert_color(&data->node, &page_live_tree); 454 } 455 456 return data; 457 } 458 459 static struct page_stat *page_stat__find_page(struct page_stat *pstat) 460 { 461 return __page_stat__findnew_page(pstat, false); 462 } 463 464 static struct page_stat *page_stat__findnew_page(struct page_stat *pstat) 465 { 466 return __page_stat__findnew_page(pstat, true); 467 } 468 469 static struct page_stat * 470 __page_stat__findnew_alloc(struct page_stat *pstat, bool create) 471 { 472 struct rb_node **node = &page_alloc_tree.rb_node; 473 struct rb_node *parent = NULL; 474 struct page_stat *data; 475 struct sort_dimension *sort; 476 477 while (*node) { 478 int cmp = 0; 479 480 parent = *node; 481 data = rb_entry(*node, struct page_stat, node); 482 483 list_for_each_entry(sort, &page_alloc_sort_input, list) { 484 cmp = sort->cmp(pstat, data); 485 if (cmp) 486 break; 487 } 488 489 if (cmp < 0) 490 node = &parent->rb_left; 491 else if (cmp > 0) 492 node = &parent->rb_right; 493 else 494 return data; 495 } 496 497 if (!create) 498 return NULL; 499 500 data = zalloc(sizeof(*data)); 501 if (data != NULL) { 502 data->page = pstat->page; 503 data->order = pstat->order; 504 data->gfp_flags = pstat->gfp_flags; 505 data->migrate_type = pstat->migrate_type; 506 507 rb_link_node(&data->node, parent, node); 508 rb_insert_color(&data->node, &page_alloc_tree); 509 } 510 511 return data; 512 } 513 514 static struct page_stat *page_stat__find_alloc(struct page_stat *pstat) 515 { 516 return __page_stat__findnew_alloc(pstat, false); 517 } 518 519 static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat) 520 { 521 return __page_stat__findnew_alloc(pstat, true); 522 } 523 524 static struct page_stat * 525 __page_stat__findnew_caller(struct page_stat *pstat, bool create) 526 { 527 struct rb_node **node = &page_caller_tree.rb_node; 528 struct rb_node *parent = NULL; 529 struct page_stat *data; 530 struct sort_dimension *sort; 531 532 while (*node) { 533 int cmp = 0; 534 535 parent = *node; 536 data = rb_entry(*node, struct page_stat, node); 537 538 list_for_each_entry(sort, &page_caller_sort_input, list) { 539 cmp = sort->cmp(pstat, data); 540 if (cmp) 541 break; 542 } 543 544 if (cmp < 0) 545 node = &parent->rb_left; 546 else if (cmp > 0) 547 node = &parent->rb_right; 548 else 549 return data; 550 } 551 552 if (!create) 553 return NULL; 554 555 data = zalloc(sizeof(*data)); 556 if (data != NULL) { 557 data->callsite = pstat->callsite; 558 data->order = pstat->order; 559 data->gfp_flags = pstat->gfp_flags; 560 data->migrate_type = pstat->migrate_type; 561 562 rb_link_node(&data->node, parent, node); 563 rb_insert_color(&data->node, &page_caller_tree); 564 } 565 566 return data; 567 } 568 569 static struct page_stat *page_stat__find_caller(struct page_stat *pstat) 570 { 571 return __page_stat__findnew_caller(pstat, false); 572 } 573 574 static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat) 575 { 576 return __page_stat__findnew_caller(pstat, true); 577 } 578 579 static bool valid_page(u64 pfn_or_page) 580 { 581 if (use_pfn && pfn_or_page == -1UL) 582 return false; 583 if (!use_pfn && pfn_or_page == 0) 584 return false; 585 return true; 586 } 587 588 struct gfp_flag { 589 unsigned int flags; 590 char *compact_str; 591 char *human_readable; 592 }; 593 594 static struct gfp_flag *gfps; 595 static int nr_gfps; 596 597 static int gfpcmp(const void *a, const void *b) 598 { 599 const struct gfp_flag *fa = a; 600 const struct gfp_flag *fb = b; 601 602 return fa->flags - fb->flags; 603 } 604 605 /* see include/trace/events/mmflags.h */ 606 static const struct { 607 const char *original; 608 const char *compact; 609 } gfp_compact_table[] = { 610 { "GFP_TRANSHUGE", "THP" }, 611 { "GFP_TRANSHUGE_LIGHT", "THL" }, 612 { "GFP_HIGHUSER_MOVABLE", "HUM" }, 613 { "GFP_HIGHUSER", "HU" }, 614 { "GFP_USER", "U" }, 615 { "GFP_TEMPORARY", "TMP" }, 616 { "GFP_KERNEL_ACCOUNT", "KAC" }, 617 { "GFP_KERNEL", "K" }, 618 { "GFP_NOFS", "NF" }, 619 { "GFP_ATOMIC", "A" }, 620 { "GFP_NOIO", "NI" }, 621 { "GFP_NOWAIT", "NW" }, 622 { "GFP_DMA", "D" }, 623 { "__GFP_HIGHMEM", "HM" }, 624 { "GFP_DMA32", "D32" }, 625 { "__GFP_HIGH", "H" }, 626 { "__GFP_ATOMIC", "_A" }, 627 { "__GFP_IO", "I" }, 628 { "__GFP_FS", "F" }, 629 { "__GFP_COLD", "CO" }, 630 { "__GFP_NOWARN", "NWR" }, 631 { "__GFP_REPEAT", "R" }, 632 { "__GFP_NOFAIL", "NF" }, 633 { "__GFP_NORETRY", "NR" }, 634 { "__GFP_COMP", "C" }, 635 { "__GFP_ZERO", "Z" }, 636 { "__GFP_NOMEMALLOC", "NMA" }, 637 { "__GFP_MEMALLOC", "MA" }, 638 { "__GFP_HARDWALL", "HW" }, 639 { "__GFP_THISNODE", "TN" }, 640 { "__GFP_RECLAIMABLE", "RC" }, 641 { "__GFP_MOVABLE", "M" }, 642 { "__GFP_ACCOUNT", "AC" }, 643 { "__GFP_NOTRACK", "NT" }, 644 { "__GFP_WRITE", "WR" }, 645 { "__GFP_RECLAIM", "R" }, 646 { "__GFP_DIRECT_RECLAIM", "DR" }, 647 { "__GFP_KSWAPD_RECLAIM", "KR" }, 648 { "__GFP_OTHER_NODE", "ON" }, 649 }; 650 651 static size_t max_gfp_len; 652 653 static char *compact_gfp_flags(char *gfp_flags) 654 { 655 char *orig_flags = strdup(gfp_flags); 656 char *new_flags = NULL; 657 char *str, *pos = NULL; 658 size_t len = 0; 659 660 if (orig_flags == NULL) 661 return NULL; 662 663 str = strtok_r(orig_flags, "|", &pos); 664 while (str) { 665 size_t i; 666 char *new; 667 const char *cpt; 668 669 for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) { 670 if (strcmp(gfp_compact_table[i].original, str)) 671 continue; 672 673 cpt = gfp_compact_table[i].compact; 674 new = realloc(new_flags, len + strlen(cpt) + 2); 675 if (new == NULL) { 676 free(new_flags); 677 return NULL; 678 } 679 680 new_flags = new; 681 682 if (!len) { 683 strcpy(new_flags, cpt); 684 } else { 685 strcat(new_flags, "|"); 686 strcat(new_flags, cpt); 687 len++; 688 } 689 690 len += strlen(cpt); 691 } 692 693 str = strtok_r(NULL, "|", &pos); 694 } 695 696 if (max_gfp_len < len) 697 max_gfp_len = len; 698 699 free(orig_flags); 700 return new_flags; 701 } 702 703 static char *compact_gfp_string(unsigned long gfp_flags) 704 { 705 struct gfp_flag key = { 706 .flags = gfp_flags, 707 }; 708 struct gfp_flag *gfp; 709 710 gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp); 711 if (gfp) 712 return gfp->compact_str; 713 714 return NULL; 715 } 716 717 static int parse_gfp_flags(struct perf_evsel *evsel, struct perf_sample *sample, 718 unsigned int gfp_flags) 719 { 720 struct pevent_record record = { 721 .cpu = sample->cpu, 722 .data = sample->raw_data, 723 .size = sample->raw_size, 724 }; 725 struct trace_seq seq; 726 char *str, *pos = NULL; 727 728 if (nr_gfps) { 729 struct gfp_flag key = { 730 .flags = gfp_flags, 731 }; 732 733 if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp)) 734 return 0; 735 } 736 737 trace_seq_init(&seq); 738 pevent_event_info(&seq, evsel->tp_format, &record); 739 740 str = strtok_r(seq.buffer, " ", &pos); 741 while (str) { 742 if (!strncmp(str, "gfp_flags=", 10)) { 743 struct gfp_flag *new; 744 745 new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps)); 746 if (new == NULL) 747 return -ENOMEM; 748 749 gfps = new; 750 new += nr_gfps++; 751 752 new->flags = gfp_flags; 753 new->human_readable = strdup(str + 10); 754 new->compact_str = compact_gfp_flags(str + 10); 755 if (!new->human_readable || !new->compact_str) 756 return -ENOMEM; 757 758 qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp); 759 } 760 761 str = strtok_r(NULL, " ", &pos); 762 } 763 764 trace_seq_destroy(&seq); 765 return 0; 766 } 767 768 static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel, 769 struct perf_sample *sample) 770 { 771 u64 page; 772 unsigned int order = perf_evsel__intval(evsel, sample, "order"); 773 unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags"); 774 unsigned int migrate_type = perf_evsel__intval(evsel, sample, 775 "migratetype"); 776 u64 bytes = kmem_page_size << order; 777 u64 callsite; 778 struct page_stat *pstat; 779 struct page_stat this = { 780 .order = order, 781 .gfp_flags = gfp_flags, 782 .migrate_type = migrate_type, 783 }; 784 785 if (use_pfn) 786 page = perf_evsel__intval(evsel, sample, "pfn"); 787 else 788 page = perf_evsel__intval(evsel, sample, "page"); 789 790 nr_page_allocs++; 791 total_page_alloc_bytes += bytes; 792 793 if (!valid_page(page)) { 794 nr_page_fails++; 795 total_page_fail_bytes += bytes; 796 797 return 0; 798 } 799 800 if (parse_gfp_flags(evsel, sample, gfp_flags) < 0) 801 return -1; 802 803 callsite = find_callsite(evsel, sample); 804 805 /* 806 * This is to find the current page (with correct gfp flags and 807 * migrate type) at free event. 808 */ 809 this.page = page; 810 pstat = page_stat__findnew_page(&this); 811 if (pstat == NULL) 812 return -ENOMEM; 813 814 pstat->nr_alloc++; 815 pstat->alloc_bytes += bytes; 816 pstat->callsite = callsite; 817 818 if (!live_page) { 819 pstat = page_stat__findnew_alloc(&this); 820 if (pstat == NULL) 821 return -ENOMEM; 822 823 pstat->nr_alloc++; 824 pstat->alloc_bytes += bytes; 825 pstat->callsite = callsite; 826 } 827 828 this.callsite = callsite; 829 pstat = page_stat__findnew_caller(&this); 830 if (pstat == NULL) 831 return -ENOMEM; 832 833 pstat->nr_alloc++; 834 pstat->alloc_bytes += bytes; 835 836 order_stats[order][migrate_type]++; 837 838 return 0; 839 } 840 841 static int perf_evsel__process_page_free_event(struct perf_evsel *evsel, 842 struct perf_sample *sample) 843 { 844 u64 page; 845 unsigned int order = perf_evsel__intval(evsel, sample, "order"); 846 u64 bytes = kmem_page_size << order; 847 struct page_stat *pstat; 848 struct page_stat this = { 849 .order = order, 850 }; 851 852 if (use_pfn) 853 page = perf_evsel__intval(evsel, sample, "pfn"); 854 else 855 page = perf_evsel__intval(evsel, sample, "page"); 856 857 nr_page_frees++; 858 total_page_free_bytes += bytes; 859 860 this.page = page; 861 pstat = page_stat__find_page(&this); 862 if (pstat == NULL) { 863 pr_debug2("missing free at page %"PRIx64" (order: %d)\n", 864 page, order); 865 866 nr_page_nomatch++; 867 total_page_nomatch_bytes += bytes; 868 869 return 0; 870 } 871 872 this.gfp_flags = pstat->gfp_flags; 873 this.migrate_type = pstat->migrate_type; 874 this.callsite = pstat->callsite; 875 876 rb_erase(&pstat->node, &page_live_tree); 877 free(pstat); 878 879 if (live_page) { 880 order_stats[this.order][this.migrate_type]--; 881 } else { 882 pstat = page_stat__find_alloc(&this); 883 if (pstat == NULL) 884 return -ENOMEM; 885 886 pstat->nr_free++; 887 pstat->free_bytes += bytes; 888 } 889 890 pstat = page_stat__find_caller(&this); 891 if (pstat == NULL) 892 return -ENOENT; 893 894 pstat->nr_free++; 895 pstat->free_bytes += bytes; 896 897 if (live_page) { 898 pstat->nr_alloc--; 899 pstat->alloc_bytes -= bytes; 900 901 if (pstat->nr_alloc == 0) { 902 rb_erase(&pstat->node, &page_caller_tree); 903 free(pstat); 904 } 905 } 906 907 return 0; 908 } 909 910 typedef int (*tracepoint_handler)(struct perf_evsel *evsel, 911 struct perf_sample *sample); 912 913 static int process_sample_event(struct perf_tool *tool __maybe_unused, 914 union perf_event *event, 915 struct perf_sample *sample, 916 struct perf_evsel *evsel, 917 struct machine *machine) 918 { 919 int err = 0; 920 struct thread *thread = machine__findnew_thread(machine, sample->pid, 921 sample->tid); 922 923 if (thread == NULL) { 924 pr_debug("problem processing %d event, skipping it.\n", 925 event->header.type); 926 return -1; 927 } 928 929 dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid); 930 931 if (evsel->handler != NULL) { 932 tracepoint_handler f = evsel->handler; 933 err = f(evsel, sample); 934 } 935 936 thread__put(thread); 937 938 return err; 939 } 940 941 static struct perf_tool perf_kmem = { 942 .sample = process_sample_event, 943 .comm = perf_event__process_comm, 944 .mmap = perf_event__process_mmap, 945 .mmap2 = perf_event__process_mmap2, 946 .ordered_events = true, 947 }; 948 949 static double fragmentation(unsigned long n_req, unsigned long n_alloc) 950 { 951 if (n_alloc == 0) 952 return 0.0; 953 else 954 return 100.0 - (100.0 * n_req / n_alloc); 955 } 956 957 static void __print_slab_result(struct rb_root *root, 958 struct perf_session *session, 959 int n_lines, int is_caller) 960 { 961 struct rb_node *next; 962 struct machine *machine = &session->machines.host; 963 964 printf("%.105s\n", graph_dotted_line); 965 printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr"); 966 printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n"); 967 printf("%.105s\n", graph_dotted_line); 968 969 next = rb_first(root); 970 971 while (next && n_lines--) { 972 struct alloc_stat *data = rb_entry(next, struct alloc_stat, 973 node); 974 struct symbol *sym = NULL; 975 struct map *map; 976 char buf[BUFSIZ]; 977 u64 addr; 978 979 if (is_caller) { 980 addr = data->call_site; 981 if (!raw_ip) 982 sym = machine__find_kernel_function(machine, addr, &map, NULL); 983 } else 984 addr = data->ptr; 985 986 if (sym != NULL) 987 snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name, 988 addr - map->unmap_ip(map, sym->start)); 989 else 990 snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr); 991 printf(" %-34s |", buf); 992 993 printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n", 994 (unsigned long long)data->bytes_alloc, 995 (unsigned long)data->bytes_alloc / data->hit, 996 (unsigned long long)data->bytes_req, 997 (unsigned long)data->bytes_req / data->hit, 998 (unsigned long)data->hit, 999 (unsigned long)data->pingpong, 1000 fragmentation(data->bytes_req, data->bytes_alloc)); 1001 1002 next = rb_next(next); 1003 } 1004 1005 if (n_lines == -1) 1006 printf(" ... | ... | ... | ... | ... | ... \n"); 1007 1008 printf("%.105s\n", graph_dotted_line); 1009 } 1010 1011 static const char * const migrate_type_str[] = { 1012 "UNMOVABL", 1013 "RECLAIM", 1014 "MOVABLE", 1015 "RESERVED", 1016 "CMA/ISLT", 1017 "UNKNOWN", 1018 }; 1019 1020 static void __print_page_alloc_result(struct perf_session *session, int n_lines) 1021 { 1022 struct rb_node *next = rb_first(&page_alloc_sorted); 1023 struct machine *machine = &session->machines.host; 1024 const char *format; 1025 int gfp_len = max(strlen("GFP flags"), max_gfp_len); 1026 1027 printf("\n%.105s\n", graph_dotted_line); 1028 printf(" %-16s | %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n", 1029 use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total", 1030 gfp_len, "GFP flags"); 1031 printf("%.105s\n", graph_dotted_line); 1032 1033 if (use_pfn) 1034 format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n"; 1035 else 1036 format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n"; 1037 1038 while (next && n_lines--) { 1039 struct page_stat *data; 1040 struct symbol *sym; 1041 struct map *map; 1042 char buf[32]; 1043 char *caller = buf; 1044 1045 data = rb_entry(next, struct page_stat, node); 1046 sym = machine__find_kernel_function(machine, data->callsite, 1047 &map, NULL); 1048 if (sym && sym->name) 1049 caller = sym->name; 1050 else 1051 scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite); 1052 1053 printf(format, (unsigned long long)data->page, 1054 (unsigned long long)data->alloc_bytes / 1024, 1055 data->nr_alloc, data->order, 1056 migrate_type_str[data->migrate_type], 1057 gfp_len, compact_gfp_string(data->gfp_flags), caller); 1058 1059 next = rb_next(next); 1060 } 1061 1062 if (n_lines == -1) { 1063 printf(" ... | ... | ... | ... | ... | %-*s | ...\n", 1064 gfp_len, "..."); 1065 } 1066 1067 printf("%.105s\n", graph_dotted_line); 1068 } 1069 1070 static void __print_page_caller_result(struct perf_session *session, int n_lines) 1071 { 1072 struct rb_node *next = rb_first(&page_caller_sorted); 1073 struct machine *machine = &session->machines.host; 1074 int gfp_len = max(strlen("GFP flags"), max_gfp_len); 1075 1076 printf("\n%.105s\n", graph_dotted_line); 1077 printf(" %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n", 1078 live_page ? "Live" : "Total", gfp_len, "GFP flags"); 1079 printf("%.105s\n", graph_dotted_line); 1080 1081 while (next && n_lines--) { 1082 struct page_stat *data; 1083 struct symbol *sym; 1084 struct map *map; 1085 char buf[32]; 1086 char *caller = buf; 1087 1088 data = rb_entry(next, struct page_stat, node); 1089 sym = machine__find_kernel_function(machine, data->callsite, 1090 &map, NULL); 1091 if (sym && sym->name) 1092 caller = sym->name; 1093 else 1094 scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite); 1095 1096 printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n", 1097 (unsigned long long)data->alloc_bytes / 1024, 1098 data->nr_alloc, data->order, 1099 migrate_type_str[data->migrate_type], 1100 gfp_len, compact_gfp_string(data->gfp_flags), caller); 1101 1102 next = rb_next(next); 1103 } 1104 1105 if (n_lines == -1) { 1106 printf(" ... | ... | ... | ... | %-*s | ...\n", 1107 gfp_len, "..."); 1108 } 1109 1110 printf("%.105s\n", graph_dotted_line); 1111 } 1112 1113 static void print_gfp_flags(void) 1114 { 1115 int i; 1116 1117 printf("#\n"); 1118 printf("# GFP flags\n"); 1119 printf("# ---------\n"); 1120 for (i = 0; i < nr_gfps; i++) { 1121 printf("# %08x: %*s: %s\n", gfps[i].flags, 1122 (int) max_gfp_len, gfps[i].compact_str, 1123 gfps[i].human_readable); 1124 } 1125 } 1126 1127 static void print_slab_summary(void) 1128 { 1129 printf("\nSUMMARY (SLAB allocator)"); 1130 printf("\n========================\n"); 1131 printf("Total bytes requested: %'lu\n", total_requested); 1132 printf("Total bytes allocated: %'lu\n", total_allocated); 1133 printf("Total bytes wasted on internal fragmentation: %'lu\n", 1134 total_allocated - total_requested); 1135 printf("Internal fragmentation: %f%%\n", 1136 fragmentation(total_requested, total_allocated)); 1137 printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs); 1138 } 1139 1140 static void print_page_summary(void) 1141 { 1142 int o, m; 1143 u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch; 1144 u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes; 1145 1146 printf("\nSUMMARY (page allocator)"); 1147 printf("\n========================\n"); 1148 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation requests", 1149 nr_page_allocs, total_page_alloc_bytes / 1024); 1150 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free requests", 1151 nr_page_frees, total_page_free_bytes / 1024); 1152 printf("\n"); 1153 1154 printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests", 1155 nr_alloc_freed, (total_alloc_freed_bytes) / 1024); 1156 printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc-only requests", 1157 nr_page_allocs - nr_alloc_freed, 1158 (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024); 1159 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests", 1160 nr_page_nomatch, total_page_nomatch_bytes / 1024); 1161 printf("\n"); 1162 1163 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation failures", 1164 nr_page_fails, total_page_fail_bytes / 1024); 1165 printf("\n"); 1166 1167 printf("%5s %12s %12s %12s %12s %12s\n", "Order", "Unmovable", 1168 "Reclaimable", "Movable", "Reserved", "CMA/Isolated"); 1169 printf("%.5s %.12s %.12s %.12s %.12s %.12s\n", graph_dotted_line, 1170 graph_dotted_line, graph_dotted_line, graph_dotted_line, 1171 graph_dotted_line, graph_dotted_line); 1172 1173 for (o = 0; o < MAX_PAGE_ORDER; o++) { 1174 printf("%5d", o); 1175 for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) { 1176 if (order_stats[o][m]) 1177 printf(" %'12d", order_stats[o][m]); 1178 else 1179 printf(" %12c", '.'); 1180 } 1181 printf("\n"); 1182 } 1183 } 1184 1185 static void print_slab_result(struct perf_session *session) 1186 { 1187 if (caller_flag) 1188 __print_slab_result(&root_caller_sorted, session, caller_lines, 1); 1189 if (alloc_flag) 1190 __print_slab_result(&root_alloc_sorted, session, alloc_lines, 0); 1191 print_slab_summary(); 1192 } 1193 1194 static void print_page_result(struct perf_session *session) 1195 { 1196 if (caller_flag || alloc_flag) 1197 print_gfp_flags(); 1198 if (caller_flag) 1199 __print_page_caller_result(session, caller_lines); 1200 if (alloc_flag) 1201 __print_page_alloc_result(session, alloc_lines); 1202 print_page_summary(); 1203 } 1204 1205 static void print_result(struct perf_session *session) 1206 { 1207 if (kmem_slab) 1208 print_slab_result(session); 1209 if (kmem_page) 1210 print_page_result(session); 1211 } 1212 1213 static LIST_HEAD(slab_caller_sort); 1214 static LIST_HEAD(slab_alloc_sort); 1215 static LIST_HEAD(page_caller_sort); 1216 static LIST_HEAD(page_alloc_sort); 1217 1218 static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data, 1219 struct list_head *sort_list) 1220 { 1221 struct rb_node **new = &(root->rb_node); 1222 struct rb_node *parent = NULL; 1223 struct sort_dimension *sort; 1224 1225 while (*new) { 1226 struct alloc_stat *this; 1227 int cmp = 0; 1228 1229 this = rb_entry(*new, struct alloc_stat, node); 1230 parent = *new; 1231 1232 list_for_each_entry(sort, sort_list, list) { 1233 cmp = sort->cmp(data, this); 1234 if (cmp) 1235 break; 1236 } 1237 1238 if (cmp > 0) 1239 new = &((*new)->rb_left); 1240 else 1241 new = &((*new)->rb_right); 1242 } 1243 1244 rb_link_node(&data->node, parent, new); 1245 rb_insert_color(&data->node, root); 1246 } 1247 1248 static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted, 1249 struct list_head *sort_list) 1250 { 1251 struct rb_node *node; 1252 struct alloc_stat *data; 1253 1254 for (;;) { 1255 node = rb_first(root); 1256 if (!node) 1257 break; 1258 1259 rb_erase(node, root); 1260 data = rb_entry(node, struct alloc_stat, node); 1261 sort_slab_insert(root_sorted, data, sort_list); 1262 } 1263 } 1264 1265 static void sort_page_insert(struct rb_root *root, struct page_stat *data, 1266 struct list_head *sort_list) 1267 { 1268 struct rb_node **new = &root->rb_node; 1269 struct rb_node *parent = NULL; 1270 struct sort_dimension *sort; 1271 1272 while (*new) { 1273 struct page_stat *this; 1274 int cmp = 0; 1275 1276 this = rb_entry(*new, struct page_stat, node); 1277 parent = *new; 1278 1279 list_for_each_entry(sort, sort_list, list) { 1280 cmp = sort->cmp(data, this); 1281 if (cmp) 1282 break; 1283 } 1284 1285 if (cmp > 0) 1286 new = &parent->rb_left; 1287 else 1288 new = &parent->rb_right; 1289 } 1290 1291 rb_link_node(&data->node, parent, new); 1292 rb_insert_color(&data->node, root); 1293 } 1294 1295 static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted, 1296 struct list_head *sort_list) 1297 { 1298 struct rb_node *node; 1299 struct page_stat *data; 1300 1301 for (;;) { 1302 node = rb_first(root); 1303 if (!node) 1304 break; 1305 1306 rb_erase(node, root); 1307 data = rb_entry(node, struct page_stat, node); 1308 sort_page_insert(root_sorted, data, sort_list); 1309 } 1310 } 1311 1312 static void sort_result(void) 1313 { 1314 if (kmem_slab) { 1315 __sort_slab_result(&root_alloc_stat, &root_alloc_sorted, 1316 &slab_alloc_sort); 1317 __sort_slab_result(&root_caller_stat, &root_caller_sorted, 1318 &slab_caller_sort); 1319 } 1320 if (kmem_page) { 1321 if (live_page) 1322 __sort_page_result(&page_live_tree, &page_alloc_sorted, 1323 &page_alloc_sort); 1324 else 1325 __sort_page_result(&page_alloc_tree, &page_alloc_sorted, 1326 &page_alloc_sort); 1327 1328 __sort_page_result(&page_caller_tree, &page_caller_sorted, 1329 &page_caller_sort); 1330 } 1331 } 1332 1333 static int __cmd_kmem(struct perf_session *session) 1334 { 1335 int err = -EINVAL; 1336 struct perf_evsel *evsel; 1337 const struct perf_evsel_str_handler kmem_tracepoints[] = { 1338 /* slab allocator */ 1339 { "kmem:kmalloc", perf_evsel__process_alloc_event, }, 1340 { "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, }, 1341 { "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, }, 1342 { "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, }, 1343 { "kmem:kfree", perf_evsel__process_free_event, }, 1344 { "kmem:kmem_cache_free", perf_evsel__process_free_event, }, 1345 /* page allocator */ 1346 { "kmem:mm_page_alloc", perf_evsel__process_page_alloc_event, }, 1347 { "kmem:mm_page_free", perf_evsel__process_page_free_event, }, 1348 }; 1349 1350 if (!perf_session__has_traces(session, "kmem record")) 1351 goto out; 1352 1353 if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) { 1354 pr_err("Initializing perf session tracepoint handlers failed\n"); 1355 goto out; 1356 } 1357 1358 evlist__for_each_entry(session->evlist, evsel) { 1359 if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") && 1360 perf_evsel__field(evsel, "pfn")) { 1361 use_pfn = true; 1362 break; 1363 } 1364 } 1365 1366 setup_pager(); 1367 err = perf_session__process_events(session); 1368 if (err != 0) { 1369 pr_err("error during process events: %d\n", err); 1370 goto out; 1371 } 1372 sort_result(); 1373 print_result(session); 1374 out: 1375 return err; 1376 } 1377 1378 /* slab sort keys */ 1379 static int ptr_cmp(void *a, void *b) 1380 { 1381 struct alloc_stat *l = a; 1382 struct alloc_stat *r = b; 1383 1384 if (l->ptr < r->ptr) 1385 return -1; 1386 else if (l->ptr > r->ptr) 1387 return 1; 1388 return 0; 1389 } 1390 1391 static struct sort_dimension ptr_sort_dimension = { 1392 .name = "ptr", 1393 .cmp = ptr_cmp, 1394 }; 1395 1396 static int slab_callsite_cmp(void *a, void *b) 1397 { 1398 struct alloc_stat *l = a; 1399 struct alloc_stat *r = b; 1400 1401 if (l->call_site < r->call_site) 1402 return -1; 1403 else if (l->call_site > r->call_site) 1404 return 1; 1405 return 0; 1406 } 1407 1408 static struct sort_dimension callsite_sort_dimension = { 1409 .name = "callsite", 1410 .cmp = slab_callsite_cmp, 1411 }; 1412 1413 static int hit_cmp(void *a, void *b) 1414 { 1415 struct alloc_stat *l = a; 1416 struct alloc_stat *r = b; 1417 1418 if (l->hit < r->hit) 1419 return -1; 1420 else if (l->hit > r->hit) 1421 return 1; 1422 return 0; 1423 } 1424 1425 static struct sort_dimension hit_sort_dimension = { 1426 .name = "hit", 1427 .cmp = hit_cmp, 1428 }; 1429 1430 static int bytes_cmp(void *a, void *b) 1431 { 1432 struct alloc_stat *l = a; 1433 struct alloc_stat *r = b; 1434 1435 if (l->bytes_alloc < r->bytes_alloc) 1436 return -1; 1437 else if (l->bytes_alloc > r->bytes_alloc) 1438 return 1; 1439 return 0; 1440 } 1441 1442 static struct sort_dimension bytes_sort_dimension = { 1443 .name = "bytes", 1444 .cmp = bytes_cmp, 1445 }; 1446 1447 static int frag_cmp(void *a, void *b) 1448 { 1449 double x, y; 1450 struct alloc_stat *l = a; 1451 struct alloc_stat *r = b; 1452 1453 x = fragmentation(l->bytes_req, l->bytes_alloc); 1454 y = fragmentation(r->bytes_req, r->bytes_alloc); 1455 1456 if (x < y) 1457 return -1; 1458 else if (x > y) 1459 return 1; 1460 return 0; 1461 } 1462 1463 static struct sort_dimension frag_sort_dimension = { 1464 .name = "frag", 1465 .cmp = frag_cmp, 1466 }; 1467 1468 static int pingpong_cmp(void *a, void *b) 1469 { 1470 struct alloc_stat *l = a; 1471 struct alloc_stat *r = b; 1472 1473 if (l->pingpong < r->pingpong) 1474 return -1; 1475 else if (l->pingpong > r->pingpong) 1476 return 1; 1477 return 0; 1478 } 1479 1480 static struct sort_dimension pingpong_sort_dimension = { 1481 .name = "pingpong", 1482 .cmp = pingpong_cmp, 1483 }; 1484 1485 /* page sort keys */ 1486 static int page_cmp(void *a, void *b) 1487 { 1488 struct page_stat *l = a; 1489 struct page_stat *r = b; 1490 1491 if (l->page < r->page) 1492 return -1; 1493 else if (l->page > r->page) 1494 return 1; 1495 return 0; 1496 } 1497 1498 static struct sort_dimension page_sort_dimension = { 1499 .name = "page", 1500 .cmp = page_cmp, 1501 }; 1502 1503 static int page_callsite_cmp(void *a, void *b) 1504 { 1505 struct page_stat *l = a; 1506 struct page_stat *r = b; 1507 1508 if (l->callsite < r->callsite) 1509 return -1; 1510 else if (l->callsite > r->callsite) 1511 return 1; 1512 return 0; 1513 } 1514 1515 static struct sort_dimension page_callsite_sort_dimension = { 1516 .name = "callsite", 1517 .cmp = page_callsite_cmp, 1518 }; 1519 1520 static int page_hit_cmp(void *a, void *b) 1521 { 1522 struct page_stat *l = a; 1523 struct page_stat *r = b; 1524 1525 if (l->nr_alloc < r->nr_alloc) 1526 return -1; 1527 else if (l->nr_alloc > r->nr_alloc) 1528 return 1; 1529 return 0; 1530 } 1531 1532 static struct sort_dimension page_hit_sort_dimension = { 1533 .name = "hit", 1534 .cmp = page_hit_cmp, 1535 }; 1536 1537 static int page_bytes_cmp(void *a, void *b) 1538 { 1539 struct page_stat *l = a; 1540 struct page_stat *r = b; 1541 1542 if (l->alloc_bytes < r->alloc_bytes) 1543 return -1; 1544 else if (l->alloc_bytes > r->alloc_bytes) 1545 return 1; 1546 return 0; 1547 } 1548 1549 static struct sort_dimension page_bytes_sort_dimension = { 1550 .name = "bytes", 1551 .cmp = page_bytes_cmp, 1552 }; 1553 1554 static int page_order_cmp(void *a, void *b) 1555 { 1556 struct page_stat *l = a; 1557 struct page_stat *r = b; 1558 1559 if (l->order < r->order) 1560 return -1; 1561 else if (l->order > r->order) 1562 return 1; 1563 return 0; 1564 } 1565 1566 static struct sort_dimension page_order_sort_dimension = { 1567 .name = "order", 1568 .cmp = page_order_cmp, 1569 }; 1570 1571 static int migrate_type_cmp(void *a, void *b) 1572 { 1573 struct page_stat *l = a; 1574 struct page_stat *r = b; 1575 1576 /* for internal use to find free'd page */ 1577 if (l->migrate_type == -1U) 1578 return 0; 1579 1580 if (l->migrate_type < r->migrate_type) 1581 return -1; 1582 else if (l->migrate_type > r->migrate_type) 1583 return 1; 1584 return 0; 1585 } 1586 1587 static struct sort_dimension migrate_type_sort_dimension = { 1588 .name = "migtype", 1589 .cmp = migrate_type_cmp, 1590 }; 1591 1592 static int gfp_flags_cmp(void *a, void *b) 1593 { 1594 struct page_stat *l = a; 1595 struct page_stat *r = b; 1596 1597 /* for internal use to find free'd page */ 1598 if (l->gfp_flags == -1U) 1599 return 0; 1600 1601 if (l->gfp_flags < r->gfp_flags) 1602 return -1; 1603 else if (l->gfp_flags > r->gfp_flags) 1604 return 1; 1605 return 0; 1606 } 1607 1608 static struct sort_dimension gfp_flags_sort_dimension = { 1609 .name = "gfp", 1610 .cmp = gfp_flags_cmp, 1611 }; 1612 1613 static struct sort_dimension *slab_sorts[] = { 1614 &ptr_sort_dimension, 1615 &callsite_sort_dimension, 1616 &hit_sort_dimension, 1617 &bytes_sort_dimension, 1618 &frag_sort_dimension, 1619 &pingpong_sort_dimension, 1620 }; 1621 1622 static struct sort_dimension *page_sorts[] = { 1623 &page_sort_dimension, 1624 &page_callsite_sort_dimension, 1625 &page_hit_sort_dimension, 1626 &page_bytes_sort_dimension, 1627 &page_order_sort_dimension, 1628 &migrate_type_sort_dimension, 1629 &gfp_flags_sort_dimension, 1630 }; 1631 1632 static int slab_sort_dimension__add(const char *tok, struct list_head *list) 1633 { 1634 struct sort_dimension *sort; 1635 int i; 1636 1637 for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) { 1638 if (!strcmp(slab_sorts[i]->name, tok)) { 1639 sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i])); 1640 if (!sort) { 1641 pr_err("%s: memdup failed\n", __func__); 1642 return -1; 1643 } 1644 list_add_tail(&sort->list, list); 1645 return 0; 1646 } 1647 } 1648 1649 return -1; 1650 } 1651 1652 static int page_sort_dimension__add(const char *tok, struct list_head *list) 1653 { 1654 struct sort_dimension *sort; 1655 int i; 1656 1657 for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) { 1658 if (!strcmp(page_sorts[i]->name, tok)) { 1659 sort = memdup(page_sorts[i], sizeof(*page_sorts[i])); 1660 if (!sort) { 1661 pr_err("%s: memdup failed\n", __func__); 1662 return -1; 1663 } 1664 list_add_tail(&sort->list, list); 1665 return 0; 1666 } 1667 } 1668 1669 return -1; 1670 } 1671 1672 static int setup_slab_sorting(struct list_head *sort_list, const char *arg) 1673 { 1674 char *tok; 1675 char *str = strdup(arg); 1676 char *pos = str; 1677 1678 if (!str) { 1679 pr_err("%s: strdup failed\n", __func__); 1680 return -1; 1681 } 1682 1683 while (true) { 1684 tok = strsep(&pos, ","); 1685 if (!tok) 1686 break; 1687 if (slab_sort_dimension__add(tok, sort_list) < 0) { 1688 error("Unknown slab --sort key: '%s'", tok); 1689 free(str); 1690 return -1; 1691 } 1692 } 1693 1694 free(str); 1695 return 0; 1696 } 1697 1698 static int setup_page_sorting(struct list_head *sort_list, const char *arg) 1699 { 1700 char *tok; 1701 char *str = strdup(arg); 1702 char *pos = str; 1703 1704 if (!str) { 1705 pr_err("%s: strdup failed\n", __func__); 1706 return -1; 1707 } 1708 1709 while (true) { 1710 tok = strsep(&pos, ","); 1711 if (!tok) 1712 break; 1713 if (page_sort_dimension__add(tok, sort_list) < 0) { 1714 error("Unknown page --sort key: '%s'", tok); 1715 free(str); 1716 return -1; 1717 } 1718 } 1719 1720 free(str); 1721 return 0; 1722 } 1723 1724 static int parse_sort_opt(const struct option *opt __maybe_unused, 1725 const char *arg, int unset __maybe_unused) 1726 { 1727 if (!arg) 1728 return -1; 1729 1730 if (kmem_page > kmem_slab || 1731 (kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) { 1732 if (caller_flag > alloc_flag) 1733 return setup_page_sorting(&page_caller_sort, arg); 1734 else 1735 return setup_page_sorting(&page_alloc_sort, arg); 1736 } else { 1737 if (caller_flag > alloc_flag) 1738 return setup_slab_sorting(&slab_caller_sort, arg); 1739 else 1740 return setup_slab_sorting(&slab_alloc_sort, arg); 1741 } 1742 1743 return 0; 1744 } 1745 1746 static int parse_caller_opt(const struct option *opt __maybe_unused, 1747 const char *arg __maybe_unused, 1748 int unset __maybe_unused) 1749 { 1750 caller_flag = (alloc_flag + 1); 1751 return 0; 1752 } 1753 1754 static int parse_alloc_opt(const struct option *opt __maybe_unused, 1755 const char *arg __maybe_unused, 1756 int unset __maybe_unused) 1757 { 1758 alloc_flag = (caller_flag + 1); 1759 return 0; 1760 } 1761 1762 static int parse_slab_opt(const struct option *opt __maybe_unused, 1763 const char *arg __maybe_unused, 1764 int unset __maybe_unused) 1765 { 1766 kmem_slab = (kmem_page + 1); 1767 return 0; 1768 } 1769 1770 static int parse_page_opt(const struct option *opt __maybe_unused, 1771 const char *arg __maybe_unused, 1772 int unset __maybe_unused) 1773 { 1774 kmem_page = (kmem_slab + 1); 1775 return 0; 1776 } 1777 1778 static int parse_line_opt(const struct option *opt __maybe_unused, 1779 const char *arg, int unset __maybe_unused) 1780 { 1781 int lines; 1782 1783 if (!arg) 1784 return -1; 1785 1786 lines = strtoul(arg, NULL, 10); 1787 1788 if (caller_flag > alloc_flag) 1789 caller_lines = lines; 1790 else 1791 alloc_lines = lines; 1792 1793 return 0; 1794 } 1795 1796 static int __cmd_record(int argc, const char **argv) 1797 { 1798 const char * const record_args[] = { 1799 "record", "-a", "-R", "-c", "1", 1800 }; 1801 const char * const slab_events[] = { 1802 "-e", "kmem:kmalloc", 1803 "-e", "kmem:kmalloc_node", 1804 "-e", "kmem:kfree", 1805 "-e", "kmem:kmem_cache_alloc", 1806 "-e", "kmem:kmem_cache_alloc_node", 1807 "-e", "kmem:kmem_cache_free", 1808 }; 1809 const char * const page_events[] = { 1810 "-e", "kmem:mm_page_alloc", 1811 "-e", "kmem:mm_page_free", 1812 }; 1813 unsigned int rec_argc, i, j; 1814 const char **rec_argv; 1815 1816 rec_argc = ARRAY_SIZE(record_args) + argc - 1; 1817 if (kmem_slab) 1818 rec_argc += ARRAY_SIZE(slab_events); 1819 if (kmem_page) 1820 rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */ 1821 1822 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 1823 1824 if (rec_argv == NULL) 1825 return -ENOMEM; 1826 1827 for (i = 0; i < ARRAY_SIZE(record_args); i++) 1828 rec_argv[i] = strdup(record_args[i]); 1829 1830 if (kmem_slab) { 1831 for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++) 1832 rec_argv[i] = strdup(slab_events[j]); 1833 } 1834 if (kmem_page) { 1835 rec_argv[i++] = strdup("-g"); 1836 1837 for (j = 0; j < ARRAY_SIZE(page_events); j++, i++) 1838 rec_argv[i] = strdup(page_events[j]); 1839 } 1840 1841 for (j = 1; j < (unsigned int)argc; j++, i++) 1842 rec_argv[i] = argv[j]; 1843 1844 return cmd_record(i, rec_argv, NULL); 1845 } 1846 1847 static int kmem_config(const char *var, const char *value, void *cb __maybe_unused) 1848 { 1849 if (!strcmp(var, "kmem.default")) { 1850 if (!strcmp(value, "slab")) 1851 kmem_default = KMEM_SLAB; 1852 else if (!strcmp(value, "page")) 1853 kmem_default = KMEM_PAGE; 1854 else 1855 pr_err("invalid default value ('slab' or 'page' required): %s\n", 1856 value); 1857 return 0; 1858 } 1859 1860 return 0; 1861 } 1862 1863 int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused) 1864 { 1865 const char * const default_slab_sort = "frag,hit,bytes"; 1866 const char * const default_page_sort = "bytes,hit"; 1867 struct perf_data_file file = { 1868 .mode = PERF_DATA_MODE_READ, 1869 }; 1870 const struct option kmem_options[] = { 1871 OPT_STRING('i', "input", &input_name, "file", "input file name"), 1872 OPT_INCR('v', "verbose", &verbose, 1873 "be more verbose (show symbol address, etc)"), 1874 OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL, 1875 "show per-callsite statistics", parse_caller_opt), 1876 OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL, 1877 "show per-allocation statistics", parse_alloc_opt), 1878 OPT_CALLBACK('s', "sort", NULL, "key[,key2...]", 1879 "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, " 1880 "page, order, migtype, gfp", parse_sort_opt), 1881 OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt), 1882 OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"), 1883 OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"), 1884 OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator", 1885 parse_slab_opt), 1886 OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator", 1887 parse_page_opt), 1888 OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"), 1889 OPT_END() 1890 }; 1891 const char *const kmem_subcommands[] = { "record", "stat", NULL }; 1892 const char *kmem_usage[] = { 1893 NULL, 1894 NULL 1895 }; 1896 struct perf_session *session; 1897 int ret = -1; 1898 const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n"; 1899 1900 perf_config(kmem_config, NULL); 1901 argc = parse_options_subcommand(argc, argv, kmem_options, 1902 kmem_subcommands, kmem_usage, 0); 1903 1904 if (!argc) 1905 usage_with_options(kmem_usage, kmem_options); 1906 1907 if (kmem_slab == 0 && kmem_page == 0) { 1908 if (kmem_default == KMEM_SLAB) 1909 kmem_slab = 1; 1910 else 1911 kmem_page = 1; 1912 } 1913 1914 if (!strncmp(argv[0], "rec", 3)) { 1915 symbol__init(NULL); 1916 return __cmd_record(argc, argv); 1917 } 1918 1919 file.path = input_name; 1920 1921 kmem_session = session = perf_session__new(&file, false, &perf_kmem); 1922 if (session == NULL) 1923 return -1; 1924 1925 if (kmem_slab) { 1926 if (!perf_evlist__find_tracepoint_by_name(session->evlist, 1927 "kmem:kmalloc")) { 1928 pr_err(errmsg, "slab", "slab"); 1929 goto out_delete; 1930 } 1931 } 1932 1933 if (kmem_page) { 1934 struct perf_evsel *evsel; 1935 1936 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, 1937 "kmem:mm_page_alloc"); 1938 if (evsel == NULL) { 1939 pr_err(errmsg, "page", "page"); 1940 goto out_delete; 1941 } 1942 1943 kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent); 1944 symbol_conf.use_callchain = true; 1945 } 1946 1947 symbol__init(&session->header.env); 1948 1949 if (!strcmp(argv[0], "stat")) { 1950 setlocale(LC_ALL, ""); 1951 1952 if (cpu__setup_cpunode_map()) 1953 goto out_delete; 1954 1955 if (list_empty(&slab_caller_sort)) 1956 setup_slab_sorting(&slab_caller_sort, default_slab_sort); 1957 if (list_empty(&slab_alloc_sort)) 1958 setup_slab_sorting(&slab_alloc_sort, default_slab_sort); 1959 if (list_empty(&page_caller_sort)) 1960 setup_page_sorting(&page_caller_sort, default_page_sort); 1961 if (list_empty(&page_alloc_sort)) 1962 setup_page_sorting(&page_alloc_sort, default_page_sort); 1963 1964 if (kmem_page) { 1965 setup_page_sorting(&page_alloc_sort_input, 1966 "page,order,migtype,gfp"); 1967 setup_page_sorting(&page_caller_sort_input, 1968 "callsite,order,migtype,gfp"); 1969 } 1970 ret = __cmd_kmem(session); 1971 } else 1972 usage_with_options(kmem_usage, kmem_options); 1973 1974 out_delete: 1975 perf_session__delete(session); 1976 1977 return ret; 1978 } 1979 1980