1 #include "builtin.h" 2 #include "perf.h" 3 4 #include "util/evlist.h" 5 #include "util/evsel.h" 6 #include "util/util.h" 7 #include "util/cache.h" 8 #include "util/symbol.h" 9 #include "util/thread.h" 10 #include "util/header.h" 11 #include "util/session.h" 12 #include "util/tool.h" 13 #include "util/callchain.h" 14 15 #include <subcmd/parse-options.h> 16 #include "util/trace-event.h" 17 #include "util/data.h" 18 #include "util/cpumap.h" 19 20 #include "util/debug.h" 21 22 #include <linux/rbtree.h> 23 #include <linux/string.h> 24 #include <locale.h> 25 #include <regex.h> 26 27 static int kmem_slab; 28 static int kmem_page; 29 30 static long kmem_page_size; 31 static enum { 32 KMEM_SLAB, 33 KMEM_PAGE, 34 } kmem_default = KMEM_SLAB; /* for backward compatibility */ 35 36 struct alloc_stat; 37 typedef int (*sort_fn_t)(void *, void *); 38 39 static int alloc_flag; 40 static int caller_flag; 41 42 static int alloc_lines = -1; 43 static int caller_lines = -1; 44 45 static bool raw_ip; 46 47 struct alloc_stat { 48 u64 call_site; 49 u64 ptr; 50 u64 bytes_req; 51 u64 bytes_alloc; 52 u32 hit; 53 u32 pingpong; 54 55 short alloc_cpu; 56 57 struct rb_node node; 58 }; 59 60 static struct rb_root root_alloc_stat; 61 static struct rb_root root_alloc_sorted; 62 static struct rb_root root_caller_stat; 63 static struct rb_root root_caller_sorted; 64 65 static unsigned long total_requested, total_allocated; 66 static unsigned long nr_allocs, nr_cross_allocs; 67 68 static int insert_alloc_stat(unsigned long call_site, unsigned long ptr, 69 int bytes_req, int bytes_alloc, int cpu) 70 { 71 struct rb_node **node = &root_alloc_stat.rb_node; 72 struct rb_node *parent = NULL; 73 struct alloc_stat *data = NULL; 74 75 while (*node) { 76 parent = *node; 77 data = rb_entry(*node, struct alloc_stat, node); 78 79 if (ptr > data->ptr) 80 node = &(*node)->rb_right; 81 else if (ptr < data->ptr) 82 node = &(*node)->rb_left; 83 else 84 break; 85 } 86 87 if (data && data->ptr == ptr) { 88 data->hit++; 89 data->bytes_req += bytes_req; 90 data->bytes_alloc += bytes_alloc; 91 } else { 92 data = malloc(sizeof(*data)); 93 if (!data) { 94 pr_err("%s: malloc failed\n", __func__); 95 return -1; 96 } 97 data->ptr = ptr; 98 data->pingpong = 0; 99 data->hit = 1; 100 data->bytes_req = bytes_req; 101 data->bytes_alloc = bytes_alloc; 102 103 rb_link_node(&data->node, parent, node); 104 rb_insert_color(&data->node, &root_alloc_stat); 105 } 106 data->call_site = call_site; 107 data->alloc_cpu = cpu; 108 return 0; 109 } 110 111 static int insert_caller_stat(unsigned long call_site, 112 int bytes_req, int bytes_alloc) 113 { 114 struct rb_node **node = &root_caller_stat.rb_node; 115 struct rb_node *parent = NULL; 116 struct alloc_stat *data = NULL; 117 118 while (*node) { 119 parent = *node; 120 data = rb_entry(*node, struct alloc_stat, node); 121 122 if (call_site > data->call_site) 123 node = &(*node)->rb_right; 124 else if (call_site < data->call_site) 125 node = &(*node)->rb_left; 126 else 127 break; 128 } 129 130 if (data && data->call_site == call_site) { 131 data->hit++; 132 data->bytes_req += bytes_req; 133 data->bytes_alloc += bytes_alloc; 134 } else { 135 data = malloc(sizeof(*data)); 136 if (!data) { 137 pr_err("%s: malloc failed\n", __func__); 138 return -1; 139 } 140 data->call_site = call_site; 141 data->pingpong = 0; 142 data->hit = 1; 143 data->bytes_req = bytes_req; 144 data->bytes_alloc = bytes_alloc; 145 146 rb_link_node(&data->node, parent, node); 147 rb_insert_color(&data->node, &root_caller_stat); 148 } 149 150 return 0; 151 } 152 153 static int perf_evsel__process_alloc_event(struct perf_evsel *evsel, 154 struct perf_sample *sample) 155 { 156 unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"), 157 call_site = perf_evsel__intval(evsel, sample, "call_site"); 158 int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"), 159 bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc"); 160 161 if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) || 162 insert_caller_stat(call_site, bytes_req, bytes_alloc)) 163 return -1; 164 165 total_requested += bytes_req; 166 total_allocated += bytes_alloc; 167 168 nr_allocs++; 169 return 0; 170 } 171 172 static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel, 173 struct perf_sample *sample) 174 { 175 int ret = perf_evsel__process_alloc_event(evsel, sample); 176 177 if (!ret) { 178 int node1 = cpu__get_node(sample->cpu), 179 node2 = perf_evsel__intval(evsel, sample, "node"); 180 181 if (node1 != node2) 182 nr_cross_allocs++; 183 } 184 185 return ret; 186 } 187 188 static int ptr_cmp(void *, void *); 189 static int slab_callsite_cmp(void *, void *); 190 191 static struct alloc_stat *search_alloc_stat(unsigned long ptr, 192 unsigned long call_site, 193 struct rb_root *root, 194 sort_fn_t sort_fn) 195 { 196 struct rb_node *node = root->rb_node; 197 struct alloc_stat key = { .ptr = ptr, .call_site = call_site }; 198 199 while (node) { 200 struct alloc_stat *data; 201 int cmp; 202 203 data = rb_entry(node, struct alloc_stat, node); 204 205 cmp = sort_fn(&key, data); 206 if (cmp < 0) 207 node = node->rb_left; 208 else if (cmp > 0) 209 node = node->rb_right; 210 else 211 return data; 212 } 213 return NULL; 214 } 215 216 static int perf_evsel__process_free_event(struct perf_evsel *evsel, 217 struct perf_sample *sample) 218 { 219 unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"); 220 struct alloc_stat *s_alloc, *s_caller; 221 222 s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp); 223 if (!s_alloc) 224 return 0; 225 226 if ((short)sample->cpu != s_alloc->alloc_cpu) { 227 s_alloc->pingpong++; 228 229 s_caller = search_alloc_stat(0, s_alloc->call_site, 230 &root_caller_stat, 231 slab_callsite_cmp); 232 if (!s_caller) 233 return -1; 234 s_caller->pingpong++; 235 } 236 s_alloc->alloc_cpu = -1; 237 238 return 0; 239 } 240 241 static u64 total_page_alloc_bytes; 242 static u64 total_page_free_bytes; 243 static u64 total_page_nomatch_bytes; 244 static u64 total_page_fail_bytes; 245 static unsigned long nr_page_allocs; 246 static unsigned long nr_page_frees; 247 static unsigned long nr_page_fails; 248 static unsigned long nr_page_nomatch; 249 250 static bool use_pfn; 251 static bool live_page; 252 static struct perf_session *kmem_session; 253 254 #define MAX_MIGRATE_TYPES 6 255 #define MAX_PAGE_ORDER 11 256 257 static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES]; 258 259 struct page_stat { 260 struct rb_node node; 261 u64 page; 262 u64 callsite; 263 int order; 264 unsigned gfp_flags; 265 unsigned migrate_type; 266 u64 alloc_bytes; 267 u64 free_bytes; 268 int nr_alloc; 269 int nr_free; 270 }; 271 272 static struct rb_root page_live_tree; 273 static struct rb_root page_alloc_tree; 274 static struct rb_root page_alloc_sorted; 275 static struct rb_root page_caller_tree; 276 static struct rb_root page_caller_sorted; 277 278 struct alloc_func { 279 u64 start; 280 u64 end; 281 char *name; 282 }; 283 284 static int nr_alloc_funcs; 285 static struct alloc_func *alloc_func_list; 286 287 static int funcmp(const void *a, const void *b) 288 { 289 const struct alloc_func *fa = a; 290 const struct alloc_func *fb = b; 291 292 if (fa->start > fb->start) 293 return 1; 294 else 295 return -1; 296 } 297 298 static int callcmp(const void *a, const void *b) 299 { 300 const struct alloc_func *fa = a; 301 const struct alloc_func *fb = b; 302 303 if (fb->start <= fa->start && fa->end < fb->end) 304 return 0; 305 306 if (fa->start > fb->start) 307 return 1; 308 else 309 return -1; 310 } 311 312 static int build_alloc_func_list(void) 313 { 314 int ret; 315 struct map *kernel_map; 316 struct symbol *sym; 317 struct rb_node *node; 318 struct alloc_func *func; 319 struct machine *machine = &kmem_session->machines.host; 320 regex_t alloc_func_regex; 321 const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?"; 322 323 ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED); 324 if (ret) { 325 char err[BUFSIZ]; 326 327 regerror(ret, &alloc_func_regex, err, sizeof(err)); 328 pr_err("Invalid regex: %s\n%s", pattern, err); 329 return -EINVAL; 330 } 331 332 kernel_map = machine__kernel_map(machine); 333 if (map__load(kernel_map, NULL) < 0) { 334 pr_err("cannot load kernel map\n"); 335 return -ENOENT; 336 } 337 338 map__for_each_symbol(kernel_map, sym, node) { 339 if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0)) 340 continue; 341 342 func = realloc(alloc_func_list, 343 (nr_alloc_funcs + 1) * sizeof(*func)); 344 if (func == NULL) 345 return -ENOMEM; 346 347 pr_debug("alloc func: %s\n", sym->name); 348 func[nr_alloc_funcs].start = sym->start; 349 func[nr_alloc_funcs].end = sym->end; 350 func[nr_alloc_funcs].name = sym->name; 351 352 alloc_func_list = func; 353 nr_alloc_funcs++; 354 } 355 356 qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp); 357 358 regfree(&alloc_func_regex); 359 return 0; 360 } 361 362 /* 363 * Find first non-memory allocation function from callchain. 364 * The allocation functions are in the 'alloc_func_list'. 365 */ 366 static u64 find_callsite(struct perf_evsel *evsel, struct perf_sample *sample) 367 { 368 struct addr_location al; 369 struct machine *machine = &kmem_session->machines.host; 370 struct callchain_cursor_node *node; 371 372 if (alloc_func_list == NULL) { 373 if (build_alloc_func_list() < 0) 374 goto out; 375 } 376 377 al.thread = machine__findnew_thread(machine, sample->pid, sample->tid); 378 sample__resolve_callchain(sample, &callchain_cursor, NULL, evsel, &al, 16); 379 380 callchain_cursor_commit(&callchain_cursor); 381 while (true) { 382 struct alloc_func key, *caller; 383 u64 addr; 384 385 node = callchain_cursor_current(&callchain_cursor); 386 if (node == NULL) 387 break; 388 389 key.start = key.end = node->ip; 390 caller = bsearch(&key, alloc_func_list, nr_alloc_funcs, 391 sizeof(key), callcmp); 392 if (!caller) { 393 /* found */ 394 if (node->map) 395 addr = map__unmap_ip(node->map, node->ip); 396 else 397 addr = node->ip; 398 399 return addr; 400 } else 401 pr_debug3("skipping alloc function: %s\n", caller->name); 402 403 callchain_cursor_advance(&callchain_cursor); 404 } 405 406 out: 407 pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip); 408 return sample->ip; 409 } 410 411 struct sort_dimension { 412 const char name[20]; 413 sort_fn_t cmp; 414 struct list_head list; 415 }; 416 417 static LIST_HEAD(page_alloc_sort_input); 418 static LIST_HEAD(page_caller_sort_input); 419 420 static struct page_stat * 421 __page_stat__findnew_page(struct page_stat *pstat, bool create) 422 { 423 struct rb_node **node = &page_live_tree.rb_node; 424 struct rb_node *parent = NULL; 425 struct page_stat *data; 426 427 while (*node) { 428 s64 cmp; 429 430 parent = *node; 431 data = rb_entry(*node, struct page_stat, node); 432 433 cmp = data->page - pstat->page; 434 if (cmp < 0) 435 node = &parent->rb_left; 436 else if (cmp > 0) 437 node = &parent->rb_right; 438 else 439 return data; 440 } 441 442 if (!create) 443 return NULL; 444 445 data = zalloc(sizeof(*data)); 446 if (data != NULL) { 447 data->page = pstat->page; 448 data->order = pstat->order; 449 data->gfp_flags = pstat->gfp_flags; 450 data->migrate_type = pstat->migrate_type; 451 452 rb_link_node(&data->node, parent, node); 453 rb_insert_color(&data->node, &page_live_tree); 454 } 455 456 return data; 457 } 458 459 static struct page_stat *page_stat__find_page(struct page_stat *pstat) 460 { 461 return __page_stat__findnew_page(pstat, false); 462 } 463 464 static struct page_stat *page_stat__findnew_page(struct page_stat *pstat) 465 { 466 return __page_stat__findnew_page(pstat, true); 467 } 468 469 static struct page_stat * 470 __page_stat__findnew_alloc(struct page_stat *pstat, bool create) 471 { 472 struct rb_node **node = &page_alloc_tree.rb_node; 473 struct rb_node *parent = NULL; 474 struct page_stat *data; 475 struct sort_dimension *sort; 476 477 while (*node) { 478 int cmp = 0; 479 480 parent = *node; 481 data = rb_entry(*node, struct page_stat, node); 482 483 list_for_each_entry(sort, &page_alloc_sort_input, list) { 484 cmp = sort->cmp(pstat, data); 485 if (cmp) 486 break; 487 } 488 489 if (cmp < 0) 490 node = &parent->rb_left; 491 else if (cmp > 0) 492 node = &parent->rb_right; 493 else 494 return data; 495 } 496 497 if (!create) 498 return NULL; 499 500 data = zalloc(sizeof(*data)); 501 if (data != NULL) { 502 data->page = pstat->page; 503 data->order = pstat->order; 504 data->gfp_flags = pstat->gfp_flags; 505 data->migrate_type = pstat->migrate_type; 506 507 rb_link_node(&data->node, parent, node); 508 rb_insert_color(&data->node, &page_alloc_tree); 509 } 510 511 return data; 512 } 513 514 static struct page_stat *page_stat__find_alloc(struct page_stat *pstat) 515 { 516 return __page_stat__findnew_alloc(pstat, false); 517 } 518 519 static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat) 520 { 521 return __page_stat__findnew_alloc(pstat, true); 522 } 523 524 static struct page_stat * 525 __page_stat__findnew_caller(struct page_stat *pstat, bool create) 526 { 527 struct rb_node **node = &page_caller_tree.rb_node; 528 struct rb_node *parent = NULL; 529 struct page_stat *data; 530 struct sort_dimension *sort; 531 532 while (*node) { 533 int cmp = 0; 534 535 parent = *node; 536 data = rb_entry(*node, struct page_stat, node); 537 538 list_for_each_entry(sort, &page_caller_sort_input, list) { 539 cmp = sort->cmp(pstat, data); 540 if (cmp) 541 break; 542 } 543 544 if (cmp < 0) 545 node = &parent->rb_left; 546 else if (cmp > 0) 547 node = &parent->rb_right; 548 else 549 return data; 550 } 551 552 if (!create) 553 return NULL; 554 555 data = zalloc(sizeof(*data)); 556 if (data != NULL) { 557 data->callsite = pstat->callsite; 558 data->order = pstat->order; 559 data->gfp_flags = pstat->gfp_flags; 560 data->migrate_type = pstat->migrate_type; 561 562 rb_link_node(&data->node, parent, node); 563 rb_insert_color(&data->node, &page_caller_tree); 564 } 565 566 return data; 567 } 568 569 static struct page_stat *page_stat__find_caller(struct page_stat *pstat) 570 { 571 return __page_stat__findnew_caller(pstat, false); 572 } 573 574 static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat) 575 { 576 return __page_stat__findnew_caller(pstat, true); 577 } 578 579 static bool valid_page(u64 pfn_or_page) 580 { 581 if (use_pfn && pfn_or_page == -1UL) 582 return false; 583 if (!use_pfn && pfn_or_page == 0) 584 return false; 585 return true; 586 } 587 588 struct gfp_flag { 589 unsigned int flags; 590 char *compact_str; 591 char *human_readable; 592 }; 593 594 static struct gfp_flag *gfps; 595 static int nr_gfps; 596 597 static int gfpcmp(const void *a, const void *b) 598 { 599 const struct gfp_flag *fa = a; 600 const struct gfp_flag *fb = b; 601 602 return fa->flags - fb->flags; 603 } 604 605 /* see include/trace/events/mmflags.h */ 606 static const struct { 607 const char *original; 608 const char *compact; 609 } gfp_compact_table[] = { 610 { "GFP_TRANSHUGE", "THP" }, 611 { "GFP_HIGHUSER_MOVABLE", "HUM" }, 612 { "GFP_HIGHUSER", "HU" }, 613 { "GFP_USER", "U" }, 614 { "GFP_TEMPORARY", "TMP" }, 615 { "GFP_KERNEL_ACCOUNT", "KAC" }, 616 { "GFP_KERNEL", "K" }, 617 { "GFP_NOFS", "NF" }, 618 { "GFP_ATOMIC", "A" }, 619 { "GFP_NOIO", "NI" }, 620 { "GFP_NOWAIT", "NW" }, 621 { "GFP_DMA", "D" }, 622 { "__GFP_HIGHMEM", "HM" }, 623 { "GFP_DMA32", "D32" }, 624 { "__GFP_HIGH", "H" }, 625 { "__GFP_ATOMIC", "_A" }, 626 { "__GFP_IO", "I" }, 627 { "__GFP_FS", "F" }, 628 { "__GFP_COLD", "CO" }, 629 { "__GFP_NOWARN", "NWR" }, 630 { "__GFP_REPEAT", "R" }, 631 { "__GFP_NOFAIL", "NF" }, 632 { "__GFP_NORETRY", "NR" }, 633 { "__GFP_COMP", "C" }, 634 { "__GFP_ZERO", "Z" }, 635 { "__GFP_NOMEMALLOC", "NMA" }, 636 { "__GFP_MEMALLOC", "MA" }, 637 { "__GFP_HARDWALL", "HW" }, 638 { "__GFP_THISNODE", "TN" }, 639 { "__GFP_RECLAIMABLE", "RC" }, 640 { "__GFP_MOVABLE", "M" }, 641 { "__GFP_ACCOUNT", "AC" }, 642 { "__GFP_NOTRACK", "NT" }, 643 { "__GFP_WRITE", "WR" }, 644 { "__GFP_RECLAIM", "R" }, 645 { "__GFP_DIRECT_RECLAIM", "DR" }, 646 { "__GFP_KSWAPD_RECLAIM", "KR" }, 647 { "__GFP_OTHER_NODE", "ON" }, 648 }; 649 650 static size_t max_gfp_len; 651 652 static char *compact_gfp_flags(char *gfp_flags) 653 { 654 char *orig_flags = strdup(gfp_flags); 655 char *new_flags = NULL; 656 char *str, *pos = NULL; 657 size_t len = 0; 658 659 if (orig_flags == NULL) 660 return NULL; 661 662 str = strtok_r(orig_flags, "|", &pos); 663 while (str) { 664 size_t i; 665 char *new; 666 const char *cpt; 667 668 for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) { 669 if (strcmp(gfp_compact_table[i].original, str)) 670 continue; 671 672 cpt = gfp_compact_table[i].compact; 673 new = realloc(new_flags, len + strlen(cpt) + 2); 674 if (new == NULL) { 675 free(new_flags); 676 return NULL; 677 } 678 679 new_flags = new; 680 681 if (!len) { 682 strcpy(new_flags, cpt); 683 } else { 684 strcat(new_flags, "|"); 685 strcat(new_flags, cpt); 686 len++; 687 } 688 689 len += strlen(cpt); 690 } 691 692 str = strtok_r(NULL, "|", &pos); 693 } 694 695 if (max_gfp_len < len) 696 max_gfp_len = len; 697 698 free(orig_flags); 699 return new_flags; 700 } 701 702 static char *compact_gfp_string(unsigned long gfp_flags) 703 { 704 struct gfp_flag key = { 705 .flags = gfp_flags, 706 }; 707 struct gfp_flag *gfp; 708 709 gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp); 710 if (gfp) 711 return gfp->compact_str; 712 713 return NULL; 714 } 715 716 static int parse_gfp_flags(struct perf_evsel *evsel, struct perf_sample *sample, 717 unsigned int gfp_flags) 718 { 719 struct pevent_record record = { 720 .cpu = sample->cpu, 721 .data = sample->raw_data, 722 .size = sample->raw_size, 723 }; 724 struct trace_seq seq; 725 char *str, *pos = NULL; 726 727 if (nr_gfps) { 728 struct gfp_flag key = { 729 .flags = gfp_flags, 730 }; 731 732 if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp)) 733 return 0; 734 } 735 736 trace_seq_init(&seq); 737 pevent_event_info(&seq, evsel->tp_format, &record); 738 739 str = strtok_r(seq.buffer, " ", &pos); 740 while (str) { 741 if (!strncmp(str, "gfp_flags=", 10)) { 742 struct gfp_flag *new; 743 744 new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps)); 745 if (new == NULL) 746 return -ENOMEM; 747 748 gfps = new; 749 new += nr_gfps++; 750 751 new->flags = gfp_flags; 752 new->human_readable = strdup(str + 10); 753 new->compact_str = compact_gfp_flags(str + 10); 754 if (!new->human_readable || !new->compact_str) 755 return -ENOMEM; 756 757 qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp); 758 } 759 760 str = strtok_r(NULL, " ", &pos); 761 } 762 763 trace_seq_destroy(&seq); 764 return 0; 765 } 766 767 static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel, 768 struct perf_sample *sample) 769 { 770 u64 page; 771 unsigned int order = perf_evsel__intval(evsel, sample, "order"); 772 unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags"); 773 unsigned int migrate_type = perf_evsel__intval(evsel, sample, 774 "migratetype"); 775 u64 bytes = kmem_page_size << order; 776 u64 callsite; 777 struct page_stat *pstat; 778 struct page_stat this = { 779 .order = order, 780 .gfp_flags = gfp_flags, 781 .migrate_type = migrate_type, 782 }; 783 784 if (use_pfn) 785 page = perf_evsel__intval(evsel, sample, "pfn"); 786 else 787 page = perf_evsel__intval(evsel, sample, "page"); 788 789 nr_page_allocs++; 790 total_page_alloc_bytes += bytes; 791 792 if (!valid_page(page)) { 793 nr_page_fails++; 794 total_page_fail_bytes += bytes; 795 796 return 0; 797 } 798 799 if (parse_gfp_flags(evsel, sample, gfp_flags) < 0) 800 return -1; 801 802 callsite = find_callsite(evsel, sample); 803 804 /* 805 * This is to find the current page (with correct gfp flags and 806 * migrate type) at free event. 807 */ 808 this.page = page; 809 pstat = page_stat__findnew_page(&this); 810 if (pstat == NULL) 811 return -ENOMEM; 812 813 pstat->nr_alloc++; 814 pstat->alloc_bytes += bytes; 815 pstat->callsite = callsite; 816 817 if (!live_page) { 818 pstat = page_stat__findnew_alloc(&this); 819 if (pstat == NULL) 820 return -ENOMEM; 821 822 pstat->nr_alloc++; 823 pstat->alloc_bytes += bytes; 824 pstat->callsite = callsite; 825 } 826 827 this.callsite = callsite; 828 pstat = page_stat__findnew_caller(&this); 829 if (pstat == NULL) 830 return -ENOMEM; 831 832 pstat->nr_alloc++; 833 pstat->alloc_bytes += bytes; 834 835 order_stats[order][migrate_type]++; 836 837 return 0; 838 } 839 840 static int perf_evsel__process_page_free_event(struct perf_evsel *evsel, 841 struct perf_sample *sample) 842 { 843 u64 page; 844 unsigned int order = perf_evsel__intval(evsel, sample, "order"); 845 u64 bytes = kmem_page_size << order; 846 struct page_stat *pstat; 847 struct page_stat this = { 848 .order = order, 849 }; 850 851 if (use_pfn) 852 page = perf_evsel__intval(evsel, sample, "pfn"); 853 else 854 page = perf_evsel__intval(evsel, sample, "page"); 855 856 nr_page_frees++; 857 total_page_free_bytes += bytes; 858 859 this.page = page; 860 pstat = page_stat__find_page(&this); 861 if (pstat == NULL) { 862 pr_debug2("missing free at page %"PRIx64" (order: %d)\n", 863 page, order); 864 865 nr_page_nomatch++; 866 total_page_nomatch_bytes += bytes; 867 868 return 0; 869 } 870 871 this.gfp_flags = pstat->gfp_flags; 872 this.migrate_type = pstat->migrate_type; 873 this.callsite = pstat->callsite; 874 875 rb_erase(&pstat->node, &page_live_tree); 876 free(pstat); 877 878 if (live_page) { 879 order_stats[this.order][this.migrate_type]--; 880 } else { 881 pstat = page_stat__find_alloc(&this); 882 if (pstat == NULL) 883 return -ENOMEM; 884 885 pstat->nr_free++; 886 pstat->free_bytes += bytes; 887 } 888 889 pstat = page_stat__find_caller(&this); 890 if (pstat == NULL) 891 return -ENOENT; 892 893 pstat->nr_free++; 894 pstat->free_bytes += bytes; 895 896 if (live_page) { 897 pstat->nr_alloc--; 898 pstat->alloc_bytes -= bytes; 899 900 if (pstat->nr_alloc == 0) { 901 rb_erase(&pstat->node, &page_caller_tree); 902 free(pstat); 903 } 904 } 905 906 return 0; 907 } 908 909 typedef int (*tracepoint_handler)(struct perf_evsel *evsel, 910 struct perf_sample *sample); 911 912 static int process_sample_event(struct perf_tool *tool __maybe_unused, 913 union perf_event *event, 914 struct perf_sample *sample, 915 struct perf_evsel *evsel, 916 struct machine *machine) 917 { 918 int err = 0; 919 struct thread *thread = machine__findnew_thread(machine, sample->pid, 920 sample->tid); 921 922 if (thread == NULL) { 923 pr_debug("problem processing %d event, skipping it.\n", 924 event->header.type); 925 return -1; 926 } 927 928 dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid); 929 930 if (evsel->handler != NULL) { 931 tracepoint_handler f = evsel->handler; 932 err = f(evsel, sample); 933 } 934 935 thread__put(thread); 936 937 return err; 938 } 939 940 static struct perf_tool perf_kmem = { 941 .sample = process_sample_event, 942 .comm = perf_event__process_comm, 943 .mmap = perf_event__process_mmap, 944 .mmap2 = perf_event__process_mmap2, 945 .ordered_events = true, 946 }; 947 948 static double fragmentation(unsigned long n_req, unsigned long n_alloc) 949 { 950 if (n_alloc == 0) 951 return 0.0; 952 else 953 return 100.0 - (100.0 * n_req / n_alloc); 954 } 955 956 static void __print_slab_result(struct rb_root *root, 957 struct perf_session *session, 958 int n_lines, int is_caller) 959 { 960 struct rb_node *next; 961 struct machine *machine = &session->machines.host; 962 963 printf("%.105s\n", graph_dotted_line); 964 printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr"); 965 printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n"); 966 printf("%.105s\n", graph_dotted_line); 967 968 next = rb_first(root); 969 970 while (next && n_lines--) { 971 struct alloc_stat *data = rb_entry(next, struct alloc_stat, 972 node); 973 struct symbol *sym = NULL; 974 struct map *map; 975 char buf[BUFSIZ]; 976 u64 addr; 977 978 if (is_caller) { 979 addr = data->call_site; 980 if (!raw_ip) 981 sym = machine__find_kernel_function(machine, addr, &map, NULL); 982 } else 983 addr = data->ptr; 984 985 if (sym != NULL) 986 snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name, 987 addr - map->unmap_ip(map, sym->start)); 988 else 989 snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr); 990 printf(" %-34s |", buf); 991 992 printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n", 993 (unsigned long long)data->bytes_alloc, 994 (unsigned long)data->bytes_alloc / data->hit, 995 (unsigned long long)data->bytes_req, 996 (unsigned long)data->bytes_req / data->hit, 997 (unsigned long)data->hit, 998 (unsigned long)data->pingpong, 999 fragmentation(data->bytes_req, data->bytes_alloc)); 1000 1001 next = rb_next(next); 1002 } 1003 1004 if (n_lines == -1) 1005 printf(" ... | ... | ... | ... | ... | ... \n"); 1006 1007 printf("%.105s\n", graph_dotted_line); 1008 } 1009 1010 static const char * const migrate_type_str[] = { 1011 "UNMOVABL", 1012 "RECLAIM", 1013 "MOVABLE", 1014 "RESERVED", 1015 "CMA/ISLT", 1016 "UNKNOWN", 1017 }; 1018 1019 static void __print_page_alloc_result(struct perf_session *session, int n_lines) 1020 { 1021 struct rb_node *next = rb_first(&page_alloc_sorted); 1022 struct machine *machine = &session->machines.host; 1023 const char *format; 1024 int gfp_len = max(strlen("GFP flags"), max_gfp_len); 1025 1026 printf("\n%.105s\n", graph_dotted_line); 1027 printf(" %-16s | %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n", 1028 use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total", 1029 gfp_len, "GFP flags"); 1030 printf("%.105s\n", graph_dotted_line); 1031 1032 if (use_pfn) 1033 format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n"; 1034 else 1035 format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n"; 1036 1037 while (next && n_lines--) { 1038 struct page_stat *data; 1039 struct symbol *sym; 1040 struct map *map; 1041 char buf[32]; 1042 char *caller = buf; 1043 1044 data = rb_entry(next, struct page_stat, node); 1045 sym = machine__find_kernel_function(machine, data->callsite, 1046 &map, NULL); 1047 if (sym && sym->name) 1048 caller = sym->name; 1049 else 1050 scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite); 1051 1052 printf(format, (unsigned long long)data->page, 1053 (unsigned long long)data->alloc_bytes / 1024, 1054 data->nr_alloc, data->order, 1055 migrate_type_str[data->migrate_type], 1056 gfp_len, compact_gfp_string(data->gfp_flags), caller); 1057 1058 next = rb_next(next); 1059 } 1060 1061 if (n_lines == -1) { 1062 printf(" ... | ... | ... | ... | ... | %-*s | ...\n", 1063 gfp_len, "..."); 1064 } 1065 1066 printf("%.105s\n", graph_dotted_line); 1067 } 1068 1069 static void __print_page_caller_result(struct perf_session *session, int n_lines) 1070 { 1071 struct rb_node *next = rb_first(&page_caller_sorted); 1072 struct machine *machine = &session->machines.host; 1073 int gfp_len = max(strlen("GFP flags"), max_gfp_len); 1074 1075 printf("\n%.105s\n", graph_dotted_line); 1076 printf(" %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n", 1077 live_page ? "Live" : "Total", gfp_len, "GFP flags"); 1078 printf("%.105s\n", graph_dotted_line); 1079 1080 while (next && n_lines--) { 1081 struct page_stat *data; 1082 struct symbol *sym; 1083 struct map *map; 1084 char buf[32]; 1085 char *caller = buf; 1086 1087 data = rb_entry(next, struct page_stat, node); 1088 sym = machine__find_kernel_function(machine, data->callsite, 1089 &map, NULL); 1090 if (sym && sym->name) 1091 caller = sym->name; 1092 else 1093 scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite); 1094 1095 printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n", 1096 (unsigned long long)data->alloc_bytes / 1024, 1097 data->nr_alloc, data->order, 1098 migrate_type_str[data->migrate_type], 1099 gfp_len, compact_gfp_string(data->gfp_flags), caller); 1100 1101 next = rb_next(next); 1102 } 1103 1104 if (n_lines == -1) { 1105 printf(" ... | ... | ... | ... | %-*s | ...\n", 1106 gfp_len, "..."); 1107 } 1108 1109 printf("%.105s\n", graph_dotted_line); 1110 } 1111 1112 static void print_gfp_flags(void) 1113 { 1114 int i; 1115 1116 printf("#\n"); 1117 printf("# GFP flags\n"); 1118 printf("# ---------\n"); 1119 for (i = 0; i < nr_gfps; i++) { 1120 printf("# %08x: %*s: %s\n", gfps[i].flags, 1121 (int) max_gfp_len, gfps[i].compact_str, 1122 gfps[i].human_readable); 1123 } 1124 } 1125 1126 static void print_slab_summary(void) 1127 { 1128 printf("\nSUMMARY (SLAB allocator)"); 1129 printf("\n========================\n"); 1130 printf("Total bytes requested: %'lu\n", total_requested); 1131 printf("Total bytes allocated: %'lu\n", total_allocated); 1132 printf("Total bytes wasted on internal fragmentation: %'lu\n", 1133 total_allocated - total_requested); 1134 printf("Internal fragmentation: %f%%\n", 1135 fragmentation(total_requested, total_allocated)); 1136 printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs); 1137 } 1138 1139 static void print_page_summary(void) 1140 { 1141 int o, m; 1142 u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch; 1143 u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes; 1144 1145 printf("\nSUMMARY (page allocator)"); 1146 printf("\n========================\n"); 1147 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation requests", 1148 nr_page_allocs, total_page_alloc_bytes / 1024); 1149 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free requests", 1150 nr_page_frees, total_page_free_bytes / 1024); 1151 printf("\n"); 1152 1153 printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests", 1154 nr_alloc_freed, (total_alloc_freed_bytes) / 1024); 1155 printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc-only requests", 1156 nr_page_allocs - nr_alloc_freed, 1157 (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024); 1158 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests", 1159 nr_page_nomatch, total_page_nomatch_bytes / 1024); 1160 printf("\n"); 1161 1162 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation failures", 1163 nr_page_fails, total_page_fail_bytes / 1024); 1164 printf("\n"); 1165 1166 printf("%5s %12s %12s %12s %12s %12s\n", "Order", "Unmovable", 1167 "Reclaimable", "Movable", "Reserved", "CMA/Isolated"); 1168 printf("%.5s %.12s %.12s %.12s %.12s %.12s\n", graph_dotted_line, 1169 graph_dotted_line, graph_dotted_line, graph_dotted_line, 1170 graph_dotted_line, graph_dotted_line); 1171 1172 for (o = 0; o < MAX_PAGE_ORDER; o++) { 1173 printf("%5d", o); 1174 for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) { 1175 if (order_stats[o][m]) 1176 printf(" %'12d", order_stats[o][m]); 1177 else 1178 printf(" %12c", '.'); 1179 } 1180 printf("\n"); 1181 } 1182 } 1183 1184 static void print_slab_result(struct perf_session *session) 1185 { 1186 if (caller_flag) 1187 __print_slab_result(&root_caller_sorted, session, caller_lines, 1); 1188 if (alloc_flag) 1189 __print_slab_result(&root_alloc_sorted, session, alloc_lines, 0); 1190 print_slab_summary(); 1191 } 1192 1193 static void print_page_result(struct perf_session *session) 1194 { 1195 if (caller_flag || alloc_flag) 1196 print_gfp_flags(); 1197 if (caller_flag) 1198 __print_page_caller_result(session, caller_lines); 1199 if (alloc_flag) 1200 __print_page_alloc_result(session, alloc_lines); 1201 print_page_summary(); 1202 } 1203 1204 static void print_result(struct perf_session *session) 1205 { 1206 if (kmem_slab) 1207 print_slab_result(session); 1208 if (kmem_page) 1209 print_page_result(session); 1210 } 1211 1212 static LIST_HEAD(slab_caller_sort); 1213 static LIST_HEAD(slab_alloc_sort); 1214 static LIST_HEAD(page_caller_sort); 1215 static LIST_HEAD(page_alloc_sort); 1216 1217 static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data, 1218 struct list_head *sort_list) 1219 { 1220 struct rb_node **new = &(root->rb_node); 1221 struct rb_node *parent = NULL; 1222 struct sort_dimension *sort; 1223 1224 while (*new) { 1225 struct alloc_stat *this; 1226 int cmp = 0; 1227 1228 this = rb_entry(*new, struct alloc_stat, node); 1229 parent = *new; 1230 1231 list_for_each_entry(sort, sort_list, list) { 1232 cmp = sort->cmp(data, this); 1233 if (cmp) 1234 break; 1235 } 1236 1237 if (cmp > 0) 1238 new = &((*new)->rb_left); 1239 else 1240 new = &((*new)->rb_right); 1241 } 1242 1243 rb_link_node(&data->node, parent, new); 1244 rb_insert_color(&data->node, root); 1245 } 1246 1247 static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted, 1248 struct list_head *sort_list) 1249 { 1250 struct rb_node *node; 1251 struct alloc_stat *data; 1252 1253 for (;;) { 1254 node = rb_first(root); 1255 if (!node) 1256 break; 1257 1258 rb_erase(node, root); 1259 data = rb_entry(node, struct alloc_stat, node); 1260 sort_slab_insert(root_sorted, data, sort_list); 1261 } 1262 } 1263 1264 static void sort_page_insert(struct rb_root *root, struct page_stat *data, 1265 struct list_head *sort_list) 1266 { 1267 struct rb_node **new = &root->rb_node; 1268 struct rb_node *parent = NULL; 1269 struct sort_dimension *sort; 1270 1271 while (*new) { 1272 struct page_stat *this; 1273 int cmp = 0; 1274 1275 this = rb_entry(*new, struct page_stat, node); 1276 parent = *new; 1277 1278 list_for_each_entry(sort, sort_list, list) { 1279 cmp = sort->cmp(data, this); 1280 if (cmp) 1281 break; 1282 } 1283 1284 if (cmp > 0) 1285 new = &parent->rb_left; 1286 else 1287 new = &parent->rb_right; 1288 } 1289 1290 rb_link_node(&data->node, parent, new); 1291 rb_insert_color(&data->node, root); 1292 } 1293 1294 static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted, 1295 struct list_head *sort_list) 1296 { 1297 struct rb_node *node; 1298 struct page_stat *data; 1299 1300 for (;;) { 1301 node = rb_first(root); 1302 if (!node) 1303 break; 1304 1305 rb_erase(node, root); 1306 data = rb_entry(node, struct page_stat, node); 1307 sort_page_insert(root_sorted, data, sort_list); 1308 } 1309 } 1310 1311 static void sort_result(void) 1312 { 1313 if (kmem_slab) { 1314 __sort_slab_result(&root_alloc_stat, &root_alloc_sorted, 1315 &slab_alloc_sort); 1316 __sort_slab_result(&root_caller_stat, &root_caller_sorted, 1317 &slab_caller_sort); 1318 } 1319 if (kmem_page) { 1320 if (live_page) 1321 __sort_page_result(&page_live_tree, &page_alloc_sorted, 1322 &page_alloc_sort); 1323 else 1324 __sort_page_result(&page_alloc_tree, &page_alloc_sorted, 1325 &page_alloc_sort); 1326 1327 __sort_page_result(&page_caller_tree, &page_caller_sorted, 1328 &page_caller_sort); 1329 } 1330 } 1331 1332 static int __cmd_kmem(struct perf_session *session) 1333 { 1334 int err = -EINVAL; 1335 struct perf_evsel *evsel; 1336 const struct perf_evsel_str_handler kmem_tracepoints[] = { 1337 /* slab allocator */ 1338 { "kmem:kmalloc", perf_evsel__process_alloc_event, }, 1339 { "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, }, 1340 { "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, }, 1341 { "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, }, 1342 { "kmem:kfree", perf_evsel__process_free_event, }, 1343 { "kmem:kmem_cache_free", perf_evsel__process_free_event, }, 1344 /* page allocator */ 1345 { "kmem:mm_page_alloc", perf_evsel__process_page_alloc_event, }, 1346 { "kmem:mm_page_free", perf_evsel__process_page_free_event, }, 1347 }; 1348 1349 if (!perf_session__has_traces(session, "kmem record")) 1350 goto out; 1351 1352 if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) { 1353 pr_err("Initializing perf session tracepoint handlers failed\n"); 1354 goto out; 1355 } 1356 1357 evlist__for_each(session->evlist, evsel) { 1358 if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") && 1359 perf_evsel__field(evsel, "pfn")) { 1360 use_pfn = true; 1361 break; 1362 } 1363 } 1364 1365 setup_pager(); 1366 err = perf_session__process_events(session); 1367 if (err != 0) { 1368 pr_err("error during process events: %d\n", err); 1369 goto out; 1370 } 1371 sort_result(); 1372 print_result(session); 1373 out: 1374 return err; 1375 } 1376 1377 /* slab sort keys */ 1378 static int ptr_cmp(void *a, void *b) 1379 { 1380 struct alloc_stat *l = a; 1381 struct alloc_stat *r = b; 1382 1383 if (l->ptr < r->ptr) 1384 return -1; 1385 else if (l->ptr > r->ptr) 1386 return 1; 1387 return 0; 1388 } 1389 1390 static struct sort_dimension ptr_sort_dimension = { 1391 .name = "ptr", 1392 .cmp = ptr_cmp, 1393 }; 1394 1395 static int slab_callsite_cmp(void *a, void *b) 1396 { 1397 struct alloc_stat *l = a; 1398 struct alloc_stat *r = b; 1399 1400 if (l->call_site < r->call_site) 1401 return -1; 1402 else if (l->call_site > r->call_site) 1403 return 1; 1404 return 0; 1405 } 1406 1407 static struct sort_dimension callsite_sort_dimension = { 1408 .name = "callsite", 1409 .cmp = slab_callsite_cmp, 1410 }; 1411 1412 static int hit_cmp(void *a, void *b) 1413 { 1414 struct alloc_stat *l = a; 1415 struct alloc_stat *r = b; 1416 1417 if (l->hit < r->hit) 1418 return -1; 1419 else if (l->hit > r->hit) 1420 return 1; 1421 return 0; 1422 } 1423 1424 static struct sort_dimension hit_sort_dimension = { 1425 .name = "hit", 1426 .cmp = hit_cmp, 1427 }; 1428 1429 static int bytes_cmp(void *a, void *b) 1430 { 1431 struct alloc_stat *l = a; 1432 struct alloc_stat *r = b; 1433 1434 if (l->bytes_alloc < r->bytes_alloc) 1435 return -1; 1436 else if (l->bytes_alloc > r->bytes_alloc) 1437 return 1; 1438 return 0; 1439 } 1440 1441 static struct sort_dimension bytes_sort_dimension = { 1442 .name = "bytes", 1443 .cmp = bytes_cmp, 1444 }; 1445 1446 static int frag_cmp(void *a, void *b) 1447 { 1448 double x, y; 1449 struct alloc_stat *l = a; 1450 struct alloc_stat *r = b; 1451 1452 x = fragmentation(l->bytes_req, l->bytes_alloc); 1453 y = fragmentation(r->bytes_req, r->bytes_alloc); 1454 1455 if (x < y) 1456 return -1; 1457 else if (x > y) 1458 return 1; 1459 return 0; 1460 } 1461 1462 static struct sort_dimension frag_sort_dimension = { 1463 .name = "frag", 1464 .cmp = frag_cmp, 1465 }; 1466 1467 static int pingpong_cmp(void *a, void *b) 1468 { 1469 struct alloc_stat *l = a; 1470 struct alloc_stat *r = b; 1471 1472 if (l->pingpong < r->pingpong) 1473 return -1; 1474 else if (l->pingpong > r->pingpong) 1475 return 1; 1476 return 0; 1477 } 1478 1479 static struct sort_dimension pingpong_sort_dimension = { 1480 .name = "pingpong", 1481 .cmp = pingpong_cmp, 1482 }; 1483 1484 /* page sort keys */ 1485 static int page_cmp(void *a, void *b) 1486 { 1487 struct page_stat *l = a; 1488 struct page_stat *r = b; 1489 1490 if (l->page < r->page) 1491 return -1; 1492 else if (l->page > r->page) 1493 return 1; 1494 return 0; 1495 } 1496 1497 static struct sort_dimension page_sort_dimension = { 1498 .name = "page", 1499 .cmp = page_cmp, 1500 }; 1501 1502 static int page_callsite_cmp(void *a, void *b) 1503 { 1504 struct page_stat *l = a; 1505 struct page_stat *r = b; 1506 1507 if (l->callsite < r->callsite) 1508 return -1; 1509 else if (l->callsite > r->callsite) 1510 return 1; 1511 return 0; 1512 } 1513 1514 static struct sort_dimension page_callsite_sort_dimension = { 1515 .name = "callsite", 1516 .cmp = page_callsite_cmp, 1517 }; 1518 1519 static int page_hit_cmp(void *a, void *b) 1520 { 1521 struct page_stat *l = a; 1522 struct page_stat *r = b; 1523 1524 if (l->nr_alloc < r->nr_alloc) 1525 return -1; 1526 else if (l->nr_alloc > r->nr_alloc) 1527 return 1; 1528 return 0; 1529 } 1530 1531 static struct sort_dimension page_hit_sort_dimension = { 1532 .name = "hit", 1533 .cmp = page_hit_cmp, 1534 }; 1535 1536 static int page_bytes_cmp(void *a, void *b) 1537 { 1538 struct page_stat *l = a; 1539 struct page_stat *r = b; 1540 1541 if (l->alloc_bytes < r->alloc_bytes) 1542 return -1; 1543 else if (l->alloc_bytes > r->alloc_bytes) 1544 return 1; 1545 return 0; 1546 } 1547 1548 static struct sort_dimension page_bytes_sort_dimension = { 1549 .name = "bytes", 1550 .cmp = page_bytes_cmp, 1551 }; 1552 1553 static int page_order_cmp(void *a, void *b) 1554 { 1555 struct page_stat *l = a; 1556 struct page_stat *r = b; 1557 1558 if (l->order < r->order) 1559 return -1; 1560 else if (l->order > r->order) 1561 return 1; 1562 return 0; 1563 } 1564 1565 static struct sort_dimension page_order_sort_dimension = { 1566 .name = "order", 1567 .cmp = page_order_cmp, 1568 }; 1569 1570 static int migrate_type_cmp(void *a, void *b) 1571 { 1572 struct page_stat *l = a; 1573 struct page_stat *r = b; 1574 1575 /* for internal use to find free'd page */ 1576 if (l->migrate_type == -1U) 1577 return 0; 1578 1579 if (l->migrate_type < r->migrate_type) 1580 return -1; 1581 else if (l->migrate_type > r->migrate_type) 1582 return 1; 1583 return 0; 1584 } 1585 1586 static struct sort_dimension migrate_type_sort_dimension = { 1587 .name = "migtype", 1588 .cmp = migrate_type_cmp, 1589 }; 1590 1591 static int gfp_flags_cmp(void *a, void *b) 1592 { 1593 struct page_stat *l = a; 1594 struct page_stat *r = b; 1595 1596 /* for internal use to find free'd page */ 1597 if (l->gfp_flags == -1U) 1598 return 0; 1599 1600 if (l->gfp_flags < r->gfp_flags) 1601 return -1; 1602 else if (l->gfp_flags > r->gfp_flags) 1603 return 1; 1604 return 0; 1605 } 1606 1607 static struct sort_dimension gfp_flags_sort_dimension = { 1608 .name = "gfp", 1609 .cmp = gfp_flags_cmp, 1610 }; 1611 1612 static struct sort_dimension *slab_sorts[] = { 1613 &ptr_sort_dimension, 1614 &callsite_sort_dimension, 1615 &hit_sort_dimension, 1616 &bytes_sort_dimension, 1617 &frag_sort_dimension, 1618 &pingpong_sort_dimension, 1619 }; 1620 1621 static struct sort_dimension *page_sorts[] = { 1622 &page_sort_dimension, 1623 &page_callsite_sort_dimension, 1624 &page_hit_sort_dimension, 1625 &page_bytes_sort_dimension, 1626 &page_order_sort_dimension, 1627 &migrate_type_sort_dimension, 1628 &gfp_flags_sort_dimension, 1629 }; 1630 1631 static int slab_sort_dimension__add(const char *tok, struct list_head *list) 1632 { 1633 struct sort_dimension *sort; 1634 int i; 1635 1636 for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) { 1637 if (!strcmp(slab_sorts[i]->name, tok)) { 1638 sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i])); 1639 if (!sort) { 1640 pr_err("%s: memdup failed\n", __func__); 1641 return -1; 1642 } 1643 list_add_tail(&sort->list, list); 1644 return 0; 1645 } 1646 } 1647 1648 return -1; 1649 } 1650 1651 static int page_sort_dimension__add(const char *tok, struct list_head *list) 1652 { 1653 struct sort_dimension *sort; 1654 int i; 1655 1656 for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) { 1657 if (!strcmp(page_sorts[i]->name, tok)) { 1658 sort = memdup(page_sorts[i], sizeof(*page_sorts[i])); 1659 if (!sort) { 1660 pr_err("%s: memdup failed\n", __func__); 1661 return -1; 1662 } 1663 list_add_tail(&sort->list, list); 1664 return 0; 1665 } 1666 } 1667 1668 return -1; 1669 } 1670 1671 static int setup_slab_sorting(struct list_head *sort_list, const char *arg) 1672 { 1673 char *tok; 1674 char *str = strdup(arg); 1675 char *pos = str; 1676 1677 if (!str) { 1678 pr_err("%s: strdup failed\n", __func__); 1679 return -1; 1680 } 1681 1682 while (true) { 1683 tok = strsep(&pos, ","); 1684 if (!tok) 1685 break; 1686 if (slab_sort_dimension__add(tok, sort_list) < 0) { 1687 error("Unknown slab --sort key: '%s'", tok); 1688 free(str); 1689 return -1; 1690 } 1691 } 1692 1693 free(str); 1694 return 0; 1695 } 1696 1697 static int setup_page_sorting(struct list_head *sort_list, const char *arg) 1698 { 1699 char *tok; 1700 char *str = strdup(arg); 1701 char *pos = str; 1702 1703 if (!str) { 1704 pr_err("%s: strdup failed\n", __func__); 1705 return -1; 1706 } 1707 1708 while (true) { 1709 tok = strsep(&pos, ","); 1710 if (!tok) 1711 break; 1712 if (page_sort_dimension__add(tok, sort_list) < 0) { 1713 error("Unknown page --sort key: '%s'", tok); 1714 free(str); 1715 return -1; 1716 } 1717 } 1718 1719 free(str); 1720 return 0; 1721 } 1722 1723 static int parse_sort_opt(const struct option *opt __maybe_unused, 1724 const char *arg, int unset __maybe_unused) 1725 { 1726 if (!arg) 1727 return -1; 1728 1729 if (kmem_page > kmem_slab || 1730 (kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) { 1731 if (caller_flag > alloc_flag) 1732 return setup_page_sorting(&page_caller_sort, arg); 1733 else 1734 return setup_page_sorting(&page_alloc_sort, arg); 1735 } else { 1736 if (caller_flag > alloc_flag) 1737 return setup_slab_sorting(&slab_caller_sort, arg); 1738 else 1739 return setup_slab_sorting(&slab_alloc_sort, arg); 1740 } 1741 1742 return 0; 1743 } 1744 1745 static int parse_caller_opt(const struct option *opt __maybe_unused, 1746 const char *arg __maybe_unused, 1747 int unset __maybe_unused) 1748 { 1749 caller_flag = (alloc_flag + 1); 1750 return 0; 1751 } 1752 1753 static int parse_alloc_opt(const struct option *opt __maybe_unused, 1754 const char *arg __maybe_unused, 1755 int unset __maybe_unused) 1756 { 1757 alloc_flag = (caller_flag + 1); 1758 return 0; 1759 } 1760 1761 static int parse_slab_opt(const struct option *opt __maybe_unused, 1762 const char *arg __maybe_unused, 1763 int unset __maybe_unused) 1764 { 1765 kmem_slab = (kmem_page + 1); 1766 return 0; 1767 } 1768 1769 static int parse_page_opt(const struct option *opt __maybe_unused, 1770 const char *arg __maybe_unused, 1771 int unset __maybe_unused) 1772 { 1773 kmem_page = (kmem_slab + 1); 1774 return 0; 1775 } 1776 1777 static int parse_line_opt(const struct option *opt __maybe_unused, 1778 const char *arg, int unset __maybe_unused) 1779 { 1780 int lines; 1781 1782 if (!arg) 1783 return -1; 1784 1785 lines = strtoul(arg, NULL, 10); 1786 1787 if (caller_flag > alloc_flag) 1788 caller_lines = lines; 1789 else 1790 alloc_lines = lines; 1791 1792 return 0; 1793 } 1794 1795 static int __cmd_record(int argc, const char **argv) 1796 { 1797 const char * const record_args[] = { 1798 "record", "-a", "-R", "-c", "1", 1799 }; 1800 const char * const slab_events[] = { 1801 "-e", "kmem:kmalloc", 1802 "-e", "kmem:kmalloc_node", 1803 "-e", "kmem:kfree", 1804 "-e", "kmem:kmem_cache_alloc", 1805 "-e", "kmem:kmem_cache_alloc_node", 1806 "-e", "kmem:kmem_cache_free", 1807 }; 1808 const char * const page_events[] = { 1809 "-e", "kmem:mm_page_alloc", 1810 "-e", "kmem:mm_page_free", 1811 }; 1812 unsigned int rec_argc, i, j; 1813 const char **rec_argv; 1814 1815 rec_argc = ARRAY_SIZE(record_args) + argc - 1; 1816 if (kmem_slab) 1817 rec_argc += ARRAY_SIZE(slab_events); 1818 if (kmem_page) 1819 rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */ 1820 1821 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 1822 1823 if (rec_argv == NULL) 1824 return -ENOMEM; 1825 1826 for (i = 0; i < ARRAY_SIZE(record_args); i++) 1827 rec_argv[i] = strdup(record_args[i]); 1828 1829 if (kmem_slab) { 1830 for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++) 1831 rec_argv[i] = strdup(slab_events[j]); 1832 } 1833 if (kmem_page) { 1834 rec_argv[i++] = strdup("-g"); 1835 1836 for (j = 0; j < ARRAY_SIZE(page_events); j++, i++) 1837 rec_argv[i] = strdup(page_events[j]); 1838 } 1839 1840 for (j = 1; j < (unsigned int)argc; j++, i++) 1841 rec_argv[i] = argv[j]; 1842 1843 return cmd_record(i, rec_argv, NULL); 1844 } 1845 1846 static int kmem_config(const char *var, const char *value, void *cb __maybe_unused) 1847 { 1848 if (!strcmp(var, "kmem.default")) { 1849 if (!strcmp(value, "slab")) 1850 kmem_default = KMEM_SLAB; 1851 else if (!strcmp(value, "page")) 1852 kmem_default = KMEM_PAGE; 1853 else 1854 pr_err("invalid default value ('slab' or 'page' required): %s\n", 1855 value); 1856 return 0; 1857 } 1858 1859 return 0; 1860 } 1861 1862 int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused) 1863 { 1864 const char * const default_slab_sort = "frag,hit,bytes"; 1865 const char * const default_page_sort = "bytes,hit"; 1866 struct perf_data_file file = { 1867 .mode = PERF_DATA_MODE_READ, 1868 }; 1869 const struct option kmem_options[] = { 1870 OPT_STRING('i', "input", &input_name, "file", "input file name"), 1871 OPT_INCR('v', "verbose", &verbose, 1872 "be more verbose (show symbol address, etc)"), 1873 OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL, 1874 "show per-callsite statistics", parse_caller_opt), 1875 OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL, 1876 "show per-allocation statistics", parse_alloc_opt), 1877 OPT_CALLBACK('s', "sort", NULL, "key[,key2...]", 1878 "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, " 1879 "page, order, migtype, gfp", parse_sort_opt), 1880 OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt), 1881 OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"), 1882 OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"), 1883 OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator", 1884 parse_slab_opt), 1885 OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator", 1886 parse_page_opt), 1887 OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"), 1888 OPT_END() 1889 }; 1890 const char *const kmem_subcommands[] = { "record", "stat", NULL }; 1891 const char *kmem_usage[] = { 1892 NULL, 1893 NULL 1894 }; 1895 struct perf_session *session; 1896 int ret = -1; 1897 const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n"; 1898 1899 perf_config(kmem_config, NULL); 1900 argc = parse_options_subcommand(argc, argv, kmem_options, 1901 kmem_subcommands, kmem_usage, 0); 1902 1903 if (!argc) 1904 usage_with_options(kmem_usage, kmem_options); 1905 1906 if (kmem_slab == 0 && kmem_page == 0) { 1907 if (kmem_default == KMEM_SLAB) 1908 kmem_slab = 1; 1909 else 1910 kmem_page = 1; 1911 } 1912 1913 if (!strncmp(argv[0], "rec", 3)) { 1914 symbol__init(NULL); 1915 return __cmd_record(argc, argv); 1916 } 1917 1918 file.path = input_name; 1919 1920 kmem_session = session = perf_session__new(&file, false, &perf_kmem); 1921 if (session == NULL) 1922 return -1; 1923 1924 if (kmem_slab) { 1925 if (!perf_evlist__find_tracepoint_by_name(session->evlist, 1926 "kmem:kmalloc")) { 1927 pr_err(errmsg, "slab", "slab"); 1928 goto out_delete; 1929 } 1930 } 1931 1932 if (kmem_page) { 1933 struct perf_evsel *evsel; 1934 1935 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, 1936 "kmem:mm_page_alloc"); 1937 if (evsel == NULL) { 1938 pr_err(errmsg, "page", "page"); 1939 goto out_delete; 1940 } 1941 1942 kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent); 1943 symbol_conf.use_callchain = true; 1944 } 1945 1946 symbol__init(&session->header.env); 1947 1948 if (!strcmp(argv[0], "stat")) { 1949 setlocale(LC_ALL, ""); 1950 1951 if (cpu__setup_cpunode_map()) 1952 goto out_delete; 1953 1954 if (list_empty(&slab_caller_sort)) 1955 setup_slab_sorting(&slab_caller_sort, default_slab_sort); 1956 if (list_empty(&slab_alloc_sort)) 1957 setup_slab_sorting(&slab_alloc_sort, default_slab_sort); 1958 if (list_empty(&page_caller_sort)) 1959 setup_page_sorting(&page_caller_sort, default_page_sort); 1960 if (list_empty(&page_alloc_sort)) 1961 setup_page_sorting(&page_alloc_sort, default_page_sort); 1962 1963 if (kmem_page) { 1964 setup_page_sorting(&page_alloc_sort_input, 1965 "page,order,migtype,gfp"); 1966 setup_page_sorting(&page_caller_sort_input, 1967 "callsite,order,migtype,gfp"); 1968 } 1969 ret = __cmd_kmem(session); 1970 } else 1971 usage_with_options(kmem_usage, kmem_options); 1972 1973 out_delete: 1974 perf_session__delete(session); 1975 1976 return ret; 1977 } 1978 1979