xref: /openbmc/linux/tools/perf/builtin-kmem.c (revision 09b35b41)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "builtin.h"
3 #include "perf.h"
4 
5 #include "util/dso.h"
6 #include "util/evlist.h"
7 #include "util/evsel.h"
8 #include "util/config.h"
9 #include "util/map.h"
10 #include "util/symbol.h"
11 #include "util/thread.h"
12 #include "util/header.h"
13 #include "util/session.h"
14 #include "util/tool.h"
15 #include "util/callchain.h"
16 #include "util/time-utils.h"
17 
18 #include <subcmd/pager.h>
19 #include <subcmd/parse-options.h>
20 #include "util/trace-event.h"
21 #include "util/data.h"
22 #include "util/cpumap.h"
23 
24 #include "util/debug.h"
25 #include "util/string2.h"
26 
27 #include <linux/kernel.h>
28 #include <linux/rbtree.h>
29 #include <linux/string.h>
30 #include <linux/zalloc.h>
31 #include <errno.h>
32 #include <inttypes.h>
33 #include <locale.h>
34 #include <regex.h>
35 
36 #include <linux/ctype.h>
37 
38 static int	kmem_slab;
39 static int	kmem_page;
40 
41 static long	kmem_page_size;
42 static enum {
43 	KMEM_SLAB,
44 	KMEM_PAGE,
45 } kmem_default = KMEM_SLAB;  /* for backward compatibility */
46 
47 struct alloc_stat;
48 typedef int (*sort_fn_t)(void *, void *);
49 
50 static int			alloc_flag;
51 static int			caller_flag;
52 
53 static int			alloc_lines = -1;
54 static int			caller_lines = -1;
55 
56 static bool			raw_ip;
57 
58 struct alloc_stat {
59 	u64	call_site;
60 	u64	ptr;
61 	u64	bytes_req;
62 	u64	bytes_alloc;
63 	u64	last_alloc;
64 	u32	hit;
65 	u32	pingpong;
66 
67 	short	alloc_cpu;
68 
69 	struct rb_node node;
70 };
71 
72 static struct rb_root root_alloc_stat;
73 static struct rb_root root_alloc_sorted;
74 static struct rb_root root_caller_stat;
75 static struct rb_root root_caller_sorted;
76 
77 static unsigned long total_requested, total_allocated, total_freed;
78 static unsigned long nr_allocs, nr_cross_allocs;
79 
80 /* filter controlling the start and stop time of the analysis */
81 static struct perf_time_interval ptime;
82 static const char *time_str;
83 
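/*
 * Track one slab allocation per pointer: look the pointer up in the
 * rbtree sorted by 'ptr' and update its stats, or insert a new node.
 */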
84 static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
85 			     int bytes_req, int bytes_alloc, int cpu)
86 {
87 	struct rb_node **node = &root_alloc_stat.rb_node;
88 	struct rb_node *parent = NULL;
89 	struct alloc_stat *data = NULL;
90 
91 	while (*node) {
92 		parent = *node;
93 		data = rb_entry(*node, struct alloc_stat, node);
94 
95 		if (ptr > data->ptr)
96 			node = &(*node)->rb_right;
97 		else if (ptr < data->ptr)
98 			node = &(*node)->rb_left;
99 		else
100 			break;
101 	}
102 
103 	if (data && data->ptr == ptr) {
104 		data->hit++;
105 		data->bytes_req += bytes_req;
106 		data->bytes_alloc += bytes_alloc;
107 	} else {
108 		data = malloc(sizeof(*data));
109 		if (!data) {
110 			pr_err("%s: malloc failed\n", __func__);
111 			return -1;
112 		}
113 		data->ptr = ptr;
114 		data->pingpong = 0;
115 		data->hit = 1;
116 		data->bytes_req = bytes_req;
117 		data->bytes_alloc = bytes_alloc;
118 
119 		rb_link_node(&data->node, parent, node);
120 		rb_insert_color(&data->node, &root_alloc_stat);
121 	}
122 	data->call_site = call_site;
123 	data->alloc_cpu = cpu;
124 	data->last_alloc = bytes_alloc;
125 
126 	return 0;
127 }
128 
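/*
 * Same as insert_alloc_stat() but keyed by call site, so stats are
 * accumulated per allocating caller instead of per pointer.
 */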
129 static int insert_caller_stat(unsigned long call_site,
130 			      int bytes_req, int bytes_alloc)
131 {
132 	struct rb_node **node = &root_caller_stat.rb_node;
133 	struct rb_node *parent = NULL;
134 	struct alloc_stat *data = NULL;
135 
136 	while (*node) {
137 		parent = *node;
138 		data = rb_entry(*node, struct alloc_stat, node);
139 
140 		if (call_site > data->call_site)
141 			node = &(*node)->rb_right;
142 		else if (call_site < data->call_site)
143 			node = &(*node)->rb_left;
144 		else
145 			break;
146 	}
147 
148 	if (data && data->call_site == call_site) {
149 		data->hit++;
150 		data->bytes_req += bytes_req;
151 		data->bytes_alloc += bytes_alloc;
152 	} else {
153 		data = malloc(sizeof(*data));
154 		if (!data) {
155 			pr_err("%s: malloc failed\n", __func__);
156 			return -1;
157 		}
158 		data->call_site = call_site;
159 		data->pingpong = 0;
160 		data->hit = 1;
161 		data->bytes_req = bytes_req;
162 		data->bytes_alloc = bytes_alloc;
163 
164 		rb_link_node(&data->node, parent, node);
165 		rb_insert_color(&data->node, &root_caller_stat);
166 	}
167 
168 	return 0;
169 }
170 
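/*
 * Handler for the slab allocation tracepoints (kmem:kmalloc and
 * kmem:kmem_cache_alloc; the _node variants below reuse it): update both
 * trees and the global request/allocation totals.
 */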
171 static int perf_evsel__process_alloc_event(struct evsel *evsel,
172 					   struct perf_sample *sample)
173 {
174 	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
175 		      call_site = perf_evsel__intval(evsel, sample, "call_site");
176 	int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
177 	    bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");
178 
179 	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
180 	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
181 		return -1;
182 
183 	total_requested += bytes_req;
184 	total_allocated += bytes_alloc;
185 
186 	nr_allocs++;
187 	return 0;
188 }
189 
190 static int perf_evsel__process_alloc_node_event(struct evsel *evsel,
191 						struct perf_sample *sample)
192 {
193 	int ret = perf_evsel__process_alloc_event(evsel, sample);
194 
195 	if (!ret) {
196 		int node1 = cpu__get_node(sample->cpu),
197 		    node2 = perf_evsel__intval(evsel, sample, "node");
198 
199 		if (node1 != node2)
200 			nr_cross_allocs++;
201 	}
202 
203 	return ret;
204 }
205 
206 static int ptr_cmp(void *, void *);
207 static int slab_callsite_cmp(void *, void *);
208 
209 static struct alloc_stat *search_alloc_stat(unsigned long ptr,
210 					    unsigned long call_site,
211 					    struct rb_root *root,
212 					    sort_fn_t sort_fn)
213 {
214 	struct rb_node *node = root->rb_node;
215 	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };
216 
217 	while (node) {
218 		struct alloc_stat *data;
219 		int cmp;
220 
221 		data = rb_entry(node, struct alloc_stat, node);
222 
223 		cmp = sort_fn(&key, data);
224 		if (cmp < 0)
225 			node = node->rb_left;
226 		else if (cmp > 0)
227 			node = node->rb_right;
228 		else
229 			return data;
230 	}
231 	return NULL;
232 }
233 
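/*
 * Handler for kmem:kfree and kmem:kmem_cache_free: a free on a different
 * CPU than the allocation counts as a "ping-pong" for both the pointer
 * and its call site.
 */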
234 static int perf_evsel__process_free_event(struct evsel *evsel,
235 					  struct perf_sample *sample)
236 {
237 	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
238 	struct alloc_stat *s_alloc, *s_caller;
239 
240 	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
241 	if (!s_alloc)
242 		return 0;
243 
244 	total_freed += s_alloc->last_alloc;
245 
246 	if ((short)sample->cpu != s_alloc->alloc_cpu) {
247 		s_alloc->pingpong++;
248 
249 		s_caller = search_alloc_stat(0, s_alloc->call_site,
250 					     &root_caller_stat,
251 					     slab_callsite_cmp);
252 		if (!s_caller)
253 			return -1;
254 		s_caller->pingpong++;
255 	}
256 	s_alloc->alloc_cpu = -1;
257 
258 	return 0;
259 }
260 
261 static u64 total_page_alloc_bytes;
262 static u64 total_page_free_bytes;
263 static u64 total_page_nomatch_bytes;
264 static u64 total_page_fail_bytes;
265 static unsigned long nr_page_allocs;
266 static unsigned long nr_page_frees;
267 static unsigned long nr_page_fails;
268 static unsigned long nr_page_nomatch;
269 
270 static bool use_pfn;
271 static bool live_page;
272 static struct perf_session *kmem_session;
273 
274 #define MAX_MIGRATE_TYPES  6
275 #define MAX_PAGE_ORDER     11
276 
277 static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];
278 
279 struct page_stat {
280 	struct rb_node 	node;
281 	u64 		page;
282 	u64 		callsite;
283 	int 		order;
284 	unsigned 	gfp_flags;
285 	unsigned 	migrate_type;
286 	u64		alloc_bytes;
287 	u64 		free_bytes;
288 	int 		nr_alloc;
289 	int 		nr_free;
290 };
291 
292 static struct rb_root page_live_tree;
293 static struct rb_root page_alloc_tree;
294 static struct rb_root page_alloc_sorted;
295 static struct rb_root page_caller_tree;
296 static struct rb_root page_caller_sorted;
297 
298 struct alloc_func {
299 	u64 start;
300 	u64 end;
301 	char *name;
302 };
303 
304 static int nr_alloc_funcs;
305 static struct alloc_func *alloc_func_list;
306 
307 static int funcmp(const void *a, const void *b)
308 {
309 	const struct alloc_func *fa = a;
310 	const struct alloc_func *fb = b;
311 
312 	if (fa->start > fb->start)
313 		return 1;
314 	else
315 		return -1;
316 }
317 
318 static int callcmp(const void *a, const void *b)
319 {
320 	const struct alloc_func *fa = a;
321 	const struct alloc_func *fb = b;
322 
323 	if (fb->start <= fa->start && fa->end < fb->end)
324 		return 0;
325 
326 	if (fa->start > fb->start)
327 		return 1;
328 	else
329 		return -1;
330 }
331 
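/*
 * Collect kernel symbols that look like page allocation entry points
 * (see 'pattern' below); find_callsite() skips these when walking the
 * callchain so the reported call site is the real caller.
 */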
332 static int build_alloc_func_list(void)
333 {
334 	int ret;
335 	struct map *kernel_map;
336 	struct symbol *sym;
337 	struct rb_node *node;
338 	struct alloc_func *func;
339 	struct machine *machine = &kmem_session->machines.host;
340 	regex_t alloc_func_regex;
341 	static const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
342 
343 	ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
344 	if (ret) {
345 		char err[BUFSIZ];
346 
347 		regerror(ret, &alloc_func_regex, err, sizeof(err));
348 		pr_err("Invalid regex: %s\n%s", pattern, err);
349 		return -EINVAL;
350 	}
351 
352 	kernel_map = machine__kernel_map(machine);
353 	if (map__load(kernel_map) < 0) {
354 		pr_err("cannot load kernel map\n");
355 		return -ENOENT;
356 	}
357 
358 	map__for_each_symbol(kernel_map, sym, node) {
359 		if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
360 			continue;
361 
362 		func = realloc(alloc_func_list,
363 			       (nr_alloc_funcs + 1) * sizeof(*func));
364 		if (func == NULL)
365 			return -ENOMEM;
366 
367 		pr_debug("alloc func: %s\n", sym->name);
368 		func[nr_alloc_funcs].start = sym->start;
369 		func[nr_alloc_funcs].end   = sym->end;
370 		func[nr_alloc_funcs].name  = sym->name;
371 
372 		alloc_func_list = func;
373 		nr_alloc_funcs++;
374 	}
375 
376 	qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);
377 
378 	regfree(&alloc_func_regex);
379 	return 0;
380 }
381 
382 /*
383  * Find the first function in the callchain that is not itself a memory
384  * allocation function; the allocation functions are in 'alloc_func_list'.
385  */
386 static u64 find_callsite(struct evsel *evsel, struct perf_sample *sample)
387 {
388 	struct addr_location al;
389 	struct machine *machine = &kmem_session->machines.host;
390 	struct callchain_cursor_node *node;
391 
392 	if (alloc_func_list == NULL) {
393 		if (build_alloc_func_list() < 0)
394 			goto out;
395 	}
396 
397 	al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
398 	sample__resolve_callchain(sample, &callchain_cursor, NULL, evsel, &al, 16);
399 
400 	callchain_cursor_commit(&callchain_cursor);
401 	while (true) {
402 		struct alloc_func key, *caller;
403 		u64 addr;
404 
405 		node = callchain_cursor_current(&callchain_cursor);
406 		if (node == NULL)
407 			break;
408 
409 		key.start = key.end = node->ip;
410 		caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
411 				 sizeof(key), callcmp);
412 		if (!caller) {
413 			/* found */
414 			/* found a call site that is not an allocator itself */
415 				addr = map__unmap_ip(node->map, node->ip);
416 			else
417 				addr = node->ip;
418 
419 			return addr;
420 		} else
421 			pr_debug3("skipping alloc function: %s\n", caller->name);
422 
423 		callchain_cursor_advance(&callchain_cursor);
424 	}
425 
426 out:
427 	pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
428 	return sample->ip;
429 }
430 
431 struct sort_dimension {
432 	const char		name[20];
433 	sort_fn_t		cmp;
434 	struct list_head	list;
435 };
436 
437 static LIST_HEAD(page_alloc_sort_input);
438 static LIST_HEAD(page_caller_sort_input);
439 
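/*
 * page_live_tree tracks currently outstanding page allocations, keyed by
 * pfn (or struct page address); look up 'pstat', optionally creating a
 * new entry.
 */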
440 static struct page_stat *
441 __page_stat__findnew_page(struct page_stat *pstat, bool create)
442 {
443 	struct rb_node **node = &page_live_tree.rb_node;
444 	struct rb_node *parent = NULL;
445 	struct page_stat *data;
446 
447 	while (*node) {
448 		s64 cmp;
449 
450 		parent = *node;
451 		data = rb_entry(*node, struct page_stat, node);
452 
453 		cmp = data->page - pstat->page;
454 		if (cmp < 0)
455 			node = &parent->rb_left;
456 		else if (cmp > 0)
457 			node = &parent->rb_right;
458 		else
459 			return data;
460 	}
461 
462 	if (!create)
463 		return NULL;
464 
465 	data = zalloc(sizeof(*data));
466 	if (data != NULL) {
467 		data->page = pstat->page;
468 		data->order = pstat->order;
469 		data->gfp_flags = pstat->gfp_flags;
470 		data->migrate_type = pstat->migrate_type;
471 
472 		rb_link_node(&data->node, parent, node);
473 		rb_insert_color(&data->node, &page_live_tree);
474 	}
475 
476 	return data;
477 }
478 
479 static struct page_stat *page_stat__find_page(struct page_stat *pstat)
480 {
481 	return __page_stat__findnew_page(pstat, false);
482 }
483 
484 static struct page_stat *page_stat__findnew_page(struct page_stat *pstat)
485 {
486 	return __page_stat__findnew_page(pstat, true);
487 }
488 
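/*
 * Unlike the live tree above, page_alloc_tree (used when --live is not
 * given) keeps entries for freed pages too; lookups are ordered by the
 * page_alloc_sort_input keys.
 */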
489 static struct page_stat *
490 __page_stat__findnew_alloc(struct page_stat *pstat, bool create)
491 {
492 	struct rb_node **node = &page_alloc_tree.rb_node;
493 	struct rb_node *parent = NULL;
494 	struct page_stat *data;
495 	struct sort_dimension *sort;
496 
497 	while (*node) {
498 		int cmp = 0;
499 
500 		parent = *node;
501 		data = rb_entry(*node, struct page_stat, node);
502 
503 		list_for_each_entry(sort, &page_alloc_sort_input, list) {
504 			cmp = sort->cmp(pstat, data);
505 			if (cmp)
506 				break;
507 		}
508 
509 		if (cmp < 0)
510 			node = &parent->rb_left;
511 		else if (cmp > 0)
512 			node = &parent->rb_right;
513 		else
514 			return data;
515 	}
516 
517 	if (!create)
518 		return NULL;
519 
520 	data = zalloc(sizeof(*data));
521 	if (data != NULL) {
522 		data->page = pstat->page;
523 		data->order = pstat->order;
524 		data->gfp_flags = pstat->gfp_flags;
525 		data->migrate_type = pstat->migrate_type;
526 
527 		rb_link_node(&data->node, parent, node);
528 		rb_insert_color(&data->node, &page_alloc_tree);
529 	}
530 
531 	return data;
532 }
533 
534 static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
535 {
536 	return __page_stat__findnew_alloc(pstat, false);
537 }
538 
539 static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
540 {
541 	return __page_stat__findnew_alloc(pstat, true);
542 }
543 
544 static struct page_stat *
545 __page_stat__findnew_caller(struct page_stat *pstat, bool create)
546 {
547 	struct rb_node **node = &page_caller_tree.rb_node;
548 	struct rb_node *parent = NULL;
549 	struct page_stat *data;
550 	struct sort_dimension *sort;
551 
552 	while (*node) {
553 		int cmp = 0;
554 
555 		parent = *node;
556 		data = rb_entry(*node, struct page_stat, node);
557 
558 		list_for_each_entry(sort, &page_caller_sort_input, list) {
559 			cmp = sort->cmp(pstat, data);
560 			if (cmp)
561 				break;
562 		}
563 
564 		if (cmp < 0)
565 			node = &parent->rb_left;
566 		else if (cmp > 0)
567 			node = &parent->rb_right;
568 		else
569 			return data;
570 	}
571 
572 	if (!create)
573 		return NULL;
574 
575 	data = zalloc(sizeof(*data));
576 	if (data != NULL) {
577 		data->callsite = pstat->callsite;
578 		data->order = pstat->order;
579 		data->gfp_flags = pstat->gfp_flags;
580 		data->migrate_type = pstat->migrate_type;
581 
582 		rb_link_node(&data->node, parent, node);
583 		rb_insert_color(&data->node, &page_caller_tree);
584 	}
585 
586 	return data;
587 }
588 
589 static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
590 {
591 	return __page_stat__findnew_caller(pstat, false);
592 }
593 
594 static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
595 {
596 	return __page_stat__findnew_caller(pstat, true);
597 }
598 
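/* A failed allocation is reported as pfn == -1 (or page == NULL). */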
599 static bool valid_page(u64 pfn_or_page)
600 {
601 	if (use_pfn && pfn_or_page == -1UL)
602 		return false;
603 	if (!use_pfn && pfn_or_page == 0)
604 		return false;
605 	return true;
606 }
607 
608 struct gfp_flag {
609 	unsigned int flags;
610 	char *compact_str;
611 	char *human_readable;
612 };
613 
614 static struct gfp_flag *gfps;
615 static int nr_gfps;
616 
617 static int gfpcmp(const void *a, const void *b)
618 {
619 	const struct gfp_flag *fa = a;
620 	const struct gfp_flag *fb = b;
621 
622 	return fa->flags - fb->flags;
623 }
624 
625 /* see include/trace/events/mmflags.h */
626 static const struct {
627 	const char *original;
628 	const char *compact;
629 } gfp_compact_table[] = {
630 	{ "GFP_TRANSHUGE",		"THP" },
631 	{ "GFP_TRANSHUGE_LIGHT",	"THL" },
632 	{ "GFP_HIGHUSER_MOVABLE",	"HUM" },
633 	{ "GFP_HIGHUSER",		"HU" },
634 	{ "GFP_USER",			"U" },
635 	{ "GFP_KERNEL_ACCOUNT",		"KAC" },
636 	{ "GFP_KERNEL",			"K" },
637 	{ "GFP_NOFS",			"NF" },
638 	{ "GFP_ATOMIC",			"A" },
639 	{ "GFP_NOIO",			"NI" },
640 	{ "GFP_NOWAIT",			"NW" },
641 	{ "GFP_DMA",			"D" },
642 	{ "__GFP_HIGHMEM",		"HM" },
643 	{ "GFP_DMA32",			"D32" },
644 	{ "__GFP_HIGH",			"H" },
645 	{ "__GFP_ATOMIC",		"_A" },
646 	{ "__GFP_IO",			"I" },
647 	{ "__GFP_FS",			"F" },
648 	{ "__GFP_NOWARN",		"NWR" },
649 	{ "__GFP_RETRY_MAYFAIL",	"R" },
650 	{ "__GFP_NOFAIL",		"NF" },
651 	{ "__GFP_NORETRY",		"NR" },
652 	{ "__GFP_COMP",			"C" },
653 	{ "__GFP_ZERO",			"Z" },
654 	{ "__GFP_NOMEMALLOC",		"NMA" },
655 	{ "__GFP_MEMALLOC",		"MA" },
656 	{ "__GFP_HARDWALL",		"HW" },
657 	{ "__GFP_THISNODE",		"TN" },
658 	{ "__GFP_RECLAIMABLE",		"RC" },
659 	{ "__GFP_MOVABLE",		"M" },
660 	{ "__GFP_ACCOUNT",		"AC" },
661 	{ "__GFP_WRITE",		"WR" },
662 	{ "__GFP_RECLAIM",		"R" },
663 	{ "__GFP_DIRECT_RECLAIM",	"DR" },
664 	{ "__GFP_KSWAPD_RECLAIM",	"KR" },
665 };
666 
667 static size_t max_gfp_len;
668 
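/*
 * Compact a flags string such as "GFP_KERNEL|__GFP_ZERO" into "K|Z"
 * using gfp_compact_table, remembering the longest result for column
 * alignment in the output.
 */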
669 static char *compact_gfp_flags(char *gfp_flags)
670 {
671 	char *orig_flags = strdup(gfp_flags);
672 	char *new_flags = NULL;
673 	char *str, *pos = NULL;
674 	size_t len = 0;
675 
676 	if (orig_flags == NULL)
677 		return NULL;
678 
679 	str = strtok_r(orig_flags, "|", &pos);
680 	while (str) {
681 		size_t i;
682 		char *new;
683 		const char *cpt;
684 
685 		for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) {
686 			if (strcmp(gfp_compact_table[i].original, str))
687 				continue;
688 
689 			cpt = gfp_compact_table[i].compact;
690 			new = realloc(new_flags, len + strlen(cpt) + 2);
691 			if (new == NULL) {
692 				free(new_flags);
693 				return NULL;
694 			}
695 
696 			new_flags = new;
697 
698 			if (!len) {
699 				strcpy(new_flags, cpt);
700 			} else {
701 				strcat(new_flags, "|");
702 				strcat(new_flags, cpt);
703 				len++;
704 			}
705 
706 			len += strlen(cpt);
707 		}
708 
709 		str = strtok_r(NULL, "|", &pos);
710 	}
711 
712 	if (max_gfp_len < len)
713 		max_gfp_len = len;
714 
715 	free(orig_flags);
716 	return new_flags;
717 }
718 
719 static char *compact_gfp_string(unsigned long gfp_flags)
720 {
721 	struct gfp_flag key = {
722 		.flags = gfp_flags,
723 	};
724 	struct gfp_flag *gfp;
725 
726 	gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
727 	if (gfp)
728 		return gfp->compact_str;
729 
730 	return NULL;
731 }
732 
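/*
 * The sample only carries gfp_flags as a raw value; pretty-print the
 * event once per unique combination and cache the human readable and
 * compact strings in the sorted 'gfps' array.
 */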
733 static int parse_gfp_flags(struct evsel *evsel, struct perf_sample *sample,
734 			   unsigned int gfp_flags)
735 {
736 	struct tep_record record = {
737 		.cpu = sample->cpu,
738 		.data = sample->raw_data,
739 		.size = sample->raw_size,
740 	};
741 	struct trace_seq seq;
742 	char *str, *pos = NULL;
743 
744 	if (nr_gfps) {
745 		struct gfp_flag key = {
746 			.flags = gfp_flags,
747 		};
748 
749 		if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp))
750 			return 0;
751 	}
752 
753 	trace_seq_init(&seq);
754 	tep_print_event(evsel->tp_format->tep,
755 			&seq, &record, "%s", TEP_PRINT_INFO);
756 
757 	str = strtok_r(seq.buffer, " ", &pos);
758 	while (str) {
759 		if (!strncmp(str, "gfp_flags=", 10)) {
760 			struct gfp_flag *new;
761 
762 			new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps));
763 			if (new == NULL)
764 				return -ENOMEM;
765 
766 			gfps = new;
767 			new += nr_gfps++;
768 
769 			new->flags = gfp_flags;
770 			new->human_readable = strdup(str + 10);
771 			new->compact_str = compact_gfp_flags(str + 10);
772 			if (!new->human_readable || !new->compact_str)
773 				return -ENOMEM;
774 
775 			qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp);
776 		}
777 
778 		str = strtok_r(NULL, " ", &pos);
779 	}
780 
781 	trace_seq_destroy(&seq);
782 	return 0;
783 }
784 
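/*
 * kmem:mm_page_alloc handler: account the allocation in the live page
 * tree, the per-page tree (unless --live) and the per-callsite tree, and
 * update the order/migrate-type matrix.
 */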
785 static int perf_evsel__process_page_alloc_event(struct evsel *evsel,
786 						struct perf_sample *sample)
787 {
788 	u64 page;
789 	unsigned int order = perf_evsel__intval(evsel, sample, "order");
790 	unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
791 	unsigned int migrate_type = perf_evsel__intval(evsel, sample,
792 						       "migratetype");
793 	u64 bytes = kmem_page_size << order;
794 	u64 callsite;
795 	struct page_stat *pstat;
796 	struct page_stat this = {
797 		.order = order,
798 		.gfp_flags = gfp_flags,
799 		.migrate_type = migrate_type,
800 	};
801 
802 	if (use_pfn)
803 		page = perf_evsel__intval(evsel, sample, "pfn");
804 	else
805 		page = perf_evsel__intval(evsel, sample, "page");
806 
807 	nr_page_allocs++;
808 	total_page_alloc_bytes += bytes;
809 
810 	if (!valid_page(page)) {
811 		nr_page_fails++;
812 		total_page_fail_bytes += bytes;
813 
814 		return 0;
815 	}
816 
817 	if (parse_gfp_flags(evsel, sample, gfp_flags) < 0)
818 		return -1;
819 
820 	callsite = find_callsite(evsel, sample);
821 
822 	/*
823 	 * Record this page in the live tree so that the free event can find
824 	 * it again with the correct gfp flags and migrate type.
825 	 */
826 	this.page = page;
827 	pstat = page_stat__findnew_page(&this);
828 	if (pstat == NULL)
829 		return -ENOMEM;
830 
831 	pstat->nr_alloc++;
832 	pstat->alloc_bytes += bytes;
833 	pstat->callsite = callsite;
834 
835 	if (!live_page) {
836 		pstat = page_stat__findnew_alloc(&this);
837 		if (pstat == NULL)
838 			return -ENOMEM;
839 
840 		pstat->nr_alloc++;
841 		pstat->alloc_bytes += bytes;
842 		pstat->callsite = callsite;
843 	}
844 
845 	this.callsite = callsite;
846 	pstat = page_stat__findnew_caller(&this);
847 	if (pstat == NULL)
848 		return -ENOMEM;
849 
850 	pstat->nr_alloc++;
851 	pstat->alloc_bytes += bytes;
852 
853 	order_stats[order][migrate_type]++;
854 
855 	return 0;
856 }
857 
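/*
 * kmem:mm_page_free handler: match the free against the live page tree
 * and carry the gfp flags, migrate type and call site recorded at
 * allocation time over to the free statistics.
 */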
858 static int perf_evsel__process_page_free_event(struct evsel *evsel,
859 						struct perf_sample *sample)
860 {
861 	u64 page;
862 	unsigned int order = perf_evsel__intval(evsel, sample, "order");
863 	u64 bytes = kmem_page_size << order;
864 	struct page_stat *pstat;
865 	struct page_stat this = {
866 		.order = order,
867 	};
868 
869 	if (use_pfn)
870 		page = perf_evsel__intval(evsel, sample, "pfn");
871 	else
872 		page = perf_evsel__intval(evsel, sample, "page");
873 
874 	nr_page_frees++;
875 	total_page_free_bytes += bytes;
876 
877 	this.page = page;
878 	pstat = page_stat__find_page(&this);
879 	if (pstat == NULL) {
880 		pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
881 		pr_debug2("freed page %"PRIx64" (order: %d) has no matching alloc event\n",
882 
883 		nr_page_nomatch++;
884 		total_page_nomatch_bytes += bytes;
885 
886 		return 0;
887 	}
888 
889 	this.gfp_flags = pstat->gfp_flags;
890 	this.migrate_type = pstat->migrate_type;
891 	this.callsite = pstat->callsite;
892 
893 	rb_erase(&pstat->node, &page_live_tree);
894 	free(pstat);
895 
896 	if (live_page) {
897 		order_stats[this.order][this.migrate_type]--;
898 	} else {
899 		pstat = page_stat__find_alloc(&this);
900 		if (pstat == NULL)
901 			return -ENOMEM;
902 
903 		pstat->nr_free++;
904 		pstat->free_bytes += bytes;
905 	}
906 
907 	pstat = page_stat__find_caller(&this);
908 	if (pstat == NULL)
909 		return -ENOENT;
910 
911 	pstat->nr_free++;
912 	pstat->free_bytes += bytes;
913 
914 	if (live_page) {
915 		pstat->nr_alloc--;
916 		pstat->alloc_bytes -= bytes;
917 
918 		if (pstat->nr_alloc == 0) {
919 			rb_erase(&pstat->node, &page_caller_tree);
920 			free(pstat);
921 		}
922 	}
923 
924 	return 0;
925 }
926 
927 static bool perf_kmem__skip_sample(struct perf_sample *sample)
928 {
929 	/* skip sample based on time? */
930 	if (perf_time__skip_sample(&ptime, sample->time))
931 		return true;
932 
933 	return false;
934 }
935 
936 typedef int (*tracepoint_handler)(struct evsel *evsel,
937 				  struct perf_sample *sample);
938 
939 static int process_sample_event(struct perf_tool *tool __maybe_unused,
940 				union perf_event *event,
941 				struct perf_sample *sample,
942 				struct evsel *evsel,
943 				struct machine *machine)
944 {
945 	int err = 0;
946 	struct thread *thread = machine__findnew_thread(machine, sample->pid,
947 							sample->tid);
948 
949 	if (thread == NULL) {
950 		pr_debug("problem processing %d event, skipping it.\n",
951 			 event->header.type);
952 		return -1;
953 	}
954 
955 	if (perf_kmem__skip_sample(sample)) {
		thread__put(thread);
		return 0;
	}
957 
958 	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
959 
960 	if (evsel->handler != NULL) {
961 		tracepoint_handler f = evsel->handler;
962 		err = f(evsel, sample);
963 	}
964 
965 	thread__put(thread);
966 
967 	return err;
968 }
969 
970 static struct perf_tool perf_kmem = {
971 	.sample		 = process_sample_event,
972 	.comm		 = perf_event__process_comm,
973 	.mmap		 = perf_event__process_mmap,
974 	.mmap2		 = perf_event__process_mmap2,
975 	.namespaces	 = perf_event__process_namespaces,
976 	.ordered_events	 = true,
977 };
978 
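/*
 * Internal fragmentation in percent: e.g. 48 bytes requested from a
 * 64-byte slab object is 25% fragmentation.
 */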
979 static double fragmentation(unsigned long n_req, unsigned long n_alloc)
980 {
981 	if (n_alloc == 0)
982 		return 0.0;
983 	else
984 		return 100.0 - (100.0 * n_req / n_alloc);
985 }
986 
987 static void __print_slab_result(struct rb_root *root,
988 				struct perf_session *session,
989 				int n_lines, int is_caller)
990 {
991 	struct rb_node *next;
992 	struct machine *machine = &session->machines.host;
993 
994 	printf("%.105s\n", graph_dotted_line);
995 	printf(" %-34s |",  is_caller ? "Callsite": "Alloc Ptr");
996 	printf(" Total_alloc/Per | Total_req/Per   | Hit      | Ping-pong | Frag\n");
997 	printf("%.105s\n", graph_dotted_line);
998 
999 	next = rb_first(root);
1000 
1001 	while (next && n_lines--) {
1002 		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
1003 						   node);
1004 		struct symbol *sym = NULL;
1005 		struct map *map;
1006 		char buf[BUFSIZ];
1007 		u64 addr;
1008 
1009 		if (is_caller) {
1010 			addr = data->call_site;
1011 			if (!raw_ip)
1012 				sym = machine__find_kernel_symbol(machine, addr, &map);
1013 		} else
1014 			addr = data->ptr;
1015 
1016 		if (sym != NULL)
1017 			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
1018 				 addr - map->unmap_ip(map, sym->start));
1019 		else
1020 			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
1021 		printf(" %-34s |", buf);
1022 
1023 		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
1024 		       (unsigned long long)data->bytes_alloc,
1025 		       (unsigned long)data->bytes_alloc / data->hit,
1026 		       (unsigned long long)data->bytes_req,
1027 		       (unsigned long)data->bytes_req / data->hit,
1028 		       (unsigned long)data->hit,
1029 		       (unsigned long)data->pingpong,
1030 		       fragmentation(data->bytes_req, data->bytes_alloc));
1031 
1032 		next = rb_next(next);
1033 	}
1034 
1035 	if (n_lines == -1)
1036 		printf(" ...                                | ...             | ...             | ...      | ...       | ...   \n");
1037 
1038 	printf("%.105s\n", graph_dotted_line);
1039 }
1040 
1041 static const char * const migrate_type_str[] = {
1042 	"UNMOVABL",
1043 	"RECLAIM",
1044 	"MOVABLE",
1045 	"RESERVED",
1046 	"CMA/ISLT",
1047 	"UNKNOWN",
1048 };
1049 
1050 static void __print_page_alloc_result(struct perf_session *session, int n_lines)
1051 {
1052 	struct rb_node *next = rb_first(&page_alloc_sorted);
1053 	struct machine *machine = &session->machines.host;
1054 	const char *format;
1055 	int gfp_len = max(strlen("GFP flags"), max_gfp_len);
1056 
1057 	printf("\n%.105s\n", graph_dotted_line);
1058 	printf(" %-16s | %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
1059 	       use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total",
1060 	       gfp_len, "GFP flags");
1061 	printf("%.105s\n", graph_dotted_line);
1062 
1063 	if (use_pfn)
1064 		format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
1065 	else
1066 		format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
1067 
1068 	while (next && n_lines--) {
1069 		struct page_stat *data;
1070 		struct symbol *sym;
1071 		struct map *map;
1072 		char buf[32];
1073 		char *caller = buf;
1074 
1075 		data = rb_entry(next, struct page_stat, node);
1076 		sym = machine__find_kernel_symbol(machine, data->callsite, &map);
1077 		if (sym)
1078 			caller = sym->name;
1079 		else
1080 			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
1081 
1082 		printf(format, (unsigned long long)data->page,
1083 		       (unsigned long long)data->alloc_bytes / 1024,
1084 		       data->nr_alloc, data->order,
1085 		       migrate_type_str[data->migrate_type],
1086 		       gfp_len, compact_gfp_string(data->gfp_flags), caller);
1087 
1088 		next = rb_next(next);
1089 	}
1090 
1091 	if (n_lines == -1) {
1092 		printf(" ...              | ...              | ...       | ...   | ...      | %-*s | ...\n",
1093 		       gfp_len, "...");
1094 	}
1095 
1096 	printf("%.105s\n", graph_dotted_line);
1097 }
1098 
1099 static void __print_page_caller_result(struct perf_session *session, int n_lines)
1100 {
1101 	struct rb_node *next = rb_first(&page_caller_sorted);
1102 	struct machine *machine = &session->machines.host;
1103 	int gfp_len = max(strlen("GFP flags"), max_gfp_len);
1104 
1105 	printf("\n%.105s\n", graph_dotted_line);
1106 	printf(" %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
1107 	       live_page ? "Live" : "Total", gfp_len, "GFP flags");
1108 	printf("%.105s\n", graph_dotted_line);
1109 
1110 	while (next && n_lines--) {
1111 		struct page_stat *data;
1112 		struct symbol *sym;
1113 		struct map *map;
1114 		char buf[32];
1115 		char *caller = buf;
1116 
1117 		data = rb_entry(next, struct page_stat, node);
1118 		sym = machine__find_kernel_symbol(machine, data->callsite, &map);
1119 		if (sym)
1120 			caller = sym->name;
1121 		else
1122 			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
1123 
1124 		printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n",
1125 		       (unsigned long long)data->alloc_bytes / 1024,
1126 		       data->nr_alloc, data->order,
1127 		       migrate_type_str[data->migrate_type],
1128 		       gfp_len, compact_gfp_string(data->gfp_flags), caller);
1129 
1130 		next = rb_next(next);
1131 	}
1132 
1133 	if (n_lines == -1) {
1134 		printf(" ...              | ...       | ...   | ...      | %-*s | ...\n",
1135 		       gfp_len, "...");
1136 	}
1137 
1138 	printf("%.105s\n", graph_dotted_line);
1139 }
1140 
1141 static void print_gfp_flags(void)
1142 {
1143 	int i;
1144 
1145 	printf("#\n");
1146 	printf("# GFP flags\n");
1147 	printf("# ---------\n");
1148 	for (i = 0; i < nr_gfps; i++) {
1149 		printf("# %08x: %*s: %s\n", gfps[i].flags,
1150 		       (int) max_gfp_len, gfps[i].compact_str,
1151 		       gfps[i].human_readable);
1152 	}
1153 }
1154 
1155 static void print_slab_summary(void)
1156 {
1157 	printf("\nSUMMARY (SLAB allocator)");
1158 	printf("\n========================\n");
1159 	printf("Total bytes requested: %'lu\n", total_requested);
1160 	printf("Total bytes allocated: %'lu\n", total_allocated);
1161 	printf("Total bytes freed:     %'lu\n", total_freed);
1162 	if (total_allocated > total_freed) {
1163 		printf("Net total bytes allocated: %'lu\n",
1164 		total_allocated - total_freed);
1165 	}
1166 	printf("Total bytes wasted on internal fragmentation: %'lu\n",
1167 	       total_allocated - total_requested);
1168 	printf("Internal fragmentation: %f%%\n",
1169 	       fragmentation(total_requested, total_allocated));
1170 	printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
1171 }
1172 
1173 static void print_page_summary(void)
1174 {
1175 	int o, m;
1176 	u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
1177 	u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;
1178 
1179 	printf("\nSUMMARY (page allocator)");
1180 	printf("\n========================\n");
1181 	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation requests",
1182 	       nr_page_allocs, total_page_alloc_bytes / 1024);
1183 	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free requests",
1184 	       nr_page_frees, total_page_free_bytes / 1024);
1185 	printf("\n");
1186 
1187 	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
1188 	       nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
1189 	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
1190 	       nr_page_allocs - nr_alloc_freed,
1191 	       (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
1192 	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free-only requests",
1193 	       nr_page_nomatch, total_page_nomatch_bytes / 1024);
1194 	printf("\n");
1195 
1196 	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation failures",
1197 	       nr_page_fails, total_page_fail_bytes / 1024);
1198 	printf("\n");
1199 
1200 	printf("%5s  %12s  %12s  %12s  %12s  %12s\n", "Order",  "Unmovable",
1201 	       "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
1202 	printf("%.5s  %.12s  %.12s  %.12s  %.12s  %.12s\n", graph_dotted_line,
1203 	       graph_dotted_line, graph_dotted_line, graph_dotted_line,
1204 	       graph_dotted_line, graph_dotted_line);
1205 
1206 	for (o = 0; o < MAX_PAGE_ORDER; o++) {
1207 		printf("%5d", o);
1208 		for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
1209 			if (order_stats[o][m])
1210 				printf("  %'12d", order_stats[o][m]);
1211 			else
1212 				printf("  %12c", '.');
1213 		}
1214 		printf("\n");
1215 	}
1216 }
1217 
1218 static void print_slab_result(struct perf_session *session)
1219 {
1220 	if (caller_flag)
1221 		__print_slab_result(&root_caller_sorted, session, caller_lines, 1);
1222 	if (alloc_flag)
1223 		__print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
1224 	print_slab_summary();
1225 }
1226 
1227 static void print_page_result(struct perf_session *session)
1228 {
1229 	if (caller_flag || alloc_flag)
1230 		print_gfp_flags();
1231 	if (caller_flag)
1232 		__print_page_caller_result(session, caller_lines);
1233 	if (alloc_flag)
1234 		__print_page_alloc_result(session, alloc_lines);
1235 	print_page_summary();
1236 }
1237 
1238 static void print_result(struct perf_session *session)
1239 {
1240 	if (kmem_slab)
1241 		print_slab_result(session);
1242 	if (kmem_page)
1243 		print_page_result(session);
1244 }
1245 
1246 static LIST_HEAD(slab_caller_sort);
1247 static LIST_HEAD(slab_alloc_sort);
1248 static LIST_HEAD(page_caller_sort);
1249 static LIST_HEAD(page_alloc_sort);
1250 
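/*
 * Move a stat node into the sorted output tree, ordering by the sort
 * keys selected with --sort (the first key that differs decides).
 */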
1251 static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
1252 			     struct list_head *sort_list)
1253 {
1254 	struct rb_node **new = &(root->rb_node);
1255 	struct rb_node *parent = NULL;
1256 	struct sort_dimension *sort;
1257 
1258 	while (*new) {
1259 		struct alloc_stat *this;
1260 		int cmp = 0;
1261 
1262 		this = rb_entry(*new, struct alloc_stat, node);
1263 		parent = *new;
1264 
1265 		list_for_each_entry(sort, sort_list, list) {
1266 			cmp = sort->cmp(data, this);
1267 			if (cmp)
1268 				break;
1269 		}
1270 
1271 		if (cmp > 0)
1272 			new = &((*new)->rb_left);
1273 		else
1274 			new = &((*new)->rb_right);
1275 	}
1276 
1277 	rb_link_node(&data->node, parent, new);
1278 	rb_insert_color(&data->node, root);
1279 }
1280 
1281 static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
1282 			       struct list_head *sort_list)
1283 {
1284 	struct rb_node *node;
1285 	struct alloc_stat *data;
1286 
1287 	for (;;) {
1288 		node = rb_first(root);
1289 		if (!node)
1290 			break;
1291 
1292 		rb_erase(node, root);
1293 		data = rb_entry(node, struct alloc_stat, node);
1294 		sort_slab_insert(root_sorted, data, sort_list);
1295 	}
1296 }
1297 
1298 static void sort_page_insert(struct rb_root *root, struct page_stat *data,
1299 			     struct list_head *sort_list)
1300 {
1301 	struct rb_node **new = &root->rb_node;
1302 	struct rb_node *parent = NULL;
1303 	struct sort_dimension *sort;
1304 
1305 	while (*new) {
1306 		struct page_stat *this;
1307 		int cmp = 0;
1308 
1309 		this = rb_entry(*new, struct page_stat, node);
1310 		parent = *new;
1311 
1312 		list_for_each_entry(sort, sort_list, list) {
1313 			cmp = sort->cmp(data, this);
1314 			if (cmp)
1315 				break;
1316 		}
1317 
1318 		if (cmp > 0)
1319 			new = &parent->rb_left;
1320 		else
1321 			new = &parent->rb_right;
1322 	}
1323 
1324 	rb_link_node(&data->node, parent, new);
1325 	rb_insert_color(&data->node, root);
1326 }
1327 
1328 static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
1329 			       struct list_head *sort_list)
1330 {
1331 	struct rb_node *node;
1332 	struct page_stat *data;
1333 
1334 	for (;;) {
1335 		node = rb_first(root);
1336 		if (!node)
1337 			break;
1338 
1339 		rb_erase(node, root);
1340 		data = rb_entry(node, struct page_stat, node);
1341 		sort_page_insert(root_sorted, data, sort_list);
1342 	}
1343 }
1344 
1345 static void sort_result(void)
1346 {
1347 	if (kmem_slab) {
1348 		__sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
1349 				   &slab_alloc_sort);
1350 		__sort_slab_result(&root_caller_stat, &root_caller_sorted,
1351 				   &slab_caller_sort);
1352 	}
1353 	if (kmem_page) {
1354 		if (live_page)
1355 			__sort_page_result(&page_live_tree, &page_alloc_sorted,
1356 					   &page_alloc_sort);
1357 		else
1358 			__sort_page_result(&page_alloc_tree, &page_alloc_sorted,
1359 					   &page_alloc_sort);
1360 
1361 		__sort_page_result(&page_caller_tree, &page_caller_sorted,
1362 				   &page_caller_sort);
1363 	}
1364 }
1365 
1366 static int __cmd_kmem(struct perf_session *session)
1367 {
1368 	int err = -EINVAL;
1369 	struct evsel *evsel;
1370 	const struct evsel_str_handler kmem_tracepoints[] = {
1371 		/* slab allocator */
1372 		{ "kmem:kmalloc",		perf_evsel__process_alloc_event, },
1373 		{ "kmem:kmem_cache_alloc",	perf_evsel__process_alloc_event, },
1374 		{ "kmem:kmalloc_node",		perf_evsel__process_alloc_node_event, },
1375 		{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
1376 		{ "kmem:kfree",			perf_evsel__process_free_event, },
1377 		{ "kmem:kmem_cache_free",	perf_evsel__process_free_event, },
1378 		/* page allocator */
1379 		{ "kmem:mm_page_alloc",		perf_evsel__process_page_alloc_event, },
1380 		{ "kmem:mm_page_free",		perf_evsel__process_page_free_event, },
1381 	};
1382 
1383 	if (!perf_session__has_traces(session, "kmem record"))
1384 		goto out;
1385 
1386 	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
1387 		pr_err("Initializing perf session tracepoint handlers failed\n");
1388 		goto out;
1389 	}
1390 
1391 	evlist__for_each_entry(session->evlist, evsel) {
1392 		if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
1393 		    perf_evsel__field(evsel, "pfn")) {
1394 			use_pfn = true;
1395 			break;
1396 		}
1397 	}
1398 
1399 	setup_pager();
1400 	err = perf_session__process_events(session);
1401 	if (err != 0) {
1402 		pr_err("error during process events: %d\n", err);
1403 		goto out;
1404 	}
1405 	sort_result();
1406 	print_result(session);
1407 out:
1408 	return err;
1409 }
1410 
1411 /* slab sort keys */
1412 static int ptr_cmp(void *a, void *b)
1413 {
1414 	struct alloc_stat *l = a;
1415 	struct alloc_stat *r = b;
1416 
1417 	if (l->ptr < r->ptr)
1418 		return -1;
1419 	else if (l->ptr > r->ptr)
1420 		return 1;
1421 	return 0;
1422 }
1423 
1424 static struct sort_dimension ptr_sort_dimension = {
1425 	.name	= "ptr",
1426 	.cmp	= ptr_cmp,
1427 };
1428 
1429 static int slab_callsite_cmp(void *a, void *b)
1430 {
1431 	struct alloc_stat *l = a;
1432 	struct alloc_stat *r = b;
1433 
1434 	if (l->call_site < r->call_site)
1435 		return -1;
1436 	else if (l->call_site > r->call_site)
1437 		return 1;
1438 	return 0;
1439 }
1440 
1441 static struct sort_dimension callsite_sort_dimension = {
1442 	.name	= "callsite",
1443 	.cmp	= slab_callsite_cmp,
1444 };
1445 
1446 static int hit_cmp(void *a, void *b)
1447 {
1448 	struct alloc_stat *l = a;
1449 	struct alloc_stat *r = b;
1450 
1451 	if (l->hit < r->hit)
1452 		return -1;
1453 	else if (l->hit > r->hit)
1454 		return 1;
1455 	return 0;
1456 }
1457 
1458 static struct sort_dimension hit_sort_dimension = {
1459 	.name	= "hit",
1460 	.cmp	= hit_cmp,
1461 };
1462 
1463 static int bytes_cmp(void *a, void *b)
1464 {
1465 	struct alloc_stat *l = a;
1466 	struct alloc_stat *r = b;
1467 
1468 	if (l->bytes_alloc < r->bytes_alloc)
1469 		return -1;
1470 	else if (l->bytes_alloc > r->bytes_alloc)
1471 		return 1;
1472 	return 0;
1473 }
1474 
1475 static struct sort_dimension bytes_sort_dimension = {
1476 	.name	= "bytes",
1477 	.cmp	= bytes_cmp,
1478 };
1479 
1480 static int frag_cmp(void *a, void *b)
1481 {
1482 	double x, y;
1483 	struct alloc_stat *l = a;
1484 	struct alloc_stat *r = b;
1485 
1486 	x = fragmentation(l->bytes_req, l->bytes_alloc);
1487 	y = fragmentation(r->bytes_req, r->bytes_alloc);
1488 
1489 	if (x < y)
1490 		return -1;
1491 	else if (x > y)
1492 		return 1;
1493 	return 0;
1494 }
1495 
1496 static struct sort_dimension frag_sort_dimension = {
1497 	.name	= "frag",
1498 	.cmp	= frag_cmp,
1499 };
1500 
1501 static int pingpong_cmp(void *a, void *b)
1502 {
1503 	struct alloc_stat *l = a;
1504 	struct alloc_stat *r = b;
1505 
1506 	if (l->pingpong < r->pingpong)
1507 		return -1;
1508 	else if (l->pingpong > r->pingpong)
1509 		return 1;
1510 	return 0;
1511 }
1512 
1513 static struct sort_dimension pingpong_sort_dimension = {
1514 	.name	= "pingpong",
1515 	.cmp	= pingpong_cmp,
1516 };
1517 
1518 /* page sort keys */
1519 static int page_cmp(void *a, void *b)
1520 {
1521 	struct page_stat *l = a;
1522 	struct page_stat *r = b;
1523 
1524 	if (l->page < r->page)
1525 		return -1;
1526 	else if (l->page > r->page)
1527 		return 1;
1528 	return 0;
1529 }
1530 
1531 static struct sort_dimension page_sort_dimension = {
1532 	.name	= "page",
1533 	.cmp	= page_cmp,
1534 };
1535 
1536 static int page_callsite_cmp(void *a, void *b)
1537 {
1538 	struct page_stat *l = a;
1539 	struct page_stat *r = b;
1540 
1541 	if (l->callsite < r->callsite)
1542 		return -1;
1543 	else if (l->callsite > r->callsite)
1544 		return 1;
1545 	return 0;
1546 }
1547 
1548 static struct sort_dimension page_callsite_sort_dimension = {
1549 	.name	= "callsite",
1550 	.cmp	= page_callsite_cmp,
1551 };
1552 
1553 static int page_hit_cmp(void *a, void *b)
1554 {
1555 	struct page_stat *l = a;
1556 	struct page_stat *r = b;
1557 
1558 	if (l->nr_alloc < r->nr_alloc)
1559 		return -1;
1560 	else if (l->nr_alloc > r->nr_alloc)
1561 		return 1;
1562 	return 0;
1563 }
1564 
1565 static struct sort_dimension page_hit_sort_dimension = {
1566 	.name	= "hit",
1567 	.cmp	= page_hit_cmp,
1568 };
1569 
1570 static int page_bytes_cmp(void *a, void *b)
1571 {
1572 	struct page_stat *l = a;
1573 	struct page_stat *r = b;
1574 
1575 	if (l->alloc_bytes < r->alloc_bytes)
1576 		return -1;
1577 	else if (l->alloc_bytes > r->alloc_bytes)
1578 		return 1;
1579 	return 0;
1580 }
1581 
1582 static struct sort_dimension page_bytes_sort_dimension = {
1583 	.name	= "bytes",
1584 	.cmp	= page_bytes_cmp,
1585 };
1586 
1587 static int page_order_cmp(void *a, void *b)
1588 {
1589 	struct page_stat *l = a;
1590 	struct page_stat *r = b;
1591 
1592 	if (l->order < r->order)
1593 		return -1;
1594 	else if (l->order > r->order)
1595 		return 1;
1596 	return 0;
1597 }
1598 
1599 static struct sort_dimension page_order_sort_dimension = {
1600 	.name	= "order",
1601 	.cmp	= page_order_cmp,
1602 };
1603 
1604 static int migrate_type_cmp(void *a, void *b)
1605 {
1606 	struct page_stat *l = a;
1607 	struct page_stat *r = b;
1608 
1609 	/* for internal use to find free'd page */
1610 	if (l->migrate_type == -1U)
1611 		return 0;
1612 
1613 	if (l->migrate_type < r->migrate_type)
1614 		return -1;
1615 	else if (l->migrate_type > r->migrate_type)
1616 		return 1;
1617 	return 0;
1618 }
1619 
1620 static struct sort_dimension migrate_type_sort_dimension = {
1621 	.name	= "migtype",
1622 	.cmp	= migrate_type_cmp,
1623 };
1624 
1625 static int gfp_flags_cmp(void *a, void *b)
1626 {
1627 	struct page_stat *l = a;
1628 	struct page_stat *r = b;
1629 
1630 	/* for internal use to find free'd page */
1631 	if (l->gfp_flags == -1U)
1632 		return 0;
1633 
1634 	if (l->gfp_flags < r->gfp_flags)
1635 		return -1;
1636 	else if (l->gfp_flags > r->gfp_flags)
1637 		return 1;
1638 	return 0;
1639 }
1640 
1641 static struct sort_dimension gfp_flags_sort_dimension = {
1642 	.name	= "gfp",
1643 	.cmp	= gfp_flags_cmp,
1644 };
1645 
1646 static struct sort_dimension *slab_sorts[] = {
1647 	&ptr_sort_dimension,
1648 	&callsite_sort_dimension,
1649 	&hit_sort_dimension,
1650 	&bytes_sort_dimension,
1651 	&frag_sort_dimension,
1652 	&pingpong_sort_dimension,
1653 };
1654 
1655 static struct sort_dimension *page_sorts[] = {
1656 	&page_sort_dimension,
1657 	&page_callsite_sort_dimension,
1658 	&page_hit_sort_dimension,
1659 	&page_bytes_sort_dimension,
1660 	&page_order_sort_dimension,
1661 	&migrate_type_sort_dimension,
1662 	&gfp_flags_sort_dimension,
1663 };
1664 
1665 static int slab_sort_dimension__add(const char *tok, struct list_head *list)
1666 {
1667 	struct sort_dimension *sort;
1668 	int i;
1669 
1670 	for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
1671 		if (!strcmp(slab_sorts[i]->name, tok)) {
1672 			sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
1673 			if (!sort) {
1674 				pr_err("%s: memdup failed\n", __func__);
1675 				return -1;
1676 			}
1677 			list_add_tail(&sort->list, list);
1678 			return 0;
1679 		}
1680 	}
1681 
1682 	return -1;
1683 }
1684 
1685 static int page_sort_dimension__add(const char *tok, struct list_head *list)
1686 {
1687 	struct sort_dimension *sort;
1688 	int i;
1689 
1690 	for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
1691 		if (!strcmp(page_sorts[i]->name, tok)) {
1692 			sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
1693 			if (!sort) {
1694 				pr_err("%s: memdup failed\n", __func__);
1695 				return -1;
1696 			}
1697 			list_add_tail(&sort->list, list);
1698 			return 0;
1699 		}
1700 	}
1701 
1702 	return -1;
1703 }
1704 
1705 static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
1706 {
1707 	char *tok;
1708 	char *str = strdup(arg);
1709 	char *pos = str;
1710 
1711 	if (!str) {
1712 		pr_err("%s: strdup failed\n", __func__);
1713 		return -1;
1714 	}
1715 
1716 	while (true) {
1717 		tok = strsep(&pos, ",");
1718 		if (!tok)
1719 			break;
1720 		if (slab_sort_dimension__add(tok, sort_list) < 0) {
1721 			pr_err("Unknown slab --sort key: '%s'\n", tok);
1722 			free(str);
1723 			return -1;
1724 		}
1725 	}
1726 
1727 	free(str);
1728 	return 0;
1729 }
1730 
1731 static int setup_page_sorting(struct list_head *sort_list, const char *arg)
1732 {
1733 	char *tok;
1734 	char *str = strdup(arg);
1735 	char *pos = str;
1736 
1737 	if (!str) {
1738 		pr_err("%s: strdup failed\n", __func__);
1739 		return -1;
1740 	}
1741 
1742 	while (true) {
1743 		tok = strsep(&pos, ",");
1744 		if (!tok)
1745 			break;
1746 		if (page_sort_dimension__add(tok, sort_list) < 0) {
1747 			pr_err("Unknown page --sort key: '%s'\n", tok);
1748 			free(str);
1749 			return -1;
1750 		}
1751 	}
1752 
1753 	free(str);
1754 	return 0;
1755 }
1756 
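/*
 * --sort keys go to the page or slab sort list depending on which of
 * --page/--slab (and --caller/--alloc) was specified last.
 */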
1757 static int parse_sort_opt(const struct option *opt __maybe_unused,
1758 			  const char *arg, int unset __maybe_unused)
1759 {
1760 	if (!arg)
1761 		return -1;
1762 
1763 	if (kmem_page > kmem_slab ||
1764 	    (kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) {
1765 		if (caller_flag > alloc_flag)
1766 			return setup_page_sorting(&page_caller_sort, arg);
1767 		else
1768 			return setup_page_sorting(&page_alloc_sort, arg);
1769 	} else {
1770 		if (caller_flag > alloc_flag)
1771 			return setup_slab_sorting(&slab_caller_sort, arg);
1772 		else
1773 			return setup_slab_sorting(&slab_alloc_sort, arg);
1774 	}
1775 
1776 	return 0;
1777 }
1778 
1779 static int parse_caller_opt(const struct option *opt __maybe_unused,
1780 			    const char *arg __maybe_unused,
1781 			    int unset __maybe_unused)
1782 {
1783 	caller_flag = (alloc_flag + 1);
1784 	return 0;
1785 }
1786 
1787 static int parse_alloc_opt(const struct option *opt __maybe_unused,
1788 			   const char *arg __maybe_unused,
1789 			   int unset __maybe_unused)
1790 {
1791 	alloc_flag = (caller_flag + 1);
1792 	return 0;
1793 }
1794 
1795 static int parse_slab_opt(const struct option *opt __maybe_unused,
1796 			  const char *arg __maybe_unused,
1797 			  int unset __maybe_unused)
1798 {
1799 	kmem_slab = (kmem_page + 1);
1800 	return 0;
1801 }
1802 
1803 static int parse_page_opt(const struct option *opt __maybe_unused,
1804 			  const char *arg __maybe_unused,
1805 			  int unset __maybe_unused)
1806 {
1807 	kmem_page = (kmem_slab + 1);
1808 	return 0;
1809 }
1810 
1811 static int parse_line_opt(const struct option *opt __maybe_unused,
1812 			  const char *arg, int unset __maybe_unused)
1813 {
1814 	int lines;
1815 
1816 	if (!arg)
1817 		return -1;
1818 
1819 	lines = strtoul(arg, NULL, 10);
1820 
1821 	if (caller_flag > alloc_flag)
1822 		caller_lines = lines;
1823 	else
1824 		alloc_lines = lines;
1825 
1826 	return 0;
1827 }
1828 
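/*
 * 'perf kmem record' is a thin wrapper around 'perf record'.  For
 * example, 'perf kmem --slab record sleep 1' roughly expands to
 * 'perf record -a -R -c 1 -e kmem:kmalloc ... -e kmem:kmem_cache_free sleep 1'.
 */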
1829 static int __cmd_record(int argc, const char **argv)
1830 {
1831 	const char * const record_args[] = {
1832 	"record", "-a", "-R", "-c", "1",
1833 	};
1834 	const char * const slab_events[] = {
1835 	"-e", "kmem:kmalloc",
1836 	"-e", "kmem:kmalloc_node",
1837 	"-e", "kmem:kfree",
1838 	"-e", "kmem:kmem_cache_alloc",
1839 	"-e", "kmem:kmem_cache_alloc_node",
1840 	"-e", "kmem:kmem_cache_free",
1841 	};
1842 	const char * const page_events[] = {
1843 	"-e", "kmem:mm_page_alloc",
1844 	"-e", "kmem:mm_page_free",
1845 	};
1846 	unsigned int rec_argc, i, j;
1847 	const char **rec_argv;
1848 
1849 	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1850 	if (kmem_slab)
1851 		rec_argc += ARRAY_SIZE(slab_events);
1852 	if (kmem_page)
1853 		rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */
1854 
1855 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1856 
1857 	if (rec_argv == NULL)
1858 		return -ENOMEM;
1859 
1860 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
1861 		rec_argv[i] = strdup(record_args[i]);
1862 
1863 	if (kmem_slab) {
1864 		for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
1865 			rec_argv[i] = strdup(slab_events[j]);
1866 	}
1867 	if (kmem_page) {
1868 		rec_argv[i++] = strdup("-g");
1869 
1870 		for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
1871 			rec_argv[i] = strdup(page_events[j]);
1872 	}
1873 
1874 	for (j = 1; j < (unsigned int)argc; j++, i++)
1875 		rec_argv[i] = argv[j];
1876 
1877 	return cmd_record(i, rec_argv);
1878 }
1879 
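/*
 * perfconfig key 'kmem.default' selects slab or page mode when neither
 * --slab nor --page is given on the command line.
 */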
1880 static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
1881 {
1882 	if (!strcmp(var, "kmem.default")) {
1883 		if (!strcmp(value, "slab"))
1884 			kmem_default = KMEM_SLAB;
1885 		else if (!strcmp(value, "page"))
1886 			kmem_default = KMEM_PAGE;
1887 		else
1888 			pr_err("invalid default value ('slab' or 'page' required): %s\n",
1889 			       value);
1890 		return 0;
1891 	}
1892 
1893 	return 0;
1894 }
1895 
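/*
 * Entry point of 'perf kmem': 'record' is forwarded to __cmd_record(),
 * 'stat' opens the perf.data file, sets up the sort keys and runs
 * __cmd_kmem() on the session.
 */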
1896 int cmd_kmem(int argc, const char **argv)
1897 {
1898 	const char * const default_slab_sort = "frag,hit,bytes";
1899 	const char * const default_page_sort = "bytes,hit";
1900 	struct perf_data data = {
1901 		.mode = PERF_DATA_MODE_READ,
1902 	};
1903 	const struct option kmem_options[] = {
1904 	OPT_STRING('i', "input", &input_name, "file", "input file name"),
1905 	OPT_INCR('v', "verbose", &verbose,
1906 		    "be more verbose (show symbol address, etc)"),
1907 	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
1908 			   "show per-callsite statistics", parse_caller_opt),
1909 	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
1910 			   "show per-allocation statistics", parse_alloc_opt),
1911 	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
1912 		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
1913 		     "page, order, migtype, gfp", parse_sort_opt),
1914 	OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
1915 	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
1916 	OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
1917 	OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
1918 			   parse_slab_opt),
1919 	OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
1920 			   parse_page_opt),
1921 	OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
1922 	OPT_STRING(0, "time", &time_str, "str",
1923 		   "Time span of interest (start,stop)"),
1924 	OPT_END()
1925 	};
1926 	const char *const kmem_subcommands[] = { "record", "stat", NULL };
1927 	const char *kmem_usage[] = {
1928 		NULL,
1929 		NULL
1930 	};
1931 	struct perf_session *session;
1932 	static const char errmsg[] = "No %s allocation events found.  Have you run 'perf kmem record --%s'?\n";
1933 	int ret = perf_config(kmem_config, NULL);
1934 
1935 	if (ret)
1936 		return ret;
1937 
1938 	argc = parse_options_subcommand(argc, argv, kmem_options,
1939 					kmem_subcommands, kmem_usage, 0);
1940 
1941 	if (!argc)
1942 		usage_with_options(kmem_usage, kmem_options);
1943 
1944 	if (kmem_slab == 0 && kmem_page == 0) {
1945 		if (kmem_default == KMEM_SLAB)
1946 			kmem_slab = 1;
1947 		else
1948 			kmem_page = 1;
1949 	}
1950 
1951 	if (!strncmp(argv[0], "rec", 3)) {
1952 		symbol__init(NULL);
1953 		return __cmd_record(argc, argv);
1954 	}
1955 
1956 	data.path = input_name;
1957 
1958 	kmem_session = session = perf_session__new(&data, false, &perf_kmem);
1959 	if (session == NULL)
1960 		return -1;
1961 
1962 	ret = -1;
1963 
1964 	if (kmem_slab) {
1965 		if (!perf_evlist__find_tracepoint_by_name(session->evlist,
1966 							  "kmem:kmalloc")) {
1967 			pr_err(errmsg, "slab", "slab");
1968 			goto out_delete;
1969 		}
1970 	}
1971 
1972 	if (kmem_page) {
1973 		struct evsel *evsel;
1974 
1975 		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
1976 							     "kmem:mm_page_alloc");
1977 		if (evsel == NULL) {
1978 			pr_err(errmsg, "page", "page");
1979 			goto out_delete;
1980 		}
1981 
1982 		kmem_page_size = tep_get_page_size(evsel->tp_format->tep);
1983 		symbol_conf.use_callchain = true;
1984 	}
1985 
1986 	symbol__init(&session->header.env);
1987 
1988 	if (perf_time__parse_str(&ptime, time_str) != 0) {
1989 		pr_err("Invalid time string\n");
1990 		ret = -EINVAL;
1991 		goto out_delete;
1992 	}
1993 
1994 	if (!strcmp(argv[0], "stat")) {
1995 		setlocale(LC_ALL, "");
1996 
1997 		if (cpu__setup_cpunode_map())
1998 			goto out_delete;
1999 
2000 		if (list_empty(&slab_caller_sort))
2001 			setup_slab_sorting(&slab_caller_sort, default_slab_sort);
2002 		if (list_empty(&slab_alloc_sort))
2003 			setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
2004 		if (list_empty(&page_caller_sort))
2005 			setup_page_sorting(&page_caller_sort, default_page_sort);
2006 		if (list_empty(&page_alloc_sort))
2007 			setup_page_sorting(&page_alloc_sort, default_page_sort);
2008 
2009 		if (kmem_page) {
2010 			setup_page_sorting(&page_alloc_sort_input,
2011 					   "page,order,migtype,gfp");
2012 			setup_page_sorting(&page_caller_sort_input,
2013 					   "callsite,order,migtype,gfp");
2014 		}
2015 		ret = __cmd_kmem(session);
2016 	} else
2017 		usage_with_options(kmem_usage, kmem_options);
2018 
2019 out_delete:
2020 	perf_session__delete(session);
2021 
2022 	return ret;
2023 }
2024 
2025