/* tools/perf/builtin-kmem.c (OpenBMC Linux tree, revision efad14150a0b4429f37da7245001a8096ef7ee38) */
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <linux/rbtree.h>

struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

static const char		*input_name;

static int			alloc_flag;
static int			caller_flag;

static int			alloc_lines = -1;
static int			caller_lines = -1;

static bool			raw_ip;

static char			default_sort_order[] = "frag,hit,bytes";

static int			*cpunode_map;
static int			max_cpu_num;

struct alloc_stat {
	u64	call_site;
	u64	ptr;
	u64	bytes_req;
	u64	bytes_alloc;
	u32	hit;
	u32	pingpong;

	short	alloc_cpu;

	struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;

#define PATH_SYS_NODE	"/sys/devices/system/node"

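/*
 * Size the cpu -> NUMA node lookup table from
 * /sys/devices/system/cpu/kernel_max and mark every allocated entry as
 * -1 ("node unknown").  If kernel_max cannot be opened, only the
 * 4096-CPU default is recorded and the table itself is left
 * unallocated.
 */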
static void init_cpunode_map(void)
{
	FILE *fp;
	int i;

	fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
	if (!fp) {
		max_cpu_num = 4096;
		return;
	}

	if (fscanf(fp, "%d", &max_cpu_num) < 1)
		die("Failed to read 'kernel_max' from sysfs");
	max_cpu_num++;

	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map)
		die("calloc");
	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;
	fclose(fp);
}

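/*
 * Fill cpunode_map by walking /sys/devices/system/node: every nodeN
 * directory is scanned for cpuM symlinks, and each CPU found is mapped
 * to its memory node.  Note that the DIR handles opened here are never
 * closed.
 */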
static void setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];

	init_cpunode_map();

	dir1 = opendir(PATH_SYS_NODE);
	if (!dir1)
		return;

	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR ||
		    sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK ||
			    sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
	}
}

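/*
 * Account one allocation in the per-pointer tree (root_alloc_stat),
 * keyed by the returned object address.  An existing node has its hit
 * count and byte totals bumped; otherwise a fresh node is inserted.
 * The call site and allocating CPU are (re)recorded either way.
 */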
static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			      int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
}

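/*
 * Same accounting as insert_alloc_stat(), but in the per-call-site tree
 * (root_caller_stat), keyed by the allocation call site instead of the
 * object pointer.
 */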
static void insert_caller_stat(unsigned long call_site,
			      int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}
}

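/*
 * Handle one kmalloc/kmem_cache_alloc style tracepoint: pull ptr,
 * call_site, bytes_req and bytes_alloc out of the raw trace data and
 * update both rbtrees plus the global totals.  For the _node variants
 * (node != 0) the event's target node is compared against the
 * allocating CPU's node to count cross-node allocations.
 */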
static void process_alloc_event(void *data,
				struct event *event,
				int cpu,
				u64 timestamp __used,
				struct thread *thread __used,
				int node)
{
	unsigned long call_site;
	unsigned long ptr;
	int bytes_req;
	int bytes_alloc;
	int node1, node2;

	ptr = raw_field_value(event, "ptr", data);
	call_site = raw_field_value(event, "call_site", data);
	bytes_req = raw_field_value(event, "bytes_req", data);
	bytes_alloc = raw_field_value(event, "bytes_alloc", data);

	insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
	insert_caller_stat(call_site, bytes_req, bytes_alloc);

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	if (node) {
		node1 = cpunode_map[cpu];
		node2 = raw_field_value(event, "node", data);
		if (node1 != node2)
			nr_cross_allocs++;
	}
	nr_allocs++;
}

static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);

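/*
 * Look up an alloc_stat node in one of the unsorted trees.  The key is
 * a stack-local alloc_stat carrying only ptr and call_site; sort_fn
 * (ptr_cmp or callsite_cmp) decides which of the two fields is
 * compared.
 */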
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);

		cmp = sort_fn(&key, data);
		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}

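/*
 * Handle a kfree/kmem_cache_free tracepoint.  If the freed pointer was
 * seen being allocated and the free runs on a different CPU than the
 * allocation, both the per-pointer and the per-call-site entries get a
 * "ping-pong" hit, i.e. the object bounced between CPUs.
 */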
static void process_free_event(void *data,
			       struct event *event,
			       int cpu,
			       u64 timestamp __used,
			       struct thread *thread __used)
{
	unsigned long ptr;
	struct alloc_stat *s_alloc, *s_caller;

	ptr = raw_field_value(event, "ptr", data);

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return;

	if (cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		assert(s_caller);
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;
}

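/*
 * Dispatch a raw tracepoint sample by name: the plain and _node
 * allocation events go to process_alloc_event() (the latter with node
 * accounting enabled), the free events to process_free_event().
 */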
static void process_raw_event(union perf_event *raw_event __used, void *data,
			      int cpu, u64 timestamp, struct thread *thread)
{
	struct event *event;
	int type;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "kmalloc") ||
	    !strcmp(event->name, "kmem_cache_alloc")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 0);
		return;
	}

	if (!strcmp(event->name, "kmalloc_node") ||
	    !strcmp(event->name, "kmem_cache_alloc_node")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 1);
		return;
	}

	if (!strcmp(event->name, "kfree") ||
	    !strcmp(event->name, "kmem_cache_free")) {
		process_free_event(data, event, cpu, timestamp, thread);
		return;
	}
}

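/*
 * perf_tool sample callback: resolve the thread for the sample and hand
 * the raw tracepoint payload on to process_raw_event().
 */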
static int process_sample_event(struct perf_tool *tool __used,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel __used,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	process_raw_event(event, sample->raw_data, sample->cpu,
			  sample->time, thread);

	return 0;
}

static struct perf_tool perf_kmem = {
	.sample			= process_sample_event,
	.comm			= perf_event__process_comm,
	.ordered_samples	= true,
};

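/*
 * Internal fragmentation in percent: 100 * (n_alloc - n_req) / n_alloc,
 * written below as 100 - 100 * n_req / n_alloc, i.e. the share of
 * allocated bytes the caller never asked for.  The degenerate
 * n_alloc == 0 case is reported as 0%.
 */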
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}

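/*
 * Print one sorted result table.  Caller tables show the resolved
 * kernel symbol for the call site (unless --raw-ip was given), alloc
 * tables show the raw object pointer; each row reports total and
 * per-hit bytes, hit and ping-pong counts, and the fragmentation ratio.
 */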
static void __print_result(struct rb_root *root, struct perf_session *session,
			   int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine;

	printf("%.102s\n", graph_dotted_line);
	printf(" %-34s |",  is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per   | Hit      | Ping-pong | Frag\n");
	printf("%.102s\n", graph_dotted_line);

	next = rb_first(root);

	machine = perf_session__find_host_machine(session);
	if (!machine) {
		pr_err("__print_result: couldn't find kernel information\n");
		return;
	}
	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_function(machine, addr, &map, NULL);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ...                                | ...             | ...             | ...    | ...      | ...   \n");

	printf("%.102s\n", graph_dotted_line);
}

static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}

static void print_result(struct perf_session *session)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_summary();
}

struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);

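/*
 * Insert one alloc_stat into a sorted output tree.  The sort keys are
 * tried in the order given on the command line (or the default
 * "frag,hit,bytes"); the first dimension that distinguishes the two
 * entries decides the direction, and higher-ranked entries end up to
 * the left so rb_first() walks them first.
 */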
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
			  struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_insert(root_sorted, data, sort_list);
	}
}

static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}

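/*
 * "perf kmem stat" proper: open the perf.data file, process all events
 * through the perf_kmem tool (which fills the stat trees), then sort
 * and print the result.
 */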
static int __cmd_kmem(void)
{
	int err = -EINVAL;
	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
							 0, false, &perf_kmem);
	if (session == NULL)
		return -ENOMEM;

	if (perf_session__create_kernel_maps(session) < 0)
		goto out_delete;

	if (!perf_session__has_traces(session, "kmem record"))
		goto out_delete;

	setup_pager();
	err = perf_session__process_events(session, &perf_kmem);
	if (err != 0)
		goto out_delete;
	sort_result();
	print_result(session);
out_delete:
	perf_session__delete(session);
	return err;
}

static const char * const kmem_usage[] = {
	"perf kmem [<options>] {record|stat}",
	NULL
};

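/*
 * Sort dimensions selectable via --sort.  Each one pairs a key name
 * with a comparison function over two alloc_stat entries; via
 * sort_insert(), larger values (or, for "frag", a worse fragmentation
 * ratio) appear first in the output.
 */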
static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};

static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= callsite_cmp,
};

static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};

static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};

static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	double x, y;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};

static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};

static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

#define NUM_AVAIL_SORTS	\
	(int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))

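/*
 * Parse a --sort argument: the comma-separated key list is split and
 * each key is matched against avail_sorts[]; a private copy of the
 * matching dimension is appended to the caller's sort list.
 */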
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
		if (!strcmp(avail_sorts[i]->name, tok)) {
			sort = malloc(sizeof(*sort));
			if (!sort)
				die("malloc");
			memcpy(sort, avail_sorts[i], sizeof(*sort));
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}

static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);

	if (!str)
		die("strdup");

	while (true) {
		tok = strsep(&str, ",");
		if (!tok)
			break;
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			return -1;
		}
	}

	free(str);
	return 0;
}

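/*
 * Option callbacks.  --caller and --alloc act as toggles whose relative
 * order matters: whichever was given last "wins", so a following --sort
 * or --line applies to the caller table when caller_flag > alloc_flag
 * and to the alloc table otherwise.
 */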
static int parse_sort_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	if (!arg)
		return -1;

	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);
	else
		return setup_sorting(&alloc_sort, arg);

	return 0;
}

static int parse_caller_opt(const struct option *opt __used,
			  const char *arg __used, int unset __used)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __used,
			  const char *arg __used, int unset __used)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}

static int parse_line_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}

static const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics",
			   parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics",
			   parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag",
		     parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num",
		     "show n lines",
		     parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_END()
};

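/*
 * "perf kmem record": invoke the regular perf record command
 * system-wide (-a) with the six kmem tracepoints enabled, forwarding
 * any extra arguments the user supplied after "record".
 */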
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
};

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

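/*
 * Entry point for "perf kmem".  "rec..." delegates to __cmd_record(),
 * "stat" builds the cpu -> node map, installs the default sort order
 * where none was requested, and runs the analysis; anything else prints
 * the usage text.
 */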
int cmd_kmem(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	symbol__init();

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strcmp(argv[0], "stat")) {
		setup_cpunode_map();

		if (list_empty(&caller_sort))
			setup_sorting(&caller_sort, default_sort_order);
		if (list_empty(&alloc_sort))
			setup_sorting(&alloc_sort, default_sort_order);

		return __cmd_kmem();
	} else
		usage_with_options(kmem_usage, kmem_options);

	return 0;
}