#include <stdio.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>

#include <linux/err.h>

#include "util/ftrace.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/bpf_counter.h"

#include "util/bpf_skel/func_latency.skel.h"

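/* global skeleton handle shared by the prepare/start/stop/read/cleanup hooks */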
static struct func_latency_bpf *skel;

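/*
 * Open and load the func_latency skeleton, size and fill its filter
 * maps, then attach the entry/exit kprobes to the requested function.
 */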
int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
{
	int fd, err;
	int i, ncpus = 1, ntasks = 1;
	struct filter_entry *func;

	if (!list_is_singular(&ftrace->filters)) {
		pr_err("ERROR: %s target function(s).\n",
		       list_empty(&ftrace->filters) ? "No" : "Too many");
		return -1;
	}

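	/* exactly one filter is guaranteed above; it names the kprobe target */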
	func = list_first_entry(&ftrace->filters, struct filter_entry, list);

	skel = func_latency_bpf__open();
	if (!skel) {
		pr_err("Failed to open func latency skeleton\n");
		return -1;
	}

	/* don't need to set cpu filter for system-wide mode */
	if (ftrace->target.cpu_list) {
		ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	}

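	/* size the task filter when targeting specific tasks or the default (forked workload) case */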
	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		ntasks = perf_thread_map__nr(ftrace->evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	}

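	/* BPF map memory can be charged to RLIMIT_MEMLOCK; raise it before loading */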
	set_max_rlimit();

	err = func_latency_bpf__load(skel);
	if (err) {
		pr_err("Failed to load func latency skeleton\n");
		goto out;
	}

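	/* enable cpu filtering in the BPF program and mark each requested cpu in the map */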
	if (ftrace->target.cpu_list) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(ftrace->evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

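	/* likewise filter by task: one entry per target pid */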
	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(ftrace->evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

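	/* report buckets in nanoseconds when use_nsec is set (microseconds otherwise) */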
	skel->bss->use_nsec = ftrace->use_nsec;

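	/* attach at function entry (retprobe == false) to record the start timestamp */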
	skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
							    false, func->name);
	if (IS_ERR(skel->links.func_begin)) {
		pr_err("Failed to attach fentry program\n");
		err = PTR_ERR(skel->links.func_begin);
		goto out;
	}

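	/* attach at function return (retprobe == true); the BPF side computes the latency */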
	skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end,
							  true, func->name);
	if (IS_ERR(skel->links.func_end)) {
		pr_err("Failed to attach fexit program\n");
		err = PTR_ERR(skel->links.func_end);
		goto out;
	}

	/* XXX: we don't actually use this fd - just for poll() */
	return open("/dev/null", O_RDONLY);

out:
	return err;
}

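/* tracing is gated by the 'enabled' flag in the BPF program's BSS */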
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 1;
	return 0;
}

int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 0;
	return 0;
}

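/*
 * Gather the histogram: each slot of the latency map holds one value
 * per possible cpu, which are summed into the caller's buckets.
 */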
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
				  int buckets[])
{
	int i, fd, err;
	u32 idx;
	u64 *hist;
	int ncpus = cpu__max_cpu().cpu;

	fd = bpf_map__fd(skel->maps.latency);

	hist = calloc(ncpus, sizeof(*hist));
	if (hist == NULL)
		return -ENOMEM;

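	/* a failed lookup leaves that bucket at zero rather than aborting the read */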
	for (idx = 0; idx < NUM_BUCKET; idx++) {
		err = bpf_map_lookup_elem(fd, &idx, hist);
		if (err) {
			buckets[idx] = 0;
			continue;
		}

		for (i = 0; i < ncpus; i++)
			buckets[idx] += hist[i];
	}

	free(hist);
	return 0;
}

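/* destroying the skeleton detaches both kprobes and frees all maps */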
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	func_latency_bpf__destroy(skel);
	return 0;
}