xref: /openbmc/linux/tools/perf/util/bpf_off_cpu.c (revision d9bd26de)
1edc41a10SNamhyung Kim // SPDX-License-Identifier: GPL-2.0
2edc41a10SNamhyung Kim #include "util/bpf_counter.h"
3edc41a10SNamhyung Kim #include "util/debug.h"
4edc41a10SNamhyung Kim #include "util/evsel.h"
5edc41a10SNamhyung Kim #include "util/evlist.h"
6edc41a10SNamhyung Kim #include "util/off_cpu.h"
7edc41a10SNamhyung Kim #include "util/perf-hooks.h"
8685439a7SNamhyung Kim #include "util/record.h"
9edc41a10SNamhyung Kim #include "util/session.h"
1010742d0cSNamhyung Kim #include "util/target.h"
1110742d0cSNamhyung Kim #include "util/cpumap.h"
1210742d0cSNamhyung Kim #include "util/thread_map.h"
13685439a7SNamhyung Kim #include "util/cgroup.h"
14d6f415caSNamhyung Kim #include "util/strlist.h"
15edc41a10SNamhyung Kim #include <bpf/bpf.h>
16edc41a10SNamhyung Kim 
17edc41a10SNamhyung Kim #include "bpf_skel/off_cpu.skel.h"
18edc41a10SNamhyung Kim 
/* maximum depth of a saved stack trace */
#define MAX_STACKS  32
/* default max entries for the task filter map (system-wide / pid list) */
#define MAX_PROC  4096
/* we don't need actual timestamp, just want to put the samples at last */
#define OFF_CPU_TIMESTAMP  (~0ull << 32)

/* BPF skeleton handle shared by prepare/start/finish/write below */
static struct off_cpu_bpf *skel;

/*
 * Key of the off_cpu BPF map; presumably mirrors the key layout declared
 * in bpf_skel/off_cpu.bpf.c — keep the two in sync.
 */
struct off_cpu_key {
	u32 pid;
	u32 tgid;
	u32 stack_id;
	u32 state;
	u64 cgroup_id;
};

/* scratch buffer used to synthesize PERF_RECORD_SAMPLE records */
union off_cpu_data {
	struct perf_event_header hdr;
	u64 array[1024 / sizeof(u64)];
};
38edc41a10SNamhyung Kim 
off_cpu_config(struct evlist * evlist)39edc41a10SNamhyung Kim static int off_cpu_config(struct evlist *evlist)
40edc41a10SNamhyung Kim {
41edc41a10SNamhyung Kim 	struct evsel *evsel;
42edc41a10SNamhyung Kim 	struct perf_event_attr attr = {
43edc41a10SNamhyung Kim 		.type	= PERF_TYPE_SOFTWARE,
44edc41a10SNamhyung Kim 		.config = PERF_COUNT_SW_BPF_OUTPUT,
45edc41a10SNamhyung Kim 		.size	= sizeof(attr), /* to capture ABI version */
46edc41a10SNamhyung Kim 	};
47edc41a10SNamhyung Kim 	char *evname = strdup(OFFCPU_EVENT);
48edc41a10SNamhyung Kim 
49edc41a10SNamhyung Kim 	if (evname == NULL)
50edc41a10SNamhyung Kim 		return -ENOMEM;
51edc41a10SNamhyung Kim 
52edc41a10SNamhyung Kim 	evsel = evsel__new(&attr);
53edc41a10SNamhyung Kim 	if (!evsel) {
54edc41a10SNamhyung Kim 		free(evname);
55edc41a10SNamhyung Kim 		return -ENOMEM;
56edc41a10SNamhyung Kim 	}
57edc41a10SNamhyung Kim 
58edc41a10SNamhyung Kim 	evsel->core.attr.freq = 1;
59edc41a10SNamhyung Kim 	evsel->core.attr.sample_period = 1;
60edc41a10SNamhyung Kim 	/* off-cpu analysis depends on stack trace */
61edc41a10SNamhyung Kim 	evsel->core.attr.sample_type = PERF_SAMPLE_CALLCHAIN;
62edc41a10SNamhyung Kim 
63edc41a10SNamhyung Kim 	evlist__add(evlist, evsel);
64edc41a10SNamhyung Kim 
65edc41a10SNamhyung Kim 	free(evsel->name);
66edc41a10SNamhyung Kim 	evsel->name = evname;
67edc41a10SNamhyung Kim 
68edc41a10SNamhyung Kim 	return 0;
69edc41a10SNamhyung Kim }
70edc41a10SNamhyung Kim 
off_cpu_start(void * arg)7110742d0cSNamhyung Kim static void off_cpu_start(void *arg)
72edc41a10SNamhyung Kim {
7310742d0cSNamhyung Kim 	struct evlist *evlist = arg;
7410742d0cSNamhyung Kim 
7510742d0cSNamhyung Kim 	/* update task filter for the given workload */
7610742d0cSNamhyung Kim 	if (!skel->bss->has_cpu && !skel->bss->has_task &&
7710742d0cSNamhyung Kim 	    perf_thread_map__pid(evlist->core.threads, 0) != -1) {
7810742d0cSNamhyung Kim 		int fd;
7910742d0cSNamhyung Kim 		u32 pid;
8010742d0cSNamhyung Kim 		u8 val = 1;
8110742d0cSNamhyung Kim 
8210742d0cSNamhyung Kim 		skel->bss->has_task = 1;
8307fc958bSNamhyung Kim 		skel->bss->uses_tgid = 1;
8410742d0cSNamhyung Kim 		fd = bpf_map__fd(skel->maps.task_filter);
8510742d0cSNamhyung Kim 		pid = perf_thread_map__pid(evlist->core.threads, 0);
8610742d0cSNamhyung Kim 		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
8710742d0cSNamhyung Kim 	}
8810742d0cSNamhyung Kim 
89edc41a10SNamhyung Kim 	skel->bss->enabled = 1;
90edc41a10SNamhyung Kim }
91edc41a10SNamhyung Kim 
/* perf-hooks "record_end" callback: stop collection and tear down the skeleton */
static void off_cpu_finish(void *arg __maybe_unused)
{
	skel->bss->enabled = 0;
	off_cpu_bpf__destroy(skel);
}
97edc41a10SNamhyung Kim 
98b36888f7SNamhyung Kim /* v5.18 kernel added prev_state arg, so it needs to check the signature */
check_sched_switch_args(void)99b36888f7SNamhyung Kim static void check_sched_switch_args(void)
100b36888f7SNamhyung Kim {
101*d9bd26deSNamhyung Kim 	const struct btf *btf = btf__load_vmlinux_btf();
102b36888f7SNamhyung Kim 	const struct btf_type *t1, *t2, *t3;
103b36888f7SNamhyung Kim 	u32 type_id;
104b36888f7SNamhyung Kim 
105167b266bSNamhyung Kim 	type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
106b36888f7SNamhyung Kim 					 BTF_KIND_TYPEDEF);
107b36888f7SNamhyung Kim 	if ((s32)type_id < 0)
108b36888f7SNamhyung Kim 		return;
109b36888f7SNamhyung Kim 
110b36888f7SNamhyung Kim 	t1 = btf__type_by_id(btf, type_id);
111b36888f7SNamhyung Kim 	if (t1 == NULL)
112b36888f7SNamhyung Kim 		return;
113b36888f7SNamhyung Kim 
114b36888f7SNamhyung Kim 	t2 = btf__type_by_id(btf, t1->type);
115b36888f7SNamhyung Kim 	if (t2 == NULL || !btf_is_ptr(t2))
116b36888f7SNamhyung Kim 		return;
117b36888f7SNamhyung Kim 
118b36888f7SNamhyung Kim 	t3 = btf__type_by_id(btf, t2->type);
119*d9bd26deSNamhyung Kim 	/* btf_trace func proto has one more argument for the context */
120*d9bd26deSNamhyung Kim 	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) {
121b36888f7SNamhyung Kim 		/* new format: pass prev_state as 4th arg */
122b36888f7SNamhyung Kim 		skel->rodata->has_prev_state = true;
123b36888f7SNamhyung Kim 	}
124b36888f7SNamhyung Kim }
125b36888f7SNamhyung Kim 
off_cpu_prepare(struct evlist * evlist,struct target * target,struct record_opts * opts)126685439a7SNamhyung Kim int off_cpu_prepare(struct evlist *evlist, struct target *target,
127685439a7SNamhyung Kim 		    struct record_opts *opts)
128edc41a10SNamhyung Kim {
12910742d0cSNamhyung Kim 	int err, fd, i;
130685439a7SNamhyung Kim 	int ncpus = 1, ntasks = 1, ncgrps = 1;
131d6f415caSNamhyung Kim 	struct strlist *pid_slist = NULL;
132d6f415caSNamhyung Kim 	struct str_node *pos;
133edc41a10SNamhyung Kim 
134edc41a10SNamhyung Kim 	if (off_cpu_config(evlist) < 0) {
135edc41a10SNamhyung Kim 		pr_err("Failed to config off-cpu BPF event\n");
136edc41a10SNamhyung Kim 		return -1;
137edc41a10SNamhyung Kim 	}
138edc41a10SNamhyung Kim 
13910742d0cSNamhyung Kim 	skel = off_cpu_bpf__open();
140edc41a10SNamhyung Kim 	if (!skel) {
141edc41a10SNamhyung Kim 		pr_err("Failed to open off-cpu BPF skeleton\n");
142edc41a10SNamhyung Kim 		return -1;
143edc41a10SNamhyung Kim 	}
144edc41a10SNamhyung Kim 
14510742d0cSNamhyung Kim 	/* don't need to set cpu filter for system-wide mode */
14610742d0cSNamhyung Kim 	if (target->cpu_list) {
14710742d0cSNamhyung Kim 		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
14810742d0cSNamhyung Kim 		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
14910742d0cSNamhyung Kim 	}
15010742d0cSNamhyung Kim 
151d6f415caSNamhyung Kim 	if (target->pid) {
152d6f415caSNamhyung Kim 		pid_slist = strlist__new(target->pid, NULL);
153d6f415caSNamhyung Kim 		if (!pid_slist) {
154d6f415caSNamhyung Kim 			pr_err("Failed to create a strlist for pid\n");
155d6f415caSNamhyung Kim 			return -1;
156d6f415caSNamhyung Kim 		}
157d6f415caSNamhyung Kim 
158d6f415caSNamhyung Kim 		ntasks = 0;
159d6f415caSNamhyung Kim 		strlist__for_each_entry(pos, pid_slist) {
160d6f415caSNamhyung Kim 			char *end_ptr;
161d6f415caSNamhyung Kim 			int pid = strtol(pos->s, &end_ptr, 10);
162d6f415caSNamhyung Kim 
163d6f415caSNamhyung Kim 			if (pid == INT_MIN || pid == INT_MAX ||
164d6f415caSNamhyung Kim 			    (*end_ptr != '\0' && *end_ptr != ','))
165d6f415caSNamhyung Kim 				continue;
166d6f415caSNamhyung Kim 
167d6f415caSNamhyung Kim 			ntasks++;
168d6f415caSNamhyung Kim 		}
169d2347763SNamhyung Kim 
170d2347763SNamhyung Kim 		if (ntasks < MAX_PROC)
171d2347763SNamhyung Kim 			ntasks = MAX_PROC;
172d2347763SNamhyung Kim 
173d6f415caSNamhyung Kim 		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
174d6f415caSNamhyung Kim 	} else if (target__has_task(target)) {
17510742d0cSNamhyung Kim 		ntasks = perf_thread_map__nr(evlist->core.threads);
17610742d0cSNamhyung Kim 		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
177d2347763SNamhyung Kim 	} else if (target__none(target)) {
178d2347763SNamhyung Kim 		bpf_map__set_max_entries(skel->maps.task_filter, MAX_PROC);
17910742d0cSNamhyung Kim 	}
18010742d0cSNamhyung Kim 
181685439a7SNamhyung Kim 	if (evlist__first(evlist)->cgrp) {
182685439a7SNamhyung Kim 		ncgrps = evlist->core.nr_entries - 1; /* excluding a dummy */
183685439a7SNamhyung Kim 		bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);
184685439a7SNamhyung Kim 
185685439a7SNamhyung Kim 		if (!cgroup_is_v2("perf_event"))
186685439a7SNamhyung Kim 			skel->rodata->uses_cgroup_v1 = true;
187685439a7SNamhyung Kim 	}
188685439a7SNamhyung Kim 
189685439a7SNamhyung Kim 	if (opts->record_cgroup) {
190685439a7SNamhyung Kim 		skel->rodata->needs_cgroup = true;
191685439a7SNamhyung Kim 
192685439a7SNamhyung Kim 		if (!cgroup_is_v2("perf_event"))
193685439a7SNamhyung Kim 			skel->rodata->uses_cgroup_v1 = true;
194685439a7SNamhyung Kim 	}
195685439a7SNamhyung Kim 
19610742d0cSNamhyung Kim 	set_max_rlimit();
197b36888f7SNamhyung Kim 	check_sched_switch_args();
19810742d0cSNamhyung Kim 
19910742d0cSNamhyung Kim 	err = off_cpu_bpf__load(skel);
20010742d0cSNamhyung Kim 	if (err) {
20110742d0cSNamhyung Kim 		pr_err("Failed to load off-cpu skeleton\n");
20210742d0cSNamhyung Kim 		goto out;
20310742d0cSNamhyung Kim 	}
20410742d0cSNamhyung Kim 
20510742d0cSNamhyung Kim 	if (target->cpu_list) {
20610742d0cSNamhyung Kim 		u32 cpu;
20710742d0cSNamhyung Kim 		u8 val = 1;
20810742d0cSNamhyung Kim 
20910742d0cSNamhyung Kim 		skel->bss->has_cpu = 1;
21010742d0cSNamhyung Kim 		fd = bpf_map__fd(skel->maps.cpu_filter);
21110742d0cSNamhyung Kim 
21210742d0cSNamhyung Kim 		for (i = 0; i < ncpus; i++) {
21310742d0cSNamhyung Kim 			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
21410742d0cSNamhyung Kim 			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
21510742d0cSNamhyung Kim 		}
21610742d0cSNamhyung Kim 	}
21710742d0cSNamhyung Kim 
218d6f415caSNamhyung Kim 	if (target->pid) {
219d6f415caSNamhyung Kim 		u8 val = 1;
220d6f415caSNamhyung Kim 
221d6f415caSNamhyung Kim 		skel->bss->has_task = 1;
222d6f415caSNamhyung Kim 		skel->bss->uses_tgid = 1;
223d6f415caSNamhyung Kim 		fd = bpf_map__fd(skel->maps.task_filter);
224d6f415caSNamhyung Kim 
225d6f415caSNamhyung Kim 		strlist__for_each_entry(pos, pid_slist) {
226d6f415caSNamhyung Kim 			char *end_ptr;
227d6f415caSNamhyung Kim 			u32 tgid;
228d6f415caSNamhyung Kim 			int pid = strtol(pos->s, &end_ptr, 10);
229d6f415caSNamhyung Kim 
230d6f415caSNamhyung Kim 			if (pid == INT_MIN || pid == INT_MAX ||
231d6f415caSNamhyung Kim 			    (*end_ptr != '\0' && *end_ptr != ','))
232d6f415caSNamhyung Kim 				continue;
233d6f415caSNamhyung Kim 
234d6f415caSNamhyung Kim 			tgid = pid;
235d6f415caSNamhyung Kim 			bpf_map_update_elem(fd, &tgid, &val, BPF_ANY);
236d6f415caSNamhyung Kim 		}
237d6f415caSNamhyung Kim 	} else if (target__has_task(target)) {
23810742d0cSNamhyung Kim 		u32 pid;
23910742d0cSNamhyung Kim 		u8 val = 1;
24010742d0cSNamhyung Kim 
24110742d0cSNamhyung Kim 		skel->bss->has_task = 1;
24210742d0cSNamhyung Kim 		fd = bpf_map__fd(skel->maps.task_filter);
24310742d0cSNamhyung Kim 
24410742d0cSNamhyung Kim 		for (i = 0; i < ntasks; i++) {
24510742d0cSNamhyung Kim 			pid = perf_thread_map__pid(evlist->core.threads, i);
24610742d0cSNamhyung Kim 			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
24710742d0cSNamhyung Kim 		}
24810742d0cSNamhyung Kim 	}
24910742d0cSNamhyung Kim 
250685439a7SNamhyung Kim 	if (evlist__first(evlist)->cgrp) {
251685439a7SNamhyung Kim 		struct evsel *evsel;
252685439a7SNamhyung Kim 		u8 val = 1;
253685439a7SNamhyung Kim 
254685439a7SNamhyung Kim 		skel->bss->has_cgroup = 1;
255685439a7SNamhyung Kim 		fd = bpf_map__fd(skel->maps.cgroup_filter);
256685439a7SNamhyung Kim 
257685439a7SNamhyung Kim 		evlist__for_each_entry(evlist, evsel) {
258685439a7SNamhyung Kim 			struct cgroup *cgrp = evsel->cgrp;
259685439a7SNamhyung Kim 
260685439a7SNamhyung Kim 			if (cgrp == NULL)
261685439a7SNamhyung Kim 				continue;
262685439a7SNamhyung Kim 
263685439a7SNamhyung Kim 			if (!cgrp->id && read_cgroup_id(cgrp) < 0) {
264685439a7SNamhyung Kim 				pr_err("Failed to read cgroup id of %s\n",
265685439a7SNamhyung Kim 				       cgrp->name);
266685439a7SNamhyung Kim 				goto out;
267685439a7SNamhyung Kim 			}
268685439a7SNamhyung Kim 
269685439a7SNamhyung Kim 			bpf_map_update_elem(fd, &cgrp->id, &val, BPF_ANY);
270685439a7SNamhyung Kim 		}
271685439a7SNamhyung Kim 	}
272685439a7SNamhyung Kim 
273edc41a10SNamhyung Kim 	err = off_cpu_bpf__attach(skel);
274edc41a10SNamhyung Kim 	if (err) {
275edc41a10SNamhyung Kim 		pr_err("Failed to attach off-cpu BPF skeleton\n");
276edc41a10SNamhyung Kim 		goto out;
277edc41a10SNamhyung Kim 	}
278edc41a10SNamhyung Kim 
27910742d0cSNamhyung Kim 	if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
28010742d0cSNamhyung Kim 	    perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
281edc41a10SNamhyung Kim 		pr_err("Failed to attach off-cpu skeleton\n");
282edc41a10SNamhyung Kim 		goto out;
283edc41a10SNamhyung Kim 	}
284edc41a10SNamhyung Kim 
285edc41a10SNamhyung Kim 	return 0;
286edc41a10SNamhyung Kim 
287edc41a10SNamhyung Kim out:
288edc41a10SNamhyung Kim 	off_cpu_bpf__destroy(skel);
289edc41a10SNamhyung Kim 	return -1;
290edc41a10SNamhyung Kim }
291edc41a10SNamhyung Kim 
/*
 * Dump the off_cpu BPF map as synthetic PERF_RECORD_SAMPLE records at the
 * end of the data file.  Returns the number of bytes written; 0 if the
 * off-cpu evsel is not in the session, -1 on an unsupported sample type.
 *
 * NOTE: the field emission order below must follow the perf sample ABI
 * layout exactly — do not reorder the if (sample_type & ...) chain.
 */
int off_cpu_write(struct perf_session *session)
{
	int bytes = 0, size;
	int fd, stack;
	u64 sample_type, val, sid = 0;
	struct evsel *evsel;
	struct perf_data_file *file = &session->data->file;
	struct off_cpu_key prev, key;
	union off_cpu_data data = {
		.hdr = {
			.type = PERF_RECORD_SAMPLE,
			.misc = PERF_RECORD_MISC_USER,
		},
	};
	u64 tstamp = OFF_CPU_TIMESTAMP;

	/* stop BPF-side collection while we drain the map */
	skel->bss->enabled = 0;

	evsel = evlist__find_evsel_by_str(session->evlist, OFFCPU_EVENT);
	if (evsel == NULL) {
		pr_err("%s evsel not found\n", OFFCPU_EVENT);
		return 0;
	}

	sample_type = evsel->core.attr.sample_type;

	/* refuse sample bits this synthesizer does not know how to emit */
	if (sample_type & ~OFFCPU_SAMPLE_TYPES) {
		pr_err("not supported sample type: %llx\n",
		       (unsigned long long)sample_type);
		return -1;
	}

	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
		if (evsel->core.id)
			sid = evsel->core.id[0];
	}

	fd = bpf_map__fd(skel->maps.off_cpu);
	stack = bpf_map__fd(skel->maps.stacks);
	memset(&prev, 0, sizeof(prev));

	/* iterate all keys accumulated in the off_cpu map */
	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		int n = 1;  /* start from perf_event_header */
		int ip_pos = -1;

		/* val is the accumulated off-cpu time for this key */
		bpf_map_lookup_elem(fd, &key, &val);

		if (sample_type & PERF_SAMPLE_IDENTIFIER)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_IP) {
			ip_pos = n;
			data.array[n++] = 0;  /* will be updated */
		}
		if (sample_type & PERF_SAMPLE_TID)
			data.array[n++] = (u64)key.pid << 32 | key.tgid;
		if (sample_type & PERF_SAMPLE_TIME)
			data.array[n++] = tstamp;
		if (sample_type & PERF_SAMPLE_ID)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_CPU)
			data.array[n++] = 0;
		if (sample_type & PERF_SAMPLE_PERIOD)
			data.array[n++] = val;
		if (sample_type & PERF_SAMPLE_CALLCHAIN) {
			int len = 0;

			/* data.array[n] is callchain->nr (updated later) */
			data.array[n + 1] = PERF_CONTEXT_USER;
			data.array[n + 2] = 0;

			/* stack entries are zero-terminated in the stacks map */
			bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
			while (data.array[n + 2 + len])
				len++;

			/* update length of callchain */
			data.array[n] = len + 1;

			/* update sample ip with the first callchain entry */
			if (ip_pos >= 0)
				data.array[ip_pos] = data.array[n + 2];

			/* calculate sample callchain data array length */
			n += len + 2;
		}
		if (sample_type & PERF_SAMPLE_CGROUP)
			data.array[n++] = key.cgroup_id;

		size = n * sizeof(u64);
		data.hdr.size = size;
		bytes += size;

		if (perf_data_file__write(file, &data, size) < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return bytes;
		}

		prev = key;
		/* increase dummy timestamp to sort later samples */
		tstamp++;
	}
	return bytes;
}
394