// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

#define MAX_STACK_TRACE_DEPTH   64
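/* Scratch buffer for stack entries, shared by both iterator programs below;
 * kept global because 64 * sizeof(unsigned long) bytes would not fit on the
 * 512-byte BPF program stack.
 */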
unsigned long entries[MAX_STACK_TRACE_DEPTH] = {};
#define SIZE_OF_ULONG (sizeof(unsigned long))

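/* Dump each task's kernel stack via the task iterator's seq_file, printing
 * one symbolized entry per line (similar to /proc/<pid>/stack).
 */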
SEC("iter/task")
int dump_task_stack(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;
	long i, retlen;

	if (task == (void *)0)
		return 0;

	retlen = bpf_get_task_stack(task, entries,
				    MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, 0);
	if (retlen < 0)
		return 0;

	BPF_SEQ_PRINTF(seq, "pid: %8u num_entries: %8u\n", task->pid,
		       retlen / SIZE_OF_ULONG);
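	/* retlen is in bytes; print only the slots bpf_get_task_stack filled */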
	for (i = 0; i < MAX_STACK_TRACE_DEPTH; i++) {
		if (retlen > i * SIZE_OF_ULONG)
			BPF_SEQ_PRINTF(seq, "[<0>] %pB\n", (void *)entries[i]);
	}
	BPF_SEQ_PRINTF(seq, "\n");

	return 0;
}

int num_user_stacks = 0;

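/* Collect the user-space stack of the task and emit the raw entries with
 * bpf_seq_write(); only the current task is expected to produce a stack here.
 */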
SEC("iter/task")
int get_task_user_stacks(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;
	uint64_t buf_sz = 0;
	int64_t res;

	if (task == (void *)0)
		return 0;

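	/* On success, res is the number of bytes written into entries */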
	res = bpf_get_task_stack(task, entries,
			MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, BPF_F_USER_STACK);
	if (res <= 0)
		return 0;

	/* Only one task, the current one, should succeed */
	++num_user_stacks;

	buf_sz += res;

	/* If the verifier doesn't refine the bpf_get_task_stack result and
	 * instead assumes res is entirely unknown, this program will fail to
	 * load, as the verifier will believe the maximum buf_sz value allows
	 * reading past the end of entries in the bpf_seq_write call.
	 */
	bpf_seq_write(seq, &entries, buf_sz);
	return 0;
}