1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
3 
4 #include <vmlinux.h>
5 #include <bpf/bpf_tracing.h>
6 #include <bpf/bpf_helpers.h>
7 
8 #include "task_kfunc_common.h"
9 
char _license[] SEC("license") = "GPL";

/* Globals shared with the userspace test harness: err records the first
 * failure a program observes (0 == success); pid is the tgid of the test
 * process, compared against the current task in is_test_kfunc_task() so
 * the programs only act on events triggered by the test itself.
 */
int err, pid;
13 
14 /* Prototype for all of the program trace events below:
15  *
16  * TRACE_EVENT(task_newtask,
17  *         TP_PROTO(struct task_struct *p, u64 clone_flags)
18  */
19 
/* Weak ksym declarations: __weak lets the object load even if the kernel
 * lacks the symbol, and bpf_ksym_exists() below tests whether it resolved.
 * The ___suffix names are libbpf "flavors": the suffix is stripped before
 * BTF matching, so all three candidates below resolve against the single
 * kernel symbol bpf_task_acquire, succeeding or failing on type
 * compatibility alone.
 */
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;

struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak;
/* The two-param bpf_task_acquire doesn't exist */
struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak;
/* Incorrect type for first param */
struct task_struct *bpf_task_acquire___three(void *ctx) __ksym __weak;

/* Deliberately bogus symbol; must never resolve. */
void invalid_kfunc(void) __ksym __weak;
void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;
30 
31 static bool is_test_kfunc_task(void)
32 {
33 	int cur_pid = bpf_get_current_pid_tgid() >> 32;
34 
35 	return pid == cur_pid;
36 }
37 
/* Shared body for the acquire/release programs below: verify that the weak
 * ksyms resolved as expected, then do one acquire/release round trip on
 * @task. Failures are reported through the global err.
 */
static int test_acquire_release(struct task_struct *task)
{
	struct task_struct *acquired = NULL;

	/* These two symbols must exist for the test to be meaningful; a
	 * NULL weak ksym means the kernel (or test module) lacks them.
	 */
	if (!bpf_ksym_exists(bpf_task_acquire)) {
		err = 3;
		return 0;
	}
	if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) {
		err = 4;
		return 0;
	}
	if (bpf_ksym_exists(invalid_kfunc)) {
		/* the verifier's dead code elimination should remove this */
		err = 5;
		asm volatile ("goto -1"); /* for (;;); */
	}

	/* bpf_task_acquire() may fail and return NULL; release only on
	 * success to keep the reference count balanced.
	 */
	acquired = bpf_task_acquire(task);
	if (acquired)
		bpf_task_release(acquired);
	else
		err = 6;

	return 0;
}
64 
/* Exercise libbpf's ksym flavor relocation: of the three ___flavor
 * declarations, only ___one is type-compatible with the kernel's
 * bpf_task_acquire (see the prototypes above), so only that branch should
 * resolve and run. err != 0 flags that a bogus flavor resolved.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_kfunc_flavor_relo, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired = NULL;
	int fake_ctx = 42;

	if (bpf_ksym_exists(bpf_task_acquire___one)) {
		acquired = bpf_task_acquire___one(task);
	} else if (bpf_ksym_exists(bpf_task_acquire___two)) {
		/* Here, bpf_object__resolve_ksym_func_btf_id's find_ksym_btf_id
		 * call will find vmlinux's bpf_task_acquire, but subsequent
		 * bpf_core_types_are_compat will fail
		 */
		acquired = bpf_task_acquire___two(task, &fake_ctx);
		err = 3;
		return 0;
	} else if (bpf_ksym_exists(bpf_task_acquire___three)) {
		/* bpf_core_types_are_compat will fail similarly to above case */
		acquired = bpf_task_acquire___three(&fake_ctx);
		err = 4;
		return 0;
	}

	/* Reached via the ___one branch: balance the acquired reference. */
	if (acquired)
		bpf_task_release(acquired);
	else
		err = 5;
	return 0;
}
94 
/* Negative counterpart of the test above: with ___one absent from this
 * program, both remaining flavors are type-incompatible and must fail to
 * resolve independently of each other. err stays 0 on success.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_kfunc_flavor_relo_not_found, struct task_struct *task, u64 clone_flags)
{
	/* Neither symbol should successfully resolve.
	 * Success or failure of one ___flavor should not affect others
	 */
	if (bpf_ksym_exists(bpf_task_acquire___two))
		err = 1;
	else if (bpf_ksym_exists(bpf_task_acquire___three))
		err = 2;

	return 0;
}
108 
/* Acquire/release on the tracepoint's own task argument (a trusted
 * pointer supplied by the tp_btf program type).
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	return test_acquire_release(task);
}
117 
/* Same as above, but on the current task rather than the tracepoint
 * argument; bpf_get_current_task_btf() also yields a trusted pointer.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	return test_acquire_release(bpf_get_current_task_btf());
}
126 
/* Store an acquired task reference in the map and deliberately never take
 * it back out: ownership stays with the map's kptr field (presumably
 * dropped when the map entry is destroyed — see task_kfunc_common.h).
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone_flags)
{
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status)
		err = 1;

	return 0;
}
141 
/* Round-trip a task reference through the map: insert it, then use
 * bpf_kptr_xchg() to swap it back out (transferring ownership from the
 * map to this program) and release it here.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status) {
		err = 1;
		return 0;
	}

	v = tasks_kfunc_map_value_lookup(task);
	if (!v) {
		err = 2;
		return 0;
	}

	/* NULL in, old value out: after this the map slot is empty and we
	 * own the reference that was stored by the insert above.
	 */
	kptr = bpf_kptr_xchg(&v->task, NULL);
	if (!kptr) {
		err = 3;
		return 0;
	}

	bpf_task_release(kptr);

	return 0;
}
174 
/* Read the kptr stored in the map without removing it: dereferencing
 * v->task in place requires an RCU read-side critical section, inside
 * which the pointer can be upgraded to an owned reference with
 * bpf_task_acquire(). The map keeps its own reference throughout.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_map_acquire_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status) {
		err = 1;
		return 0;
	}

	v = tasks_kfunc_map_value_lookup(task);
	if (!v) {
		err = 2;
		return 0;
	}

	bpf_rcu_read_lock();
	kptr = v->task;
	if (!kptr) {
		err = 3;
	} else {
		/* Acquire can fail (returns NULL); only release the
		 * reference we actually obtained.
		 */
		kptr = bpf_task_acquire(kptr);
		if (!kptr)
			err = 4;
		else
			bpf_task_release(kptr);
	}
	bpf_rcu_read_unlock();

	return 0;
}
212 
/* Acquire/release directly on the current task, inline rather than via
 * the shared test_acquire_release() helper.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *current, *acquired;

	if (!is_test_kfunc_task())
		return 0;

	current = bpf_get_current_task_btf();
	acquired = bpf_task_acquire(current);
	if (acquired)
		bpf_task_release(acquired);
	else
		err = 1;

	return 0;
}
230 
/* Look up @p by pid via bpf_task_from_pid() and check the result refers
 * to a task with the same pid. bpf_task_from_pid() returns an acquired
 * reference (or NULL), which must be released on every success path.
 */
static void lookup_compare_pid(const struct task_struct *p)
{
	struct task_struct *acquired;

	acquired = bpf_task_from_pid(p->pid);
	if (!acquired) {
		err = 1;
		return;
	}

	if (acquired->pid != p->pid)
		err = 2;
	bpf_task_release(acquired);
}
245 
/* bpf_task_from_pid() lookup keyed on the newly created task's pid. */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	lookup_compare_pid(task);
	return 0;
}
255 
/* bpf_task_from_pid() lookup keyed on the current task's pid. */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	lookup_compare_pid(bpf_get_current_task_btf());
	return 0;
}
265 
/* Returns 1 if bpf_task_from_pid(@pid) finds a task (releasing the
 * acquired reference before returning), 0 otherwise.
 */
static int is_pid_lookup_valid(s32 pid)
{
	struct task_struct *acquired;

	acquired = bpf_task_from_pid(pid);
	if (acquired) {
		bpf_task_release(acquired);
		return 1;
	}

	return 0;
}
278 
/* Negative pid-lookup cases: -1 and an out-of-range value must both fail.
 * The bpf_strncmp() calls exercise bounded string comparison against
 * task->comm at different offsets/sizes; their results are deliberately
 * discarded (presumably these exist to check the verifier accepts the
 * accesses — TODO confirm against the userspace side of the test).
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	bpf_strncmp(task->comm, 12, "foo");
	bpf_strncmp(task->comm, 16, "foo");
	bpf_strncmp(&task->comm[8], 4, "foo");

	if (is_pid_lookup_valid(-1)) {
		err = 1;
		return 0;
	}

	if (is_pid_lookup_valid(0xcafef00d)) {
		err = 2;
		return 0;
	}

	return 0;
}
301 
/* Acquire via a pointer obtained by walking a trusted field: the verifier
 * must accept bpf_task_acquire() on task->group_leader. Note: no
 * is_test_kfunc_task() filter here, so this runs for every new task.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	/* task->group_leader is listed as a trusted, non-NULL field of task struct. */
	acquired = bpf_task_acquire(task->group_leader);
	if (acquired)
		bpf_task_release(acquired);
	else
		err = 1;


	return 0;
}
317