// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"

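/* A single node_data allocation can be referenced from the rbtree (via r),
 * the linked list (via l) and a map kptr slot at the same time: the embedded
 * bpf_refcount tracks how many such owners currently hold a reference.
 */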
struct node_data {
	long key;
	long list_data;
	struct bpf_rb_node r;
	struct bpf_list_node l;
	struct bpf_refcount ref;
};

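/* Nodes can also be stashed in a map: each map_value holds a node_data
 * __kptr, and the stashed_nodes array provides two such slots, which the
 * programs below fill and empty with bpf_kptr_xchg().
 */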
struct map_value {
	struct node_data __kptr *node;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 2);
} stashed_nodes SEC(".maps");

struct node_acquire {
	long key;
	long data;
	struct bpf_rb_node node;
	struct bpf_refcount refcount;
};

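/* Each private() section pairs a bpf_spin_lock with the collection(s) it
 * protects: 'lock' guards 'root' and 'head', 'alock' guards 'aroot', and
 * 'block' guards 'broot'. The matching lock must be held around every
 * rbtree/list operation on those collections.
 */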
#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock lock;
private(A) struct bpf_rb_root root __contains(node_data, r);
private(A) struct bpf_list_head head __contains(node_data, l);

private(B) struct bpf_spin_lock alock;
private(B) struct bpf_rb_root aroot __contains(node_acquire, node);

private(C) struct bpf_spin_lock block;
private(C) struct bpf_rb_root broot __contains(node_data, r);

static bool less(struct bpf_rb_node *node_a, const struct bpf_rb_node *node_b)
{
	struct node_data *a;
	struct node_data *b;

	a = container_of(node_a, struct node_data, r);
	b = container_of(node_b, struct node_data, r);

	return a->key < b->key;
}

static bool less_a(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_acquire *node_a;
	struct node_acquire *node_b;

	node_a = container_of(a, struct node_acquire, node);
	node_b = container_of(b, struct node_acquire, node);

	return node_a->key < node_b->key;
}

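/* Allocate a node, take a second reference with bpf_refcount_acquire(), then
 * insert one reference into the rbtree and the other into the list. On
 * success the node is reachable from both collections with refcount == 2.
 */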
static long __insert_in_tree_and_list(struct bpf_list_head *head,
				      struct bpf_rb_root *root,
				      struct bpf_spin_lock *lock)
{
	struct node_data *n, *m;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return -1;

	m = bpf_refcount_acquire(n);
	m->key = 123;
	m->list_data = 456;

	bpf_spin_lock(lock);
	if (bpf_rbtree_add(root, &n->r, less)) {
		/* Failure to insert - unexpected */
		bpf_spin_unlock(lock);
		bpf_obj_drop(m);
		return -2;
	}
	bpf_spin_unlock(lock);

	bpf_spin_lock(lock);
	if (bpf_list_push_front(head, &m->l)) {
		/* Failure to insert - unexpected */
		bpf_spin_unlock(lock);
		return -3;
	}
	bpf_spin_unlock(lock);
	return 0;
}

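/* Allocate a node with the given key, stash one reference in stashed_nodes
 * slot idx via bpf_kptr_xchg(), and insert the other reference into the
 * rbtree. Bails out with an error if the exchange returns a previously
 * stashed node.
 */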
static long __stash_map_insert_tree(int idx, int val, struct bpf_rb_root *root,
				    struct bpf_spin_lock *lock)
{
	struct map_value *mapval;
	struct node_data *n, *m;

	mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
	if (!mapval)
		return -1;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return -2;

	n->key = val;
	m = bpf_refcount_acquire(n);

	n = bpf_kptr_xchg(&mapval->node, n);
	if (n) {
		bpf_obj_drop(n);
		bpf_obj_drop(m);
		return -3;
	}

	bpf_spin_lock(lock);
	if (bpf_rbtree_add(root, &m->r, less)) {
		/* Failure to insert - unexpected */
		bpf_spin_unlock(lock);
		return -4;
	}
	bpf_spin_unlock(lock);
	return 0;
}

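/* Return the key of the leftmost rbtree node. If remove_from_tree is true,
 * also remove that node from the tree and drop the tree's reference to it.
 */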
static long __read_from_tree(struct bpf_rb_root *root,
			     struct bpf_spin_lock *lock,
			     bool remove_from_tree)
{
	struct bpf_rb_node *rb;
	struct node_data *n;
	long res = -99;

	bpf_spin_lock(lock);

	rb = bpf_rbtree_first(root);
	if (!rb) {
		bpf_spin_unlock(lock);
		return -1;
	}

	n = container_of(rb, struct node_data, r);
	res = n->key;

	if (!remove_from_tree) {
		bpf_spin_unlock(lock);
		return res;
	}

	rb = bpf_rbtree_remove(root, rb);
	bpf_spin_unlock(lock);
	if (!rb)
		return -2;
	n = container_of(rb, struct node_data, r);
	bpf_obj_drop(n);
	return res;
}

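/* Pop the front list node and return its list_data. If remove_from_list is
 * false the node is pushed onto the back of the list; otherwise the list's
 * reference to it is dropped.
 */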
static long __read_from_list(struct bpf_list_head *head,
			     struct bpf_spin_lock *lock,
			     bool remove_from_list)
{
	struct bpf_list_node *l;
	struct node_data *n;
	long res = -99;

	bpf_spin_lock(lock);

	l = bpf_list_pop_front(head);
	if (!l) {
		bpf_spin_unlock(lock);
		return -1;
	}

	n = container_of(l, struct node_data, l);
	res = n->list_data;

	if (!remove_from_list) {
		if (bpf_list_push_back(head, &n->l)) {
			bpf_spin_unlock(lock);
			return -2;
		}
	}

	bpf_spin_unlock(lock);

	if (remove_from_list)
		bpf_obj_drop(n);
	return res;
}

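/* Exchange the node out of stashed_nodes slot idx, return its key, and drop
 * the reference that the map was holding.
 */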
static long __read_from_unstash(int idx)
{
	struct node_data *n = NULL;
	struct map_value *mapval;
	long val = -99;

	mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
	if (!mapval)
		return -1;

	n = bpf_kptr_xchg(&mapval->node, n);
	if (!n)
		return -2;

	val = n->key;
	bpf_obj_drop(n);
	return val;
}

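/* Generates a tc program that inserts a node into both tree and list, then
 * reads it back from each. Expected retval 579 == 123 (key read from the
 * tree) + 456 (list_data read from the list).
 */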
#define INSERT_READ_BOTH(rem_tree, rem_list, desc)			\
SEC("tc")								\
__description(desc)							\
__success __retval(579)							\
long insert_and_remove_tree_##rem_tree##_list_##rem_list(void *ctx)	\
{									\
	long err, tree_data, list_data;					\
									\
	err = __insert_in_tree_and_list(&head, &root, &lock);		\
	if (err)							\
		return err;						\
									\
	err = __read_from_tree(&root, &lock, rem_tree);			\
	if (err < 0)							\
		return err;						\
	else								\
		tree_data = err;					\
									\
	err = __read_from_list(&head, &lock, rem_list);			\
	if (err < 0)							\
		return err;						\
	else								\
		list_data = err;					\
									\
	return tree_data + list_data;					\
}

/* After a successful insert of struct node_data into both collections:
 *   - it should have refcount == 2
 *   - removing / not removing the node_data from one collection after
 *     reading should have no effect on the ability to read from / remove
 *     it from the other collection
 */
INSERT_READ_BOTH(true, true, "insert_read_both: remove from tree + list");
INSERT_READ_BOTH(false, false, "insert_read_both: remove from neither");
INSERT_READ_BOTH(true, false, "insert_read_both: remove from tree");
INSERT_READ_BOTH(false, true, "insert_read_both: remove from list");

#undef INSERT_READ_BOTH
#define INSERT_READ_BOTH(rem_tree, rem_list, desc)			\
SEC("tc")								\
__description(desc)							\
__success __retval(579)							\
long insert_and_remove_lf_tree_##rem_tree##_list_##rem_list(void *ctx)	\
{									\
	long err, tree_data, list_data;					\
									\
	err = __insert_in_tree_and_list(&head, &root, &lock);		\
	if (err)							\
		return err;						\
									\
	err = __read_from_list(&head, &lock, rem_list);			\
	if (err < 0)							\
		return err;						\
	else								\
		list_data = err;					\
									\
	err = __read_from_tree(&root, &lock, rem_tree);			\
	if (err < 0)							\
		return err;						\
	else								\
		tree_data = err;					\
									\
	return tree_data + list_data;					\
}

/* Similar to insert_read_both, but the list data is read and possibly
 * removed first.
 *
 * Results should be no different from reading and possibly removing the
 * rbtree node first.
 */
INSERT_READ_BOTH(true, true, "insert_read_both_list_first: remove from tree + list");
INSERT_READ_BOTH(false, false, "insert_read_both_list_first: remove from neither");
INSERT_READ_BOTH(true, false, "insert_read_both_list_first: remove from tree");
INSERT_READ_BOTH(false, true, "insert_read_both_list_first: remove from list");

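/* Generates a tc program that read-and-removes from the same collection
 * twice. Expected retval -1: the second read finds the collection empty and
 * the read helper returns -1.
 */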
#define INSERT_DOUBLE_READ_AND_DEL(read_fn, read_root, desc)		\
SEC("tc")								\
__description(desc)							\
__success __retval(-1)							\
long insert_double_##read_fn##_and_del_##read_root(void *ctx)		\
{									\
	long err, list_data;						\
									\
	err = __insert_in_tree_and_list(&head, &root, &lock);		\
	if (err)							\
		return err;						\
									\
	err = read_fn(&read_root, &lock, true);				\
	if (err < 0)							\
		return err;						\
	else								\
		list_data = err;					\
									\
	err = read_fn(&read_root, &lock, true);				\
	if (err < 0)							\
		return err;						\
									\
	return err + list_data;						\
}

/* Insert into both tree and list, then try reading-and-removing from one of
 * them twice.
 *
 * The second read-and-remove should fail on the read step since the node
 * has already been removed.
 */
INSERT_DOUBLE_READ_AND_DEL(__read_from_tree, root, "insert_double_del: 2x read-and-del from tree");
INSERT_DOUBLE_READ_AND_DEL(__read_from_list, head, "insert_double_del: 2x read-and-del from list");

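/* Generates a tc program that stashes a node with key 42, inserts it into
 * the tree, then reads the key back from both the tree and the unstashed
 * kptr. Expected retval 84 == 42 + 42.
 */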
#define INSERT_STASH_READ(rem_tree, desc)				\
SEC("tc")								\
__description(desc)							\
__success __retval(84)							\
long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx)		\
{									\
	long err, tree_data, map_data;					\
									\
	err = __stash_map_insert_tree(0, 42, &root, &lock);		\
	if (err)							\
		return err;						\
									\
	err = __read_from_tree(&root, &lock, rem_tree);			\
	if (err < 0)							\
		return err;						\
	else								\
		tree_data = err;					\
									\
	err = __read_from_unstash(0);					\
	if (err < 0)							\
		return err;						\
	else								\
		map_data = err;						\
									\
	return tree_data + map_data;					\
}

/* Stash a refcounted node in a map_value, insert the same node into the
 * tree, then read data from the tree and then from the unstashed map_value,
 * possibly removing from the tree.
 *
 * Removing from the tree should have no effect on the validity of the
 * map_value's kptr.
 */
INSERT_STASH_READ(true, "insert_stash_read: remove from tree");
INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree");

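/* Illustrative sketch added during editing, not part of the original test
 * matrix: the minimal bpf_refcount lifecycle using only kfuncs already used
 * above. bpf_obj_new() returns an owning reference, bpf_refcount_acquire()
 * takes a second one, and each reference is released independently with
 * bpf_obj_drop(). The program name is arbitrary.
 */
SEC("tc")
__success
long refcount_lifecycle_sketch(void *ctx)
{
	struct node_data *n, *m;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;

	/* n is an owning reference; m becomes a second, independent one */
	m = bpf_refcount_acquire(n);
	m->key = 1;

	bpf_obj_drop(n);
	bpf_obj_drop(m);
	return 0;
}
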
367 SEC("tc")
368 __success
369 long rbtree_refcounted_node_ref_escapes(void *ctx)
370 {
371 	struct node_acquire *n, *m;
372 
373 	n = bpf_obj_new(typeof(*n));
374 	if (!n)
375 		return 1;
376 
377 	bpf_spin_lock(&alock);
378 	bpf_rbtree_add(&aroot, &n->node, less_a);
379 	m = bpf_refcount_acquire(n);
380 	bpf_spin_unlock(&alock);
381 	if (!m)
382 		return 2;
383 
384 	m->key = 2;
385 	bpf_obj_drop(m);
386 	return 0;
387 }
388 
389 SEC("tc")
390 __success
391 long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
392 {
393 	struct node_acquire *n, *m;
394 
395 	n = bpf_obj_new(typeof(*n));
396 	if (!n)
397 		return 1;
398 
399 	m = bpf_refcount_acquire(n);
400 	m->key = 2;
401 
402 	bpf_spin_lock(&alock);
403 	bpf_rbtree_add(&aroot, &n->node, less_a);
404 	bpf_spin_unlock(&alock);
405 
406 	bpf_obj_drop(m);
407 
408 	return 0;
409 }
410 
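/* Stash n in stashed_nodes slot idx, which is expected to be empty. Returns
 * 1 (dropping n) if the map lookup fails, or 2 (dropping the displaced node)
 * if the slot already held something.
 */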
static long __stash_map_empty_xchg(struct node_data *n, int idx)
{
	struct map_value *mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);

	if (!mapval) {
		bpf_obj_drop(n);
		return 1;
	}
	n = bpf_kptr_xchg(&mapval->node, n);
	if (n) {
		bpf_obj_drop(n);
		return 2;
	}
	return 0;
}

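/* The three programs below are run in sequence by the wrong_owner test:
 *   - a1 allocates a node, takes a second reference, and stashes the two
 *     references in map slots 0 and 1
 *   - b unstashes slot 0 and inserts the node into broot (under block)
 *   - a2 unstashes slot 1, pushes the node onto head to obtain a non-owning
 *     reference, then attempts bpf_rbtree_remove() from root. Since the
 *     node's rb_node is owned by broot rather than root, the remove is
 *     expected to fail and return NULL.
 */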
427 SEC("tc")
428 long rbtree_wrong_owner_remove_fail_a1(void *ctx)
429 {
430 	struct node_data *n, *m;
431 
432 	n = bpf_obj_new(typeof(*n));
433 	if (!n)
434 		return 1;
435 	m = bpf_refcount_acquire(n);
436 
437 	if (__stash_map_empty_xchg(n, 0)) {
438 		bpf_obj_drop(m);
439 		return 2;
440 	}
441 
442 	if (__stash_map_empty_xchg(m, 1))
443 		return 3;
444 
445 	return 0;
446 }
447 
448 SEC("tc")
449 long rbtree_wrong_owner_remove_fail_b(void *ctx)
450 {
451 	struct map_value *mapval;
452 	struct node_data *n;
453 	int idx = 0;
454 
455 	mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
456 	if (!mapval)
457 		return 1;
458 
459 	n = bpf_kptr_xchg(&mapval->node, NULL);
460 	if (!n)
461 		return 2;
462 
463 	bpf_spin_lock(&block);
464 
465 	bpf_rbtree_add(&broot, &n->r, less);
466 
467 	bpf_spin_unlock(&block);
468 	return 0;
469 }
470 
471 SEC("tc")
472 long rbtree_wrong_owner_remove_fail_a2(void *ctx)
473 {
474 	struct map_value *mapval;
475 	struct bpf_rb_node *res;
476 	struct node_data *m;
477 	int idx = 1;
478 
479 	mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
480 	if (!mapval)
481 		return 1;
482 
483 	m = bpf_kptr_xchg(&mapval->node, NULL);
484 	if (!m)
485 		return 2;
486 	bpf_spin_lock(&lock);
487 
488 	/* make m non-owning ref */
489 	bpf_list_push_back(&head, &m->l);
490 	res = bpf_rbtree_remove(&root, &m->r);
491 
492 	bpf_spin_unlock(&lock);
493 	if (res) {
494 		bpf_obj_drop(container_of(res, struct node_data, r));
495 		return 3;
496 	}
497 	return 0;
498 }
499 
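/* Illustrative sketch added during editing, not part of the original test
 * matrix: after a successful bpf_rbtree_add() the passed pointer becomes a
 * non-owning reference, which may still be read until the associated lock is
 * released. Uses only the types, roots and kfuncs defined above; the program
 * name is arbitrary.
 */
SEC("tc")
__success
long rbtree_non_owning_read_sketch(void *ctx)
{
	struct node_acquire *n;
	long key;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;
	n->key = 10;

	bpf_spin_lock(&alock);
	bpf_rbtree_add(&aroot, &n->node, less_a);
	/* n is now non-owning; reading through it is still allowed here */
	key = n->key;
	bpf_spin_unlock(&alock);

	return key == 10 ? 0 : 2;
}
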
char _license[] SEC("license") = "GPL";