// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"

struct node_data {
	long key;
	long data;
	struct bpf_rb_node node;
};

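/* Globals read by the userspace side of the test: less() flags that it ran,
 * the remove tests record the removed node's key, and the first/remove tests
 * record node data returned by bpf_rbtree_first().
 */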
long less_callback_ran = -1;
long removed_key = -1;
long first_data[2] = {-1, -1};

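/* Keep the spin lock and the rbtree root in the same ".data.A" section;
 * every operation on groot below is performed while holding glock.
 */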
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);

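/* Ordering callback for bpf_rbtree_add(): sorts nodes by ascending key and
 * records that it was invoked so userspace can verify the callback ran.
 */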
static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *node_a;
	struct node_data *node_b;

	node_a = container_of(a, struct node_data, node);
	node_b = container_of(b, struct node_data, node);
	less_callback_ran = 1;

	return node_a->key < node_b->key;
}

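/* Allocate three nodes (keys 5, 1, 3) and add them to the tree, taking the
 * lock around each batch of bpf_rbtree_add() calls. A nonzero return value
 * identifies which allocation failed.
 */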
static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock)
{
	struct node_data *n, *m;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;
	n->key = 5;

	m = bpf_obj_new(typeof(*m));
	if (!m) {
		bpf_obj_drop(n);
		return 2;
	}
	m->key = 1;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_rbtree_add(&groot, &m->node, less);
	bpf_spin_unlock(&glock);

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 3;
	n->key = 3;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_spin_unlock(&glock);
	return 0;
}

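/* Basic add test: exercises __add_three() and returns its result. */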
SEC("tc")
long rbtree_add_nodes(void *ctx)
{
	return __add_three(&groot, &glock);
}

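/* Add two nodes, remove one of them while still holding the lock, then read
 * the removed node's key and free it with bpf_obj_drop() outside the
 * critical section.
 */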
SEC("tc")
long rbtree_add_and_remove(void *ctx)
{
	struct bpf_rb_node *res = NULL;
	struct node_data *n, *m = NULL;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		goto err_out;
	n->key = 5;

	m = bpf_obj_new(typeof(*m));
	if (!m)
		goto err_out;
	m->key = 3;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_rbtree_add(&groot, &m->node, less);
	res = bpf_rbtree_remove(&groot, &n->node);
	bpf_spin_unlock(&glock);

	if (!res)
		return 1;

	n = container_of(res, struct node_data, node);
	removed_key = n->key;
	bpf_obj_drop(n);

	return 0;
err_out:
	if (n)
		bpf_obj_drop(n);
	if (m)
		bpf_obj_drop(m);
	return 1;
}

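/* Add three nodes, use bpf_rbtree_first() to find the smallest key, remove
 * that node, and record its data and the new first node's data in
 * first_data[].
 */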
SEC("tc")
long rbtree_first_and_remove(void *ctx)
{
	struct bpf_rb_node *res = NULL;
	struct node_data *n, *m, *o;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;
	n->key = 3;
	n->data = 4;

	m = bpf_obj_new(typeof(*m));
	if (!m)
		goto err_out;
	m->key = 5;
	m->data = 6;

	o = bpf_obj_new(typeof(*o));
	if (!o)
		goto err_out;
	o->key = 1;
	o->data = 2;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_rbtree_add(&groot, &m->node, less);
	bpf_rbtree_add(&groot, &o->node, less);

	res = bpf_rbtree_first(&groot);
	if (!res) {
		bpf_spin_unlock(&glock);
		return 2;
	}

	o = container_of(res, struct node_data, node);
	first_data[0] = o->data;

	res = bpf_rbtree_remove(&groot, &o->node);
	bpf_spin_unlock(&glock);

	if (!res)
		return 5;

	o = container_of(res, struct node_data, node);
	removed_key = o->key;
	bpf_obj_drop(o);

	bpf_spin_lock(&glock);
	res = bpf_rbtree_first(&groot);
	if (!res) {
		bpf_spin_unlock(&glock);
		return 3;
	}

	o = container_of(res, struct node_data, node);
	first_data[1] = o->data;
	bpf_spin_unlock(&glock);

	return 0;
err_out:
	if (n)
		bpf_obj_drop(n);
	if (m)
		bpf_obj_drop(m);
	return 1;
}

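/* Grab two non-owning references to the same node via bpf_rbtree_first() and
 * remove the node through both of them; the second bpf_rbtree_remove() must
 * fail at runtime and return NULL.
 */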
SEC("tc")
long rbtree_api_release_aliasing(void *ctx)
{
	struct node_data *n, *m, *o;
	struct bpf_rb_node *res, *res2;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;
	n->key = 41;
	n->data = 42;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_spin_unlock(&glock);

	bpf_spin_lock(&glock);

	/* m and o point to the same node,
	 * but verifier doesn't know this
	 */
	res = bpf_rbtree_first(&groot);
	if (!res)
		goto err_out;
	o = container_of(res, struct node_data, node);

	res = bpf_rbtree_first(&groot);
	if (!res)
		goto err_out;
	m = container_of(res, struct node_data, node);

	res = bpf_rbtree_remove(&groot, &m->node);
	/* The previous remove returns an owning reference to m, which is the
	 * same node that non-owning ref o points at. We can safely try to
	 * remove o as well: the second rbtree_remove will return NULL since
	 * the node is no longer in a tree.
	 *
	 * Previously we relied on the verifier type system + rbtree_remove
	 * invalidating non-owning refs to ensure that rbtree_remove couldn't
	 * fail, but now rbtree_remove does runtime checking so we no longer
	 * invalidate non-owning refs after remove.
	 */
	res2 = bpf_rbtree_remove(&groot, &o->node);

	bpf_spin_unlock(&glock);

	if (res) {
		o = container_of(res, struct node_data, node);
		first_data[0] = o->data;
		bpf_obj_drop(o);
	}
	if (res2) {
		/* The second remove fails, so res2 is NULL and this branch
		 * doesn't execute
		 */
		m = container_of(res2, struct node_data, node);
		first_data[1] = m->data;
		bpf_obj_drop(m);
	}
	return 0;

err_out:
	bpf_spin_unlock(&glock);
	return 1;
}

char _license[] SEC("license") = "GPL";