xref: /openbmc/linux/kernel/bpf/mprog.c (revision f14c1a14)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2023 Isovalent */
3 
4 #include <linux/bpf.h>
5 #include <linux/bpf_mprog.h>
6 
7 static int bpf_mprog_link(struct bpf_tuple *tuple,
8 			  u32 id_or_fd, u32 flags,
9 			  enum bpf_prog_type type)
10 {
11 	struct bpf_link *link = ERR_PTR(-EINVAL);
12 	bool id = flags & BPF_F_ID;
13 
14 	if (id)
15 		link = bpf_link_by_id(id_or_fd);
16 	else if (id_or_fd)
17 		link = bpf_link_get_from_fd(id_or_fd);
18 	if (IS_ERR(link))
19 		return PTR_ERR(link);
20 	if (type && link->prog->type != type) {
21 		bpf_link_put(link);
22 		return -EINVAL;
23 	}
24 
25 	tuple->link = link;
26 	tuple->prog = link->prog;
27 	return 0;
28 }
29 
30 static int bpf_mprog_prog(struct bpf_tuple *tuple,
31 			  u32 id_or_fd, u32 flags,
32 			  enum bpf_prog_type type)
33 {
34 	struct bpf_prog *prog = ERR_PTR(-EINVAL);
35 	bool id = flags & BPF_F_ID;
36 
37 	if (id)
38 		prog = bpf_prog_by_id(id_or_fd);
39 	else if (id_or_fd)
40 		prog = bpf_prog_get(id_or_fd);
41 	if (IS_ERR(prog))
42 		return PTR_ERR(prog);
43 	if (type && prog->type != type) {
44 		bpf_prog_put(prog);
45 		return -EINVAL;
46 	}
47 
48 	tuple->link = NULL;
49 	tuple->prog = prog;
50 	return 0;
51 }
52 
53 static int bpf_mprog_tuple_relative(struct bpf_tuple *tuple,
54 				    u32 id_or_fd, u32 flags,
55 				    enum bpf_prog_type type)
56 {
57 	bool link = flags & BPF_F_LINK;
58 	bool id = flags & BPF_F_ID;
59 
60 	memset(tuple, 0, sizeof(*tuple));
61 	if (link)
62 		return bpf_mprog_link(tuple, id_or_fd, flags, type);
63 	/* If no relevant flag is set and no id_or_fd was passed, then
64 	 * tuple link/prog is just NULLed. This is the case when before/
65 	 * after selects first/last position without passing fd.
66 	 */
67 	if (!id && !id_or_fd)
68 		return 0;
69 	return bpf_mprog_prog(tuple, id_or_fd, flags, type);
70 }
71 
72 static void bpf_mprog_tuple_put(struct bpf_tuple *tuple)
73 {
74 	if (tuple->link)
75 		bpf_link_put(tuple->link);
76 	else if (tuple->prog)
77 		bpf_prog_put(tuple->prog);
78 }
79 
80 /* The bpf_mprog_{replace,delete}() operate on exact idx position with the
81  * one exception that for deletion we support delete from front/back. In
82  * case of front idx is -1, in case of back idx is bpf_mprog_total(entry).
83  * Adjustment to first and last entry is trivial. The bpf_mprog_insert()
84  * we have to deal with the following cases:
85  *
86  * idx + before:
87  *
88  * Insert P4 before P3: idx for old array is 1, idx for new array is 2,
89  * hence we adjust target idx for the new array, so that memmove copies
90  * P1 and P2 to the new entry, and we insert P4 into idx 2. Inserting
91  * before P1 would have old idx -1 and new idx 0.
92  *
93  * +--+--+--+     +--+--+--+--+     +--+--+--+--+
94  * |P1|P2|P3| ==> |P1|P2|  |P3| ==> |P1|P2|P4|P3|
95  * +--+--+--+     +--+--+--+--+     +--+--+--+--+
96  *
97  * idx + after:
98  *
99  * Insert P4 after P2: idx for old array is 2, idx for new array is 2.
100  * Again, memmove copies P1 and P2 to the new entry, and we insert P4
101  * into idx 2. Inserting after P3 would have both old/new idx at 4 aka
102  * bpf_mprog_total(entry).
103  *
104  * +--+--+--+     +--+--+--+--+     +--+--+--+--+
105  * |P1|P2|P3| ==> |P1|P2|  |P3| ==> |P1|P2|P4|P3|
106  * +--+--+--+     +--+--+--+--+     +--+--+--+--+
107  */
/* Replace the element at exact position @idx of @entry with @ntuple in
 * place. The entry itself is reused (*entry_new == entry) since the
 * total count does not change. The old program's reference is dropped
 * here only for the non-link (opts) path; for a link-backed @ntuple the
 * ref handling is presumably done by the link infrastructure — and a
 * link-owned slot must not be hit via the opts path (WARN below).
 */
static int bpf_mprog_replace(struct bpf_mprog_entry *entry,
			     struct bpf_mprog_entry **entry_new,
			     struct bpf_tuple *ntuple, int idx)
{
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	struct bpf_prog *oprog;

	bpf_mprog_read(entry, idx, &fp, &cp);
	oprog = READ_ONCE(fp->prog);
	bpf_mprog_write(fp, cp, ntuple);
	if (!ntuple->link) {
		/* Opts-path replacement must not target a link-owned slot. */
		WARN_ON_ONCE(cp->link);
		bpf_prog_put(oprog);
	}
	*entry_new = entry;
	return 0;
}
126 
/* Insert @ntuple at position @idx into the peer copy of @entry; the
 * new entry is returned via *entry_new for the caller to activate.
 * See the block comment above bpf_mprog_replace() for how @idx maps
 * between the old and grown array for BPF_F_BEFORE/BPF_F_AFTER.
 * Appending at the tail (idx == total) needs no element shifting.
 */
static int bpf_mprog_insert(struct bpf_mprog_entry *entry,
			    struct bpf_mprog_entry **entry_new,
			    struct bpf_tuple *ntuple, int idx, u32 flags)
{
	int total = bpf_mprog_total(entry);
	struct bpf_mprog_entry *peer;
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;

	peer = bpf_mprog_peer(entry);
	bpf_mprog_entry_copy(peer, entry);
	if (idx == total)
		goto insert;
	else if (flags & BPF_F_BEFORE)
		/* Old-array idx is one before the new-array slot, see the
		 * "idx + before" diagram above bpf_mprog_replace().
		 */
		idx += 1;
	bpf_mprog_entry_grow(peer, idx);
insert:
	bpf_mprog_read(peer, idx, &fp, &cp);
	bpf_mprog_write(fp, cp, ntuple);
	bpf_mprog_inc(peer);
	*entry_new = peer;
	return 0;
}
150 
/* Delete the element at @idx from the peer copy of @entry; the new
 * entry is returned via *entry_new for the caller to activate.
 * idx == -1 (delete from front) and idx == total (delete from back)
 * are clamped to the actual first/last slot. The removed tuple is
 * recorded via bpf_mprog_mark_for_release() so its reference can be
 * dropped later — presumably once the update is committed (not visible
 * here; verify against bpf_mprog commit path).
 */
static int bpf_mprog_delete(struct bpf_mprog_entry *entry,
			    struct bpf_mprog_entry **entry_new,
			    struct bpf_tuple *dtuple, int idx)
{
	int total = bpf_mprog_total(entry);
	struct bpf_mprog_entry *peer;

	peer = bpf_mprog_peer(entry);
	bpf_mprog_entry_copy(peer, entry);
	if (idx == -1)
		idx = 0;
	else if (idx == total)
		idx = total - 1;
	bpf_mprog_entry_shrink(peer, idx);
	bpf_mprog_dec(peer);
	bpf_mprog_mark_for_release(peer, dtuple);
	*entry_new = peer;
	return 0;
}
170 
171 /* In bpf_mprog_pos_*() we evaluate the target position for the BPF
172  * program/link that needs to be replaced, inserted or deleted for
173  * each "rule" independently. If all rules agree on that position
174  * or existing element, then enact replacement, addition or deletion.
175  * If this is not the case, then the request cannot be satisfied and
176  * we bail out with an error.
177  */
178 static int bpf_mprog_pos_exact(struct bpf_mprog_entry *entry,
179 			       struct bpf_tuple *tuple)
180 {
181 	struct bpf_mprog_fp *fp;
182 	struct bpf_mprog_cp *cp;
183 	int i;
184 
185 	for (i = 0; i < bpf_mprog_total(entry); i++) {
186 		bpf_mprog_read(entry, i, &fp, &cp);
187 		if (tuple->prog == READ_ONCE(fp->prog))
188 			return tuple->link == cp->link ? i : -EBUSY;
189 	}
190 	return -ENOENT;
191 }
192 
193 static int bpf_mprog_pos_before(struct bpf_mprog_entry *entry,
194 				struct bpf_tuple *tuple)
195 {
196 	struct bpf_mprog_fp *fp;
197 	struct bpf_mprog_cp *cp;
198 	int i;
199 
200 	for (i = 0; i < bpf_mprog_total(entry); i++) {
201 		bpf_mprog_read(entry, i, &fp, &cp);
202 		if (tuple->prog == READ_ONCE(fp->prog) &&
203 		    (!tuple->link || tuple->link == cp->link))
204 			return i - 1;
205 	}
206 	return tuple->prog ? -ENOENT : -1;
207 }
208 
209 static int bpf_mprog_pos_after(struct bpf_mprog_entry *entry,
210 			       struct bpf_tuple *tuple)
211 {
212 	struct bpf_mprog_fp *fp;
213 	struct bpf_mprog_cp *cp;
214 	int i;
215 
216 	for (i = 0; i < bpf_mprog_total(entry); i++) {
217 		bpf_mprog_read(entry, i, &fp, &cp);
218 		if (tuple->prog == READ_ONCE(fp->prog) &&
219 		    (!tuple->link || tuple->link == cp->link))
220 			return i + 1;
221 	}
222 	return tuple->prog ? -ENOENT : bpf_mprog_total(entry);
223 }
224 
/* Attach @prog_new (optionally via @link, optionally replacing
 * @prog_old) to @entry. Each requested constraint (BPF_F_REPLACE /
 * BPF_F_BEFORE / BPF_F_AFTER, the latter two relative to @id_or_fd)
 * computes a target index independently; all of them must agree on the
 * same position or the request fails with -ERANGE. With no positional
 * flags at all the program is appended at the back. On success
 * *entry_new points to the entry to be activated by the caller.
 */
int bpf_mprog_attach(struct bpf_mprog_entry *entry,
		     struct bpf_mprog_entry **entry_new,
		     struct bpf_prog *prog_new, struct bpf_link *link,
		     struct bpf_prog *prog_old,
		     u32 flags, u32 id_or_fd, u64 revision)
{
	struct bpf_tuple rtuple, ntuple = {
		.prog = prog_new,
		.link = link,
	}, otuple = {
		.prog = prog_old,
		.link = link,
	};
	/* idx == -ERANGE doubles as "no position determined yet". */
	int ret, idx = -ERANGE, tidx;

	/* Optimistic concurrency: a non-zero caller-supplied revision
	 * must match the entry's current revision, else -ESTALE.
	 */
	if (revision && revision != bpf_mprog_revision(entry))
		return -ESTALE;
	if (bpf_mprog_exists(entry, prog_new))
		return -EEXIST;
	/* Resolve the relative anchor for before/after; BPF_F_REPLACE is
	 * masked out since id_or_fd does not name the replacement target.
	 */
	ret = bpf_mprog_tuple_relative(&rtuple, id_or_fd,
				       flags & ~BPF_F_REPLACE,
				       prog_new->type);
	if (ret)
		return ret;
	if (flags & BPF_F_REPLACE) {
		tidx = bpf_mprog_pos_exact(entry, &otuple);
		if (tidx < 0) {
			ret = tidx;
			goto out;
		}
		idx = tidx;
	}
	if (flags & BPF_F_BEFORE) {
		tidx = bpf_mprog_pos_before(entry, &rtuple);
		/* tidx == -1 (front) is valid; disagreement with an
		 * earlier constraint is -ERANGE.
		 */
		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
			ret = tidx < -1 ? tidx : -ERANGE;
			goto out;
		}
		idx = tidx;
	}
	if (flags & BPF_F_AFTER) {
		tidx = bpf_mprog_pos_after(entry, &rtuple);
		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
			ret = tidx < 0 ? tidx : -ERANGE;
			goto out;
		}
		idx = tidx;
	}
	/* No positional constraint given: plain append at the back. Any
	 * leftover flags or a resolved relative prog would be ambiguous.
	 */
	if (idx < -1) {
		if (rtuple.prog || flags) {
			ret = -EINVAL;
			goto out;
		}
		idx = bpf_mprog_total(entry);
		flags = BPF_F_AFTER;
	}
	if (idx >= bpf_mprog_max()) {
		ret = -ERANGE;
		goto out;
	}
	if (flags & BPF_F_REPLACE)
		ret = bpf_mprog_replace(entry, entry_new, &ntuple, idx);
	else
		ret = bpf_mprog_insert(entry, entry_new, &ntuple, idx, flags);
out:
	bpf_mprog_tuple_put(&rtuple);
	return ret;
}
293 
/* Resolve the victim tuple for deletion at @idx, where idx == -1 /
 * idx == total are clamped to the first/last slot (delete from
 * front/back). Populates @tuple from the entry, or merely cross-checks
 * it when the caller already named the target.
 */
static int bpf_mprog_fetch(struct bpf_mprog_entry *entry,
			   struct bpf_tuple *tuple, int idx)
{
	int total = bpf_mprog_total(entry);
	struct bpf_mprog_cp *cp;
	struct bpf_mprog_fp *fp;
	struct bpf_prog *prog;
	struct bpf_link *link;

	if (idx == -1)
		idx = 0;
	else if (idx == total)
		idx = total - 1;
	bpf_mprog_read(entry, idx, &fp, &cp);
	prog = READ_ONCE(fp->prog);
	link = cp->link;
	/* The deletion request can either be without filled tuple in which
	 * case it gets populated here based on idx, or with filled tuple
	 * where the only thing we end up doing is the WARN_ON_ONCE() assert.
	 * If we hit a BPF link at the given index, it must not be removed
	 * from opts path.
	 */
	if (link && !tuple->link)
		return -EBUSY;
	WARN_ON_ONCE(tuple->prog && tuple->prog != prog);
	WARN_ON_ONCE(tuple->link && tuple->link != link);
	tuple->prog = prog;
	tuple->link = link;
	return 0;
}
324 
325 int bpf_mprog_detach(struct bpf_mprog_entry *entry,
326 		     struct bpf_mprog_entry **entry_new,
327 		     struct bpf_prog *prog, struct bpf_link *link,
328 		     u32 flags, u32 id_or_fd, u64 revision)
329 {
330 	struct bpf_tuple rtuple, dtuple = {
331 		.prog = prog,
332 		.link = link,
333 	};
334 	int ret, idx = -ERANGE, tidx;
335 
336 	if (flags & BPF_F_REPLACE)
337 		return -EINVAL;
338 	if (revision && revision != bpf_mprog_revision(entry))
339 		return -ESTALE;
340 	ret = bpf_mprog_tuple_relative(&rtuple, id_or_fd, flags,
341 				       prog ? prog->type :
342 				       BPF_PROG_TYPE_UNSPEC);
343 	if (ret)
344 		return ret;
345 	if (dtuple.prog) {
346 		tidx = bpf_mprog_pos_exact(entry, &dtuple);
347 		if (tidx < 0) {
348 			ret = tidx;
349 			goto out;
350 		}
351 		idx = tidx;
352 	}
353 	if (flags & BPF_F_BEFORE) {
354 		tidx = bpf_mprog_pos_before(entry, &rtuple);
355 		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
356 			ret = tidx < -1 ? tidx : -ERANGE;
357 			goto out;
358 		}
359 		idx = tidx;
360 	}
361 	if (flags & BPF_F_AFTER) {
362 		tidx = bpf_mprog_pos_after(entry, &rtuple);
363 		if (tidx < -1 || (idx >= -1 && tidx != idx)) {
364 			ret = tidx < 0 ? tidx : -ERANGE;
365 			goto out;
366 		}
367 		idx = tidx;
368 	}
369 	if (idx < -1) {
370 		if (rtuple.prog || flags) {
371 			ret = -EINVAL;
372 			goto out;
373 		}
374 		idx = bpf_mprog_total(entry);
375 		flags = BPF_F_AFTER;
376 	}
377 	if (idx >= bpf_mprog_max()) {
378 		ret = -ERANGE;
379 		goto out;
380 	}
381 	ret = bpf_mprog_fetch(entry, &dtuple, idx);
382 	if (ret)
383 		goto out;
384 	ret = bpf_mprog_delete(entry, entry_new, &dtuple, idx);
385 out:
386 	bpf_mprog_tuple_put(&rtuple);
387 	return ret;
388 }
389 
/* BPF_PROG_QUERY handler for mprog-managed attach points: copies the
 * revision, total count and per-slot program/link ids out to user
 * space. Attach flags are always reported as zero here. Returns
 * -ENOSPC when the user buffer holds fewer slots than are attached
 * (ids are still filled up to the user-supplied count).
 */
int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
		    struct bpf_mprog_entry *entry)
{
	u32 __user *uprog_flags, *ulink_flags;
	u32 __user *uprog_id, *ulink_id;
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
	struct bpf_prog *prog;
	const u32 flags = 0;
	int i, ret = 0;
	u32 id, count;
	u64 revision;

	if (attr->query.query_flags || attr->query.attach_flags)
		return -EINVAL;
	revision = bpf_mprog_revision(entry);
	count = bpf_mprog_total(entry);
	/* Metadata is copied out unconditionally, before any id arrays. */
	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.count, &count, sizeof(count)))
		return -EFAULT;
	uprog_id = u64_to_user_ptr(attr->query.prog_ids);
	uprog_flags = u64_to_user_ptr(attr->query.prog_attach_flags);
	ulink_id = u64_to_user_ptr(attr->query.link_ids);
	ulink_flags = u64_to_user_ptr(attr->query.link_attach_flags);
	/* Count-only query (no/zero-sized buffer) or nothing attached. */
	if (attr->query.count == 0 || !uprog_id || !count)
		return 0;
	/* User buffer too small: fill what fits, signal -ENOSPC. */
	if (attr->query.count < count) {
		count = attr->query.count;
		ret = -ENOSPC;
	}
	for (i = 0; i < bpf_mprog_max(); i++) {
		bpf_mprog_read(entry, i, &fp, &cp);
		prog = READ_ONCE(fp->prog);
		if (!prog)
			break;
		id = prog->aux->id;
		if (copy_to_user(uprog_id + i, &id, sizeof(id)))
			return -EFAULT;
		if (uprog_flags &&
		    copy_to_user(uprog_flags + i, &flags, sizeof(flags)))
			return -EFAULT;
		/* Link id is 0 for slots attached via the opts path. */
		id = cp->link ? cp->link->id : 0;
		if (ulink_id &&
		    copy_to_user(ulink_id + i, &id, sizeof(id)))
			return -EFAULT;
		if (ulink_flags &&
		    copy_to_user(ulink_flags + i, &flags, sizeof(flags)))
			return -EFAULT;
		if (i + 1 == count)
			break;
	}
	return ret;
}
446