/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

struct bpf_offload_dev {
	struct list_head netdevs;
};

struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

static struct rhashtable offdevs;
static bool offdevs_inited;

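/* A netdev can host offloads only if its driver implements the
 * ->ndo_bpf() callback.
 */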
static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

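/* Look up the offload state of a netdev in the offdevs hashtable.
 * Returns NULL if the table was never initialized or the netdev was
 * never registered for offload.  Caller must hold bpf_devs_lock.
 */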
static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	if (!offdevs_inited)
		return NULL;
	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

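/* Called at program load time for device-bound programs: resolve
 * attr->prog_ifindex to a netdev, check that it is registered for
 * offload, and link the new offload state into the netdev's program
 * list.  The netdev reference is held only across setup; the entry is
 * unlinked when the program is destroyed or the netdev unregisters.
 */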
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}

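/* Dispatch a BPF command to the device through its ->ndo_bpf()
 * callback.  Requires RTNL; returns -ENODEV if the program is no
 * longer bound to a netdev.
 */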
static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
			     struct netdev_bpf *data)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct net_device *netdev;

	ASSERT_RTNL();

	if (!offload)
		return -ENODEV;
	netdev = offload->netdev;

	data->command = cmd;

	return netdev->netdev_ops->ndo_bpf(netdev, data);
}

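/* Ask the device to prepare for verification.  On success the driver
 * hands back its verifier callbacks in data.verifier.ops, and
 * dev_state records that device-side state now exists and must be
 * destroyed later.
 */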
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
	struct netdev_bpf data = {};
	int err;

	data.verifier.prog = env->prog;

	rtnl_lock();
	err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
	if (err)
		goto exit_unlock;

	env->prog->aux->offload->dev_ops = data.verifier.ops;
	env->prog->aux->offload->dev_state = true;
exit_unlock:
	rtnl_unlock();
	return err;
}

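/* Forward per-instruction verification to the device's insn_hook.
 * Runs under bpf_devs_lock so the offload cannot be torn down
 * mid-callback.
 */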
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

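/* Tear down the device-side state of a program.  Callers hold RTNL
 * (for the ndo call) and bpf_devs_lock for writing.
 */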
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};

	data.offload.prog = prog;

	if (offload->dev_state)
		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

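/* Ask the device to translate (JIT) the program after verification. */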
static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct netdev_bpf data = {};
	int ret;

	data.offload.prog = prog;

	rtnl_lock();
	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
	rtnl_unlock();

	return ret;
}

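/* Offloaded programs must never run on the host CPU.  bpf_func is
 * pointed at this stub so that an accidental host invocation triggers
 * a warning instead of silently doing nothing.
 */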
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	void *res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

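/* Map counterpart of __bpf_offload_ndo(): send a map command to the
 * device.  Requires RTNL; the caller must guarantee offmap->netdev
 * is valid.
 */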
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

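/* Allocate a device-offloaded map.  The target netdev is resolved
 * from attr->map_ifindex under RTNL (hence __dev_get_by_index()
 * without taking a reference), and the device allocates its side of
 * the map before the map is linked into the netdev's list.
 */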
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = kzalloc(sizeof(*offmap), GFP_USER);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	kfree(offmap);
	return ERR_PTR(err);
}

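/* Free the device side of a map and unlink it.  Clearing
 * offmap->netdev marks the map as orphaned, so subsequent element
 * operations fail with -ENODEV.
 */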
static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	kfree(offmap);
}

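/* Element operations are forwarded to the device's map ops under
 * bpf_devs_lock, and fail with -ENODEV once the map is orphaned.
 */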
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	void *res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

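/* A program matches a netdev if it is bound to that exact netdev, or
 * if both the program's netdev and the target belong to the same
 * offload device (e.g. two ports of one chip).
 */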
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_dev_bound(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

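/* Driver-facing API: register a netdev as an offload target belonging
 * to @offdev.  Typically called when the driver instantiates a port.
 */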
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	down_write(&bpf_devs_lock);
	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_unlock_free;
	}

	list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	up_write(&bpf_devs_lock);
	return 0;

err_unlock_free:
	up_write(&bpf_devs_lock);
	kfree(ondev);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

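/* Driver-facing API: remove a netdev from its offload device.
 * Programs and maps bound to it migrate to another netdev of the same
 * device when one exists, and are destroyed otherwise.
 */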
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		goto unlock;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
	list_del(&ondev->offdev_netdevs);

	/* Try to move the objects to another netdev of the device */
	altdev = list_first_entry_or_null(&offdev->netdevs,
					  struct bpf_offload_netdev,
					  offdev_netdevs);
	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
unlock:
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

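/* Driver-facing API: allocate an offload device handle.  The shared
 * offdevs hashtable is initialized lazily on first use, under
 * bpf_devs_lock so that concurrent creators cannot race.
 */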
struct bpf_offload_dev *bpf_offload_dev_create(void)
{
	struct bpf_offload_dev *offdev;
	int err;

	down_write(&bpf_devs_lock);
	if (!offdevs_inited) {
		err = rhashtable_init(&offdevs, &offdevs_params);
		if (err) {
			up_write(&bpf_devs_lock);
			return ERR_PTR(err);
		}
		offdevs_inited = true;
	}
	up_write(&bpf_devs_lock);

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);
670