xref: /openbmc/linux/kernel/bpf/offload.c (revision 8d81cd1a)
/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>
#include <net/xdp.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;
	void *priv;
};

struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev; /* NULL when bound-only */
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

static struct rhashtable offdevs;

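/* A netdev can only service device-bound or offloaded programs if its
 * driver implements the ndo_bpf callback.
 */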
static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

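/* Create the hashtable entry tying @netdev to @offdev. @offdev may be
 * NULL for netdevs which only host bound (not offloaded) programs.
 * Caller must hold bpf_devs_lock for writing.
 */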
static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
					     struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_free;
	}

	if (offdev)
		list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	return 0;

err_free:
	kfree(ondev);
	return err;
}

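/* Tear down the device state for @prog and unlink it from its
 * bpf_offload_netdev. Caller must hold bpf_devs_lock for writing.
 */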
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

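/* Forward a map offload command (alloc or free) to the device driver
 * via ndo_bpf; RTNL must be held.
 */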
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

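/* Remove @netdev from the hashtable. If the offload device has another
 * netdev left, migrate the offloaded programs and maps to it; otherwise
 * destroy them. Caller must hold RTNL and bpf_devs_lock for writing.
 */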
static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
						struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev = NULL;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		return;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));

	/* Try to move the objects to another netdev of the device */
	if (offdev) {
		list_del(&ondev->offdev_netdevs);
		altdev = list_first_entry_or_null(&offdev->netdevs,
						  struct bpf_offload_netdev,
						  offdev_netdevs);
	}

	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
}

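/* Bind @prog to @netdev. For offloaded programs a driver-registered
 * bpf_offload_netdev must already exist; for bound-only programs an
 * offdev-less entry is created on demand.
 */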
static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;
	offload->netdev = netdev;

	ondev = bpf_offload_find_netdev(offload->netdev);
	/* When the program is offloaded, require the presence of a "true"
	 * bpf_offload_netdev; avoid the one created for the !ondev case below.
	 */
	if (bpf_prog_is_offloaded(prog->aux) && (!ondev || !ondev->offdev)) {
		err = -EINVAL;
		goto err_free;
	}
	if (!ondev) {
		/* When only binding to the device, explicitly
		 * create an entry in the hashtable.
		 */
		err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
		if (err)
			goto err_free;
		ondev = bpf_offload_find_netdev(offload->netdev);
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);

	return 0;
err_free:
	kfree(offload);
	return err;
}

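/* Called at program load time. Only SCHED_CLS and XDP programs may be
 * device-bound; BPF_F_XDP_DEV_BOUND_ONLY requests binding without
 * offload and is therefore valid for XDP only.
 */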
int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct net_device *netdev;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY)
		return -EINVAL;

	if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)
		return -EINVAL;

	netdev = dev_get_by_index(current->nsproxy->net_ns, attr->prog_ifindex);
	if (!netdev)
		return -EINVAL;

	err = bpf_dev_offload_check(netdev);
	if (err)
		goto out;

	prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);

	down_write(&bpf_devs_lock);
	err = __bpf_prog_dev_bound_init(prog, netdev);
	up_write(&bpf_devs_lock);

out:
	dev_put(netdev);
	return err;
}

int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog)
{
	int err;

	if (!bpf_prog_is_dev_bound(old_prog->aux))
		return 0;

	if (bpf_prog_is_offloaded(old_prog->aux))
		return -EINVAL;

	new_prog->aux->dev_bound = old_prog->aux->dev_bound;
	new_prog->aux->offload_requested = old_prog->aux->offload_requested;

	down_write(&bpf_devs_lock);
	if (!old_prog->aux->offload) {
		err = -EINVAL;
		goto out;
	}

	err = __bpf_prog_dev_bound_init(new_prog, old_prog->aux->offload->netdev);

out:
	up_write(&bpf_devs_lock);
	return err;
}

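/* The verifier entry points below proxy into the offload device's ops;
 * they return -ENODEV if the program has lost its netdev in the meantime.
 */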
int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(prog);
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	const struct bpf_prog_offload_ops *ops;
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (!offload->opt_failed && offload->offdev->ops->remove_insns)
			ret = offload->offdev->ops->remove_insns(env, off, cnt);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

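/* Called when the program is freed. Also drops the on-demand,
 * offdev-less bpf_offload_netdev entry if this was its last program.
 */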
void bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
{
	struct bpf_offload_netdev *ondev;
	struct net_device *netdev;

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload) {
		list_del_init(&prog->aux->offload->offloads);

		netdev = prog->aux->offload->netdev;
		__bpf_prog_offload_destroy(prog);

		ondev = bpf_offload_find_netdev(netdev);
		if (!ondev->offdev && list_empty(&ondev->progs))
			__bpf_offload_dev_netdev_unregister(NULL, netdev);
	}
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);

	return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

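/* Poison bpf_func so that accidentally running an offloaded program on
 * the host triggers a warning instead of executing translated code.
 */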
int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	int res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

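/* Allocate a map on the device. Only array and hash maps may be
 * offloaded, and the target netdev must have been registered for
 * offload by its driver beforehand.
 */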
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = bpf_map_area_alloc(sizeof(*offmap), NUMA_NO_NODE);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	bpf_map_area_free(offmap);
	return ERR_PTR(err);
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	bpf_map_area_free(offmap);
}

u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
{
	/* The memory dynamically allocated in netdev dev_ops is not counted */
	return sizeof(struct bpf_offloaded_map);
}

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	int res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

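/* A program matches @netdev when it is bound to it directly, or when
 * the program's netdev and @netdev were registered against the same
 * offload device.
 */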
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
{
	bool ret;

	if (bpf_prog_is_offloaded(lhs->aux) != bpf_prog_is_offloaded(rhs->aux))
		return false;

	down_read(&bpf_devs_lock);
	ret = lhs->aux->offload && rhs->aux->offload &&
	      lhs->aux->offload->netdev &&
	      lhs->aux->offload->netdev == rhs->aux->offload->netdev;
	up_read(&bpf_devs_lock);

	return ret;
}

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_offloaded(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	int err;

	down_write(&bpf_devs_lock);
	err = __bpf_offload_dev_netdev_register(offdev, netdev);
	up_write(&bpf_devs_lock);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	down_write(&bpf_devs_lock);
	__bpf_offload_dev_netdev_unregister(offdev, netdev);
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

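/* Driver-facing constructor for an offload device; @priv is an opaque
 * pointer the driver can retrieve later via bpf_offload_dev_priv().
 */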
struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);
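
/* Typical driver usage, as an illustrative sketch only (my_bpf_ops,
 * my_netdev and my_priv are hypothetical driver-side names):
 *
 *	offdev = bpf_offload_dev_create(&my_bpf_ops, my_priv);
 *	if (IS_ERR(offdev))
 *		return PTR_ERR(offdev);
 *	err = bpf_offload_dev_netdev_register(offdev, my_netdev);
 *	...
 *	bpf_offload_dev_netdev_unregister(offdev, my_netdev);
 *	bpf_offload_dev_destroy(offdev);
 */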

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
	return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);

void bpf_dev_bound_netdev_unregister(struct net_device *dev)
{
	struct bpf_offload_netdev *ondev;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(dev);
	if (ondev && !ondev->offdev)
		__bpf_offload_dev_netdev_unregister(NULL, ondev->netdev);
	up_write(&bpf_devs_lock);
}

int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
			      struct bpf_prog_aux *prog_aux)
{
	if (!bpf_prog_is_dev_bound(prog_aux)) {
		bpf_log(log, "metadata kfuncs require device-bound program\n");
		return -EINVAL;
	}

	if (bpf_prog_is_offloaded(prog_aux)) {
		bpf_log(log, "metadata kfuncs can't be offloaded\n");
		return -EINVAL;
	}

	return 0;
}

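/* Map an XDP metadata kfunc id to the bound netdev's implementation,
 * or return NULL when no device implementation is available.
 */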
void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id)
{
	const struct xdp_metadata_ops *ops;
	void *p = NULL;

	/* We don't hold bpf_devs_lock while resolving several
	 * kfuncs and can race with unregister_netdevice().
	 * We rely on the bpf_dev_bound_match() check at attach time
	 * to render this program unusable.
	 */
	down_read(&bpf_devs_lock);
	if (!prog->aux->offload)
		goto out;

	ops = prog->aux->offload->netdev->xdp_metadata_ops;
	if (!ops)
		goto out;

	if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_TIMESTAMP))
		p = ops->xmo_rx_timestamp;
	else if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_HASH))
		p = ops->xmo_rx_hash;
out:
	up_read(&bpf_devs_lock);

	return p;
}

static int __init bpf_offload_init(void)
{
	return rhashtable_init(&offdevs, &offdevs_params);
}

core_initcall(bpf_offload_init);