/* xref: /openbmc/linux/kernel/bpf/offload.c (revision 3d76a4d3d4e591af3e789698affaad88a5a8e8ab) */
/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;
	void *priv;
};

struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev; /* NULL when bound-only */
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};
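
/* Data model, as implied above: one bpf_offload_netdev exists per
 * struct net_device that has device-bound objects.  It carries the
 * lists of offloaded progs and maps for that netdev and, via
 * offdev_netdevs, links all netdevs registered against the same
 * bpf_offload_dev (e.g. the ports of one multi-port NIC).  offdev is
 * NULL for netdevs that only have bound-only (non-offloaded) XDP
 * programs loaded against them.
 */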

static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};
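
/* The hashtable is keyed by the netdev pointer value itself
 * (key_len == sizeof(struct net_device *)), so lookups must pass the
 * address of a local pointer variable, as bpf_offload_find_netdev()
 * below does with rhashtable_lookup_fast(&offdevs, &netdev, ...).
 */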

static struct rhashtable offdevs;

static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
					     struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_free;
	}

	if (offdev)
		list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	return 0;

err_free:
	kfree(ondev);
	return err;
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}
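
/* Driver side of the ndo_bpf map commands, as a minimal sketch.  The
 * my_* names are hypothetical; only struct netdev_bpf, the
 * BPF_OFFLOAD_MAP_* commands and the bpf_map_dev_ops callbacks used
 * later in this file are real:
 *
 *	static const struct bpf_map_dev_ops my_map_dev_ops = {
 *		.map_get_next_key	= my_map_get_next_key,
 *		.map_lookup_elem	= my_map_lookup_elem,
 *		.map_update_elem	= my_map_update_elem,
 *		.map_delete_elem	= my_map_delete_elem,
 *	};
 *
 *	static int my_dev_ndo_bpf(struct net_device *dev,
 *				  struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case BPF_OFFLOAD_MAP_ALLOC:
 *			// allocate device resources for bpf->offmap
 *			bpf->offmap->dev_ops = &my_map_dev_ops;
 *			return 0;
 *		case BPF_OFFLOAD_MAP_FREE:
 *			// release device resources for bpf->offmap
 *			return 0;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */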

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
						struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev = NULL;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		return;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));

	/* Try to move the objects to another netdev of the device */
	if (offdev) {
		list_del(&ondev->offdev_netdevs);
		altdev = list_first_entry_or_null(&offdev->netdevs,
						  struct bpf_offload_netdev,
						  offdev_netdevs);
	}

	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
}
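
/* On unregister, objects bound to the disappearing netdev are first
 * re-homed onto any other netdev still registered against the same
 * bpf_offload_dev (e.g. another port of the same device); only when
 * no such netdev remains are the progs and maps destroyed outright.
 */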

int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY)
		return -EINVAL;

	if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		if (bpf_prog_is_offloaded(prog->aux)) {
			err = -EINVAL;
			goto err_unlock;
		}

		/* When only binding to the device, explicitly
		 * create an entry in the hashtable.
		 */
		err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
		if (err)
			goto err_unlock;
		ondev = bpf_offload_find_netdev(offload->netdev);
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}
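
/* From userspace, a program becomes device-bound by setting
 * prog_ifindex in the BPF_PROG_LOAD attributes; adding
 * BPF_F_XDP_DEV_BOUND_ONLY additionally makes it bound but not
 * offloaded, which the checks above restrict to XDP.  A minimal
 * libbpf sketch (assuming insns/insn_cnt hold a valid program):
 *
 *	LIBBPF_OPTS(bpf_prog_load_opts, opts,
 *		    .prog_ifindex = ifindex,
 *		    .prog_flags = BPF_F_XDP_DEV_BOUND_ONLY);
 *
 *	fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "xdp_meta", "GPL",
 *			   insns, insn_cnt, &opts);
 */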

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(prog);
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}
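
/* For an offloaded program the bpf_prog_offload_ops callbacks fire in
 * a fixed order: ->prepare() once before verification, ->insn_hook()
 * for every instruction the verifier walks, the optional ->finalize()
 * when verification ends, ->replace_insn()/->remove_insns() during the
 * optimization passes below, ->translate() at JIT time, and
 * ->destroy() when the program is freed.
 */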

void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	const struct bpf_prog_offload_ops *ops;
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (!offload->opt_failed && offload->offdev->ops->remove_insns)
			ret = offload->offdev->ops->remove_insns(env, off, cnt);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
{
	struct bpf_offload_netdev *ondev;
	struct net_device *netdev;

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload) {
		list_del_init(&prog->aux->offload->offloads);

		netdev = prog->aux->offload->netdev;
		__bpf_prog_offload_destroy(prog);

		ondev = bpf_offload_find_netdev(netdev);
		if (!ondev->offdev && list_empty(&ondev->progs))
			__bpf_offload_dev_netdev_unregister(NULL, netdev);
	}
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);

	return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}
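
/* bpf_prog_offload_compile() deliberately poisons prog->bpf_func: the
 * program body only exists on the device, so any accidental host-side
 * invocation triggers the WARN above and returns 0 instead of
 * executing stale or meaningless bytecode.
 */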

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	int res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = bpf_map_area_alloc(sizeof(*offmap), NUMA_NO_NODE);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	bpf_map_area_free(offmap);
	return ERR_PTR(err);
}
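
/* Note the lock ordering above: RTNL is taken before bpf_devs_lock,
 * matching the rule in the comment at the top of this file.  RTNL is
 * needed both for the ndo_bpf call and because __dev_get_by_index()
 * does not take a reference on the netdev; offmap->netdev remains
 * valid only because the unregister path clears it under both locks.
 */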

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	bpf_map_area_free(offmap);
}

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}
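
/* All four accessors above follow one pattern: take bpf_devs_lock for
 * reading, check that offmap->netdev is still set (it is cleared by
 * __bpf_map_offload_destroy() when the device goes away, making every
 * later access fail with -ENODEV), and proxy the request to the
 * dev_ops the driver installed at BPF_OFFLOAD_MAP_ALLOC time.
 */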

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	int res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}
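
/* A program "matches" a netdev if it was loaded for that exact netdev,
 * or if both netdevs hang off the same bpf_offload_dev, i.e. the
 * driver declared them interchangeable for offload purposes by
 * registering them against one device.
 */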

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_offloaded(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	int err;

	down_write(&bpf_devs_lock);
	err = __bpf_offload_dev_netdev_register(offdev, netdev);
	up_write(&bpf_devs_lock);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	down_write(&bpf_devs_lock);
	__bpf_offload_dev_netdev_unregister(offdev, netdev);
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);
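
/* Typical driver usage of the exported API, as a sketch only.  The
 * my_* names are hypothetical; the exported functions and the ops
 * structure are the ones defined in this file:
 *
 *	static const struct bpf_prog_offload_ops my_bpf_dev_ops = {
 *		.prepare	= my_verifier_prep,
 *		.insn_hook	= my_verify_insn,
 *		.translate	= my_translate,
 *		.destroy	= my_destroy,
 *	};
 *
 *	// probe: one bpf_offload_dev per device, then one netdev
 *	// registration per port
 *	bpf_dev = bpf_offload_dev_create(&my_bpf_dev_ops, priv);
 *	if (IS_ERR(bpf_dev))
 *		return PTR_ERR(bpf_dev);
 *	err = bpf_offload_dev_netdev_register(bpf_dev, netdev);
 *
 *	// remove: tear down in reverse order, under RTNL for the
 *	// netdev unregistration (it asserts RTNL internally)
 *	bpf_offload_dev_netdev_unregister(bpf_dev, netdev);
 *	bpf_offload_dev_destroy(bpf_dev);
 */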

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
	return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);

void bpf_dev_bound_netdev_unregister(struct net_device *dev)
{
	struct bpf_offload_netdev *ondev;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(dev);
	if (ondev && !ondev->offdev)
		__bpf_offload_dev_netdev_unregister(NULL, ondev->netdev);
	up_write(&bpf_devs_lock);
}

int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
			      struct bpf_prog_aux *prog_aux)
{
	if (!bpf_prog_is_dev_bound(prog_aux)) {
		bpf_log(log, "metadata kfuncs require device-bound program\n");
		return -EINVAL;
	}

	if (bpf_prog_is_offloaded(prog_aux)) {
		bpf_log(log, "metadata kfuncs can't be offloaded\n");
		return -EINVAL;
	}

	return 0;
}
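
/* XDP metadata kfuncs thus occupy a middle ground: the program must be
 * bound to a device (so the right xdp_metadata_ops can be resolved
 * below) but must still run on the host, hence the rejection of fully
 * offloaded programs above.
 */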

void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id)
{
	const struct xdp_metadata_ops *ops;
	void *p = NULL;

	/* bpf_devs_lock is not held across resolving multiple kfuncs,
	 * only per call, so we can race with unregister_netdevice().
	 * We rely on the bpf_dev_bound_match() check at attach time
	 * to render such a program unusable.
	 */
	down_read(&bpf_devs_lock);
	if (!prog->aux->offload)
		goto out;

	ops = prog->aux->offload->netdev->xdp_metadata_ops;
	if (!ops)
		goto out;

	if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_TIMESTAMP))
		p = ops->xmo_rx_timestamp;
	else if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_HASH))
		p = ops->xmo_rx_hash;
out:
	up_read(&bpf_devs_lock);

	return p;
}

static int __init bpf_offload_init(void)
{
	return rhashtable_init(&offdevs, &offdevs_params);
}

late_initcall(bpf_offload_init);