/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
	struct list_head netdevs;
	void *priv;
};

struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev; /* NULL when bound-only */
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

static struct rhashtable offdevs;

static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
					     struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_free;
	}

	if (offdev)
		list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	return 0;

err_free:
	kfree(ondev);
	return err;
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;

	if (offload->dev_state)
		offload->offdev->ops->destroy(prog);

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}
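/* Relay a map offload command to the device driver through ->ndo_bpf().
 * ndo_bpf() is an RTNL-protected op, so every caller below takes the
 * RTNL lock first and bpf_devs_lock second, never the other way round,
 * consistent with the lock-ordering comment at the top of this file.
 */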
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

static void __bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
						struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev = NULL;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		return;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));

	/* Try to move the objects to another netdev of the device */
	if (offdev) {
		list_del(&ondev->offdev_netdevs);
		altdev = list_first_entry_or_null(&offdev->netdevs,
						  struct bpf_offload_netdev,
						  offdev_netdevs);
	}

	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
}

static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;
	offload->netdev = netdev;

	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		if (bpf_prog_is_offloaded(prog->aux)) {
			err = -EINVAL;
			goto err_free;
		}

		/* When only binding to the device, explicitly
		 * create an entry in the hashtable.
		 */
		err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
		if (err)
			goto err_free;
		ondev = bpf_offload_find_netdev(offload->netdev);
	}
	offload->offdev = ondev->offdev;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);

	return 0;
err_free:
	kfree(offload);
	return err;
}
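/* Entry point from the BPF_PROG_LOAD path when attr->prog_ifindex is set.
 * Only XDP and SCHED_CLS programs may be device-bound; passing
 * BPF_F_XDP_DEV_BOUND_ONLY requests binding to the netdev without full
 * offload, and is valid for XDP only.
 */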
int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct net_device *netdev;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY)
		return -EINVAL;

	if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)
		return -EINVAL;

	netdev = dev_get_by_index(current->nsproxy->net_ns, attr->prog_ifindex);
	if (!netdev)
		return -EINVAL;

	err = bpf_dev_offload_check(netdev);
	if (err)
		goto out;

	prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);

	down_write(&bpf_devs_lock);
	err = __bpf_prog_dev_bound_init(prog, netdev);
	up_write(&bpf_devs_lock);

out:
	dev_put(netdev);
	return err;
}

int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog)
{
	int err;

	if (!bpf_prog_is_dev_bound(old_prog->aux))
		return 0;

	if (bpf_prog_is_offloaded(old_prog->aux))
		return -EINVAL;

	new_prog->aux->dev_bound = old_prog->aux->dev_bound;
	new_prog->aux->offload_requested = old_prog->aux->offload_requested;

	down_write(&bpf_devs_lock);
	if (!old_prog->aux->offload) {
		err = -EINVAL;
		goto out;
	}

	err = __bpf_prog_dev_bound_init(new_prog, old_prog->aux->offload->netdev);

out:
	up_write(&bpf_devs_lock);
	return err;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload) {
		ret = offload->offdev->ops->prepare(prog);
		offload->dev_state = !ret;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->insn_hook(env, insn_idx,
						      prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->offdev->ops->finalize)
			ret = offload->offdev->ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}
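/* The two hooks below mirror the verifier's instruction rewriting and
 * dead-code elimination in the device translation. opt_failed is sticky:
 * once a driver callback fails (or is not implemented), further rewrites
 * are skipped and the optimization pass is treated as failed.
 */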
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn)
{
	const struct bpf_prog_offload_ops *ops;
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		ops = offload->offdev->ops;
		if (!offload->opt_failed && ops->replace_insn)
			ret = ops->replace_insn(env, off, insn);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_prog_offload *offload;
	int ret = -EOPNOTSUPP;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (!offload->opt_failed && offload->offdev->ops->remove_insns)
			ret = offload->offdev->ops->remove_insns(env, off, cnt);
		offload->opt_failed |= ret;
	}
	up_read(&bpf_devs_lock);
}

void bpf_prog_dev_bound_destroy(struct bpf_prog *prog)
{
	struct bpf_offload_netdev *ondev;
	struct net_device *netdev;

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload) {
		list_del_init(&prog->aux->offload->offloads);

		netdev = prog->aux->offload->netdev;
		__bpf_prog_offload_destroy(prog);

		ondev = bpf_offload_find_netdev(netdev);
		if (!ondev->offdev && list_empty(&ondev->progs))
			__bpf_offload_dev_netdev_unregister(NULL, netdev);
	}
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	if (offload)
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);

	return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}
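/* Fill bpf_prog_info for a device-bound program: the target ifindex, the
 * owning net namespace (resolved by the bpf_prog_offload_info_fill_ns()
 * callback above and reported as a dev/inode pair), and the device JITed
 * image when user space supplied a buffer for it.
 */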
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	int res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = bpf_map_area_alloc(sizeof(*offmap), NUMA_NO_NODE);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	bpf_map_area_free(offmap);
	return ERR_PTR(err);
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	bpf_map_area_free(offmap);
}

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}
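/* Map info filling follows the same pattern as the program side above:
 * resolve the owning net namespace under RTNL + bpf_devs_lock, then
 * report its dev/inode numbers alongside the target ifindex.
 */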
struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	int res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
{
	bool ret;

	if (bpf_prog_is_offloaded(lhs->aux) != bpf_prog_is_offloaded(rhs->aux))
		return false;

	down_read(&bpf_devs_lock);
	ret = lhs->aux->offload && rhs->aux->offload &&
	      lhs->aux->offload->netdev &&
	      lhs->aux->offload->netdev == rhs->aux->offload->netdev;
	up_read(&bpf_devs_lock);

	return ret;
}

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_offloaded(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	int err;

	down_write(&bpf_devs_lock);
	err = __bpf_offload_dev_netdev_register(offdev, netdev);
	up_write(&bpf_devs_lock);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	down_write(&bpf_devs_lock);
	__bpf_offload_dev_netdev_unregister(offdev, netdev);
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
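/* Driver-facing lifecycle API. An illustrative (not prescriptive) driver
 * sequence, assuming a driver-defined "my_offload_ops" and private data:
 *
 *	bdev = bpf_offload_dev_create(&my_offload_ops, priv);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	err = bpf_offload_dev_netdev_register(bdev, netdev);
 *	...
 *	bpf_offload_dev_netdev_unregister(bdev, netdev);
 *	bpf_offload_dev_destroy(bdev);
 */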
struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
	return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);

void bpf_dev_bound_netdev_unregister(struct net_device *dev)
{
	struct bpf_offload_netdev *ondev;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(dev);
	if (ondev && !ondev->offdev)
		__bpf_offload_dev_netdev_unregister(NULL, ondev->netdev);
	up_write(&bpf_devs_lock);
}

int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
			      struct bpf_prog_aux *prog_aux)
{
	if (!bpf_prog_is_dev_bound(prog_aux)) {
		bpf_log(log, "metadata kfuncs require device-bound program\n");
		return -EINVAL;
	}

	if (bpf_prog_is_offloaded(prog_aux)) {
		bpf_log(log, "metadata kfuncs can't be offloaded\n");
		return -EINVAL;
	}

	return 0;
}

void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id)
{
	const struct xdp_metadata_ops *ops;
	void *p = NULL;

	/* We don't hold bpf_devs_lock while resolving several
	 * kfuncs and can race with the unregister_netdevice().
	 * We rely on bpf_dev_bound_match() check at attach
	 * to render this program unusable.
	 */
	down_read(&bpf_devs_lock);
	if (!prog->aux->offload)
		goto out;

	ops = prog->aux->offload->netdev->xdp_metadata_ops;
	if (!ops)
		goto out;

	if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_TIMESTAMP))
		p = ops->xmo_rx_timestamp;
	else if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_HASH))
		p = ops->xmo_rx_hash;
out:
	up_read(&bpf_devs_lock);

	return p;
}

static int __init bpf_offload_init(void)
{
	return rhashtable_init(&offdevs, &offdevs_params);
}

late_initcall(bpf_offload_init);