1a39e17b2SJakub Kicinski /* 20cd3cbedSJakub Kicinski * Copyright (C) 2017-2018 Netronome Systems, Inc. 3a39e17b2SJakub Kicinski * 4a39e17b2SJakub Kicinski * This software is licensed under the GNU General License Version 2, 5a39e17b2SJakub Kicinski * June 1991 as shown in the file COPYING in the top-level directory of this 6a39e17b2SJakub Kicinski * source tree. 7a39e17b2SJakub Kicinski * 8a39e17b2SJakub Kicinski * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" 9a39e17b2SJakub Kicinski * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, 10a39e17b2SJakub Kicinski * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 11a39e17b2SJakub Kicinski * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE 12a39e17b2SJakub Kicinski * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME 13a39e17b2SJakub Kicinski * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 14a39e17b2SJakub Kicinski */ 15a39e17b2SJakub Kicinski 16ab3f0063SJakub Kicinski #include <linux/bpf.h> 17ab3f0063SJakub Kicinski #include <linux/bpf_verifier.h> 18ab3f0063SJakub Kicinski #include <linux/bug.h> 19675fc275SJakub Kicinski #include <linux/kdev_t.h> 20ab3f0063SJakub Kicinski #include <linux/list.h> 219fd7c555SJakub Kicinski #include <linux/lockdep.h> 22ab3f0063SJakub Kicinski #include <linux/netdevice.h> 23ab3f0063SJakub Kicinski #include <linux/printk.h> 24675fc275SJakub Kicinski #include <linux/proc_ns.h> 259fd7c555SJakub Kicinski #include <linux/rhashtable.h> 26ab3f0063SJakub Kicinski #include <linux/rtnetlink.h> 27e0d3974aSJakub Kicinski #include <linux/rwsem.h> 28ab3f0063SJakub Kicinski 299fd7c555SJakub Kicinski /* Protects offdevs, members of bpf_offload_netdev and offload members 30a3884572SJakub Kicinski * of all progs. 31e0d3974aSJakub Kicinski * RTNL lock cannot be taken when holding this lock. 
32e0d3974aSJakub Kicinski */ 33e0d3974aSJakub Kicinski static DECLARE_RWSEM(bpf_devs_lock); 349fd7c555SJakub Kicinski 35602144c2SJakub Kicinski struct bpf_offload_dev { 361385d755SQuentin Monnet const struct bpf_prog_offload_ops *ops; 37602144c2SJakub Kicinski struct list_head netdevs; 38dd27c2e3SJakub Kicinski void *priv; 39602144c2SJakub Kicinski }; 40602144c2SJakub Kicinski 419fd7c555SJakub Kicinski struct bpf_offload_netdev { 429fd7c555SJakub Kicinski struct rhash_head l; 439fd7c555SJakub Kicinski struct net_device *netdev; 442b3486bcSStanislav Fomichev struct bpf_offload_dev *offdev; /* NULL when bound-only */ 459fd7c555SJakub Kicinski struct list_head progs; 469fd7c555SJakub Kicinski struct list_head maps; 47602144c2SJakub Kicinski struct list_head offdev_netdevs; 489fd7c555SJakub Kicinski }; 499fd7c555SJakub Kicinski 509fd7c555SJakub Kicinski static const struct rhashtable_params offdevs_params = { 519fd7c555SJakub Kicinski .nelem_hint = 4, 529fd7c555SJakub Kicinski .key_len = sizeof(struct net_device *), 539fd7c555SJakub Kicinski .key_offset = offsetof(struct bpf_offload_netdev, netdev), 549fd7c555SJakub Kicinski .head_offset = offsetof(struct bpf_offload_netdev, l), 559fd7c555SJakub Kicinski .automatic_shrinking = true, 569fd7c555SJakub Kicinski }; 579fd7c555SJakub Kicinski 589fd7c555SJakub Kicinski static struct rhashtable offdevs; 59ab3f0063SJakub Kicinski 605bc2d55cSJakub Kicinski static int bpf_dev_offload_check(struct net_device *netdev) 615bc2d55cSJakub Kicinski { 625bc2d55cSJakub Kicinski if (!netdev) 635bc2d55cSJakub Kicinski return -EINVAL; 645bc2d55cSJakub Kicinski if (!netdev->netdev_ops->ndo_bpf) 655bc2d55cSJakub Kicinski return -EOPNOTSUPP; 665bc2d55cSJakub Kicinski return 0; 675bc2d55cSJakub Kicinski } 685bc2d55cSJakub Kicinski 699fd7c555SJakub Kicinski static struct bpf_offload_netdev * 709fd7c555SJakub Kicinski bpf_offload_find_netdev(struct net_device *netdev) 719fd7c555SJakub Kicinski { 729fd7c555SJakub Kicinski 
lockdep_assert_held(&bpf_devs_lock); 739fd7c555SJakub Kicinski 749fd7c555SJakub Kicinski return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params); 759fd7c555SJakub Kicinski } 769fd7c555SJakub Kicinski 7789bbc53aSStanislav Fomichev static int __bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, 7889bbc53aSStanislav Fomichev struct net_device *netdev) 7989bbc53aSStanislav Fomichev { 8089bbc53aSStanislav Fomichev struct bpf_offload_netdev *ondev; 8189bbc53aSStanislav Fomichev int err; 8289bbc53aSStanislav Fomichev 8389bbc53aSStanislav Fomichev ondev = kzalloc(sizeof(*ondev), GFP_KERNEL); 8489bbc53aSStanislav Fomichev if (!ondev) 8589bbc53aSStanislav Fomichev return -ENOMEM; 8689bbc53aSStanislav Fomichev 8789bbc53aSStanislav Fomichev ondev->netdev = netdev; 8889bbc53aSStanislav Fomichev ondev->offdev = offdev; 8989bbc53aSStanislav Fomichev INIT_LIST_HEAD(&ondev->progs); 9089bbc53aSStanislav Fomichev INIT_LIST_HEAD(&ondev->maps); 9189bbc53aSStanislav Fomichev 9289bbc53aSStanislav Fomichev err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params); 9389bbc53aSStanislav Fomichev if (err) { 9489bbc53aSStanislav Fomichev netdev_warn(netdev, "failed to register for BPF offload\n"); 952b3486bcSStanislav Fomichev goto err_free; 9689bbc53aSStanislav Fomichev } 9789bbc53aSStanislav Fomichev 982b3486bcSStanislav Fomichev if (offdev) 9989bbc53aSStanislav Fomichev list_add(&ondev->offdev_netdevs, &offdev->netdevs); 10089bbc53aSStanislav Fomichev return 0; 10189bbc53aSStanislav Fomichev 1022b3486bcSStanislav Fomichev err_free: 10389bbc53aSStanislav Fomichev kfree(ondev); 10489bbc53aSStanislav Fomichev return err; 10589bbc53aSStanislav Fomichev } 10689bbc53aSStanislav Fomichev 10789bbc53aSStanislav Fomichev static void __bpf_prog_offload_destroy(struct bpf_prog *prog) 10889bbc53aSStanislav Fomichev { 10989bbc53aSStanislav Fomichev struct bpf_prog_offload *offload = prog->aux->offload; 11089bbc53aSStanislav Fomichev 11189bbc53aSStanislav Fomichev if 
(offload->dev_state) 11289bbc53aSStanislav Fomichev offload->offdev->ops->destroy(prog); 11389bbc53aSStanislav Fomichev 11489bbc53aSStanislav Fomichev /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */ 11589bbc53aSStanislav Fomichev bpf_prog_free_id(prog, true); 11689bbc53aSStanislav Fomichev 11789bbc53aSStanislav Fomichev list_del_init(&offload->offloads); 11889bbc53aSStanislav Fomichev kfree(offload); 11989bbc53aSStanislav Fomichev prog->aux->offload = NULL; 12089bbc53aSStanislav Fomichev } 12189bbc53aSStanislav Fomichev 12289bbc53aSStanislav Fomichev static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap, 12389bbc53aSStanislav Fomichev enum bpf_netdev_command cmd) 12489bbc53aSStanislav Fomichev { 12589bbc53aSStanislav Fomichev struct netdev_bpf data = {}; 12689bbc53aSStanislav Fomichev struct net_device *netdev; 12789bbc53aSStanislav Fomichev 12889bbc53aSStanislav Fomichev ASSERT_RTNL(); 12989bbc53aSStanislav Fomichev 13089bbc53aSStanislav Fomichev data.command = cmd; 13189bbc53aSStanislav Fomichev data.offmap = offmap; 13289bbc53aSStanislav Fomichev /* Caller must make sure netdev is valid */ 13389bbc53aSStanislav Fomichev netdev = offmap->netdev; 13489bbc53aSStanislav Fomichev 13589bbc53aSStanislav Fomichev return netdev->netdev_ops->ndo_bpf(netdev, &data); 13689bbc53aSStanislav Fomichev } 13789bbc53aSStanislav Fomichev 13889bbc53aSStanislav Fomichev static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap) 13989bbc53aSStanislav Fomichev { 14089bbc53aSStanislav Fomichev WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE)); 14189bbc53aSStanislav Fomichev /* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */ 14289bbc53aSStanislav Fomichev bpf_map_free_id(&offmap->map, true); 14389bbc53aSStanislav Fomichev list_del_init(&offmap->offloads); 14489bbc53aSStanislav Fomichev offmap->netdev = NULL; 14589bbc53aSStanislav Fomichev } 14689bbc53aSStanislav Fomichev 14789bbc53aSStanislav Fomichev static void 
__bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, 14889bbc53aSStanislav Fomichev struct net_device *netdev) 14989bbc53aSStanislav Fomichev { 1502b3486bcSStanislav Fomichev struct bpf_offload_netdev *ondev, *altdev = NULL; 15189bbc53aSStanislav Fomichev struct bpf_offloaded_map *offmap, *mtmp; 15289bbc53aSStanislav Fomichev struct bpf_prog_offload *offload, *ptmp; 15389bbc53aSStanislav Fomichev 15489bbc53aSStanislav Fomichev ASSERT_RTNL(); 15589bbc53aSStanislav Fomichev 15689bbc53aSStanislav Fomichev ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params); 15789bbc53aSStanislav Fomichev if (WARN_ON(!ondev)) 1582b3486bcSStanislav Fomichev return; 15989bbc53aSStanislav Fomichev 16089bbc53aSStanislav Fomichev WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params)); 16189bbc53aSStanislav Fomichev 16289bbc53aSStanislav Fomichev /* Try to move the objects to another netdev of the device */ 1632b3486bcSStanislav Fomichev if (offdev) { 1642b3486bcSStanislav Fomichev list_del(&ondev->offdev_netdevs); 16589bbc53aSStanislav Fomichev altdev = list_first_entry_or_null(&offdev->netdevs, 16689bbc53aSStanislav Fomichev struct bpf_offload_netdev, 16789bbc53aSStanislav Fomichev offdev_netdevs); 1682b3486bcSStanislav Fomichev } 1692b3486bcSStanislav Fomichev 17089bbc53aSStanislav Fomichev if (altdev) { 17189bbc53aSStanislav Fomichev list_for_each_entry(offload, &ondev->progs, offloads) 17289bbc53aSStanislav Fomichev offload->netdev = altdev->netdev; 17389bbc53aSStanislav Fomichev list_splice_init(&ondev->progs, &altdev->progs); 17489bbc53aSStanislav Fomichev 17589bbc53aSStanislav Fomichev list_for_each_entry(offmap, &ondev->maps, offloads) 17689bbc53aSStanislav Fomichev offmap->netdev = altdev->netdev; 17789bbc53aSStanislav Fomichev list_splice_init(&ondev->maps, &altdev->maps); 17889bbc53aSStanislav Fomichev } else { 17989bbc53aSStanislav Fomichev list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads) 18089bbc53aSStanislav Fomichev 
__bpf_prog_offload_destroy(offload->prog); 18189bbc53aSStanislav Fomichev list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads) 18289bbc53aSStanislav Fomichev __bpf_map_offload_destroy(offmap); 18389bbc53aSStanislav Fomichev } 18489bbc53aSStanislav Fomichev 18589bbc53aSStanislav Fomichev WARN_ON(!list_empty(&ondev->progs)); 18689bbc53aSStanislav Fomichev WARN_ON(!list_empty(&ondev->maps)); 18789bbc53aSStanislav Fomichev kfree(ondev); 18889bbc53aSStanislav Fomichev } 18989bbc53aSStanislav Fomichev 1902b3486bcSStanislav Fomichev int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr) 191ab3f0063SJakub Kicinski { 1929fd7c555SJakub Kicinski struct bpf_offload_netdev *ondev; 1930a9c1991SJakub Kicinski struct bpf_prog_offload *offload; 1945bc2d55cSJakub Kicinski int err; 195ab3f0063SJakub Kicinski 196649f11dcSJakub Kicinski if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS && 197649f11dcSJakub Kicinski attr->prog_type != BPF_PROG_TYPE_XDP) 198649f11dcSJakub Kicinski return -EINVAL; 199ab3f0063SJakub Kicinski 2002b3486bcSStanislav Fomichev if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY) 2012b3486bcSStanislav Fomichev return -EINVAL; 2022b3486bcSStanislav Fomichev 2032b3486bcSStanislav Fomichev if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS && 2042b3486bcSStanislav Fomichev attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY) 205ab3f0063SJakub Kicinski return -EINVAL; 206ab3f0063SJakub Kicinski 207ab3f0063SJakub Kicinski offload = kzalloc(sizeof(*offload), GFP_USER); 208ab3f0063SJakub Kicinski if (!offload) 209ab3f0063SJakub Kicinski return -ENOMEM; 210ab3f0063SJakub Kicinski 211ab3f0063SJakub Kicinski offload->prog = prog; 212ab3f0063SJakub Kicinski 213e0d3974aSJakub Kicinski offload->netdev = dev_get_by_index(current->nsproxy->net_ns, 214e0d3974aSJakub Kicinski attr->prog_ifindex); 2155bc2d55cSJakub Kicinski err = bpf_dev_offload_check(offload->netdev); 2165bc2d55cSJakub Kicinski if (err) 2175bc2d55cSJakub Kicinski goto err_maybe_put; 218ab3f0063SJakub 
Kicinski 2192b3486bcSStanislav Fomichev prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY); 2202b3486bcSStanislav Fomichev 221e0d3974aSJakub Kicinski down_write(&bpf_devs_lock); 2229fd7c555SJakub Kicinski ondev = bpf_offload_find_netdev(offload->netdev); 2239fd7c555SJakub Kicinski if (!ondev) { 2242b3486bcSStanislav Fomichev if (bpf_prog_is_offloaded(prog->aux)) { 2255bc2d55cSJakub Kicinski err = -EINVAL; 226e0d3974aSJakub Kicinski goto err_unlock; 2275bc2d55cSJakub Kicinski } 2282b3486bcSStanislav Fomichev 2292b3486bcSStanislav Fomichev /* When only binding to the device, explicitly 2302b3486bcSStanislav Fomichev * create an entry in the hashtable. 2312b3486bcSStanislav Fomichev */ 2322b3486bcSStanislav Fomichev err = __bpf_offload_dev_netdev_register(NULL, offload->netdev); 2332b3486bcSStanislav Fomichev if (err) 2342b3486bcSStanislav Fomichev goto err_unlock; 2352b3486bcSStanislav Fomichev ondev = bpf_offload_find_netdev(offload->netdev); 2362b3486bcSStanislav Fomichev } 237341b3e7bSQuentin Monnet offload->offdev = ondev->offdev; 238ab3f0063SJakub Kicinski prog->aux->offload = offload; 2399fd7c555SJakub Kicinski list_add_tail(&offload->offloads, &ondev->progs); 240e0d3974aSJakub Kicinski dev_put(offload->netdev); 241e0d3974aSJakub Kicinski up_write(&bpf_devs_lock); 242ab3f0063SJakub Kicinski 243ab3f0063SJakub Kicinski return 0; 244e0d3974aSJakub Kicinski err_unlock: 245e0d3974aSJakub Kicinski up_write(&bpf_devs_lock); 2465bc2d55cSJakub Kicinski err_maybe_put: 2475bc2d55cSJakub Kicinski if (offload->netdev) 248e0d3974aSJakub Kicinski dev_put(offload->netdev); 249e0d3974aSJakub Kicinski kfree(offload); 2505bc2d55cSJakub Kicinski return err; 251ab3f0063SJakub Kicinski } 252ab3f0063SJakub Kicinski 253a40a2632SQuentin Monnet int bpf_prog_offload_verifier_prep(struct bpf_prog *prog) 254ab3f0063SJakub Kicinski { 25500db12c3SQuentin Monnet struct bpf_prog_offload *offload; 25600db12c3SQuentin Monnet int ret = -ENODEV; 257ab3f0063SJakub 
Kicinski 25800db12c3SQuentin Monnet down_read(&bpf_devs_lock); 259a40a2632SQuentin Monnet offload = prog->aux->offload; 260592ee43fSColin Ian King if (offload) { 26116a8cb5cSQuentin Monnet ret = offload->offdev->ops->prepare(prog); 26200db12c3SQuentin Monnet offload->dev_state = !ret; 263592ee43fSColin Ian King } 26400db12c3SQuentin Monnet up_read(&bpf_devs_lock); 265ab3f0063SJakub Kicinski 26600db12c3SQuentin Monnet return ret; 267ab3f0063SJakub Kicinski } 268ab3f0063SJakub Kicinski 269cae1927cSJakub Kicinski int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, 270cae1927cSJakub Kicinski int insn_idx, int prev_insn_idx) 271cae1927cSJakub Kicinski { 2720a9c1991SJakub Kicinski struct bpf_prog_offload *offload; 273cae1927cSJakub Kicinski int ret = -ENODEV; 274cae1927cSJakub Kicinski 275cae1927cSJakub Kicinski down_read(&bpf_devs_lock); 276cae1927cSJakub Kicinski offload = env->prog->aux->offload; 277ce3b9db4SJakub Kicinski if (offload) 278341b3e7bSQuentin Monnet ret = offload->offdev->ops->insn_hook(env, insn_idx, 279341b3e7bSQuentin Monnet prev_insn_idx); 280cae1927cSJakub Kicinski up_read(&bpf_devs_lock); 281cae1927cSJakub Kicinski 282cae1927cSJakub Kicinski return ret; 283cae1927cSJakub Kicinski } 284cae1927cSJakub Kicinski 285c941ce9cSQuentin Monnet int bpf_prog_offload_finalize(struct bpf_verifier_env *env) 286c941ce9cSQuentin Monnet { 287c941ce9cSQuentin Monnet struct bpf_prog_offload *offload; 288c941ce9cSQuentin Monnet int ret = -ENODEV; 289c941ce9cSQuentin Monnet 290c941ce9cSQuentin Monnet down_read(&bpf_devs_lock); 291c941ce9cSQuentin Monnet offload = env->prog->aux->offload; 292c941ce9cSQuentin Monnet if (offload) { 2936dc18fa6SQuentin Monnet if (offload->offdev->ops->finalize) 2946dc18fa6SQuentin Monnet ret = offload->offdev->ops->finalize(env); 295c941ce9cSQuentin Monnet else 296c941ce9cSQuentin Monnet ret = 0; 297c941ce9cSQuentin Monnet } 298c941ce9cSQuentin Monnet up_read(&bpf_devs_lock); 299c941ce9cSQuentin Monnet 300c941ce9cSQuentin Monnet 
return ret; 301c941ce9cSQuentin Monnet } 302c941ce9cSQuentin Monnet 30308ca90afSJakub Kicinski void 30408ca90afSJakub Kicinski bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off, 30508ca90afSJakub Kicinski struct bpf_insn *insn) 30608ca90afSJakub Kicinski { 30708ca90afSJakub Kicinski const struct bpf_prog_offload_ops *ops; 30808ca90afSJakub Kicinski struct bpf_prog_offload *offload; 30908ca90afSJakub Kicinski int ret = -EOPNOTSUPP; 31008ca90afSJakub Kicinski 31108ca90afSJakub Kicinski down_read(&bpf_devs_lock); 31208ca90afSJakub Kicinski offload = env->prog->aux->offload; 31308ca90afSJakub Kicinski if (offload) { 31408ca90afSJakub Kicinski ops = offload->offdev->ops; 31508ca90afSJakub Kicinski if (!offload->opt_failed && ops->replace_insn) 31608ca90afSJakub Kicinski ret = ops->replace_insn(env, off, insn); 31708ca90afSJakub Kicinski offload->opt_failed |= ret; 31808ca90afSJakub Kicinski } 31908ca90afSJakub Kicinski up_read(&bpf_devs_lock); 32008ca90afSJakub Kicinski } 32108ca90afSJakub Kicinski 32208ca90afSJakub Kicinski void 32308ca90afSJakub Kicinski bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) 32408ca90afSJakub Kicinski { 32508ca90afSJakub Kicinski struct bpf_prog_offload *offload; 32608ca90afSJakub Kicinski int ret = -EOPNOTSUPP; 32708ca90afSJakub Kicinski 32808ca90afSJakub Kicinski down_read(&bpf_devs_lock); 32908ca90afSJakub Kicinski offload = env->prog->aux->offload; 33008ca90afSJakub Kicinski if (offload) { 33108ca90afSJakub Kicinski if (!offload->opt_failed && offload->offdev->ops->remove_insns) 33208ca90afSJakub Kicinski ret = offload->offdev->ops->remove_insns(env, off, cnt); 33308ca90afSJakub Kicinski offload->opt_failed |= ret; 33408ca90afSJakub Kicinski } 33508ca90afSJakub Kicinski up_read(&bpf_devs_lock); 33608ca90afSJakub Kicinski } 33708ca90afSJakub Kicinski 3382b3486bcSStanislav Fomichev void bpf_prog_dev_bound_destroy(struct bpf_prog *prog) 339ab3f0063SJakub Kicinski { 3402b3486bcSStanislav 
Fomichev struct bpf_offload_netdev *ondev; 3412b3486bcSStanislav Fomichev struct net_device *netdev; 3422b3486bcSStanislav Fomichev 3432b3486bcSStanislav Fomichev rtnl_lock(); 344e0d3974aSJakub Kicinski down_write(&bpf_devs_lock); 3452b3486bcSStanislav Fomichev if (prog->aux->offload) { 3462b3486bcSStanislav Fomichev list_del_init(&prog->aux->offload->offloads); 3472b3486bcSStanislav Fomichev 3482b3486bcSStanislav Fomichev netdev = prog->aux->offload->netdev; 349ab3f0063SJakub Kicinski __bpf_prog_offload_destroy(prog); 3502b3486bcSStanislav Fomichev 3512b3486bcSStanislav Fomichev ondev = bpf_offload_find_netdev(netdev); 3522b3486bcSStanislav Fomichev if (!ondev->offdev && list_empty(&ondev->progs)) 3532b3486bcSStanislav Fomichev __bpf_offload_dev_netdev_unregister(NULL, netdev); 3542b3486bcSStanislav Fomichev } 355e0d3974aSJakub Kicinski up_write(&bpf_devs_lock); 3562b3486bcSStanislav Fomichev rtnl_unlock(); 357ab3f0063SJakub Kicinski } 358ab3f0063SJakub Kicinski 359ab3f0063SJakub Kicinski static int bpf_prog_offload_translate(struct bpf_prog *prog) 360ab3f0063SJakub Kicinski { 361b07ade27SQuentin Monnet struct bpf_prog_offload *offload; 362b07ade27SQuentin Monnet int ret = -ENODEV; 363ab3f0063SJakub Kicinski 364b07ade27SQuentin Monnet down_read(&bpf_devs_lock); 365b07ade27SQuentin Monnet offload = prog->aux->offload; 366b07ade27SQuentin Monnet if (offload) 36716a8cb5cSQuentin Monnet ret = offload->offdev->ops->translate(prog); 368b07ade27SQuentin Monnet up_read(&bpf_devs_lock); 369ab3f0063SJakub Kicinski 370ab3f0063SJakub Kicinski return ret; 371ab3f0063SJakub Kicinski } 372ab3f0063SJakub Kicinski 373ab3f0063SJakub Kicinski static unsigned int bpf_prog_warn_on_exec(const void *ctx, 374ab3f0063SJakub Kicinski const struct bpf_insn *insn) 375ab3f0063SJakub Kicinski { 376ab3f0063SJakub Kicinski WARN(1, "attempt to execute device eBPF program on the host!"); 377ab3f0063SJakub Kicinski return 0; 378ab3f0063SJakub Kicinski } 379ab3f0063SJakub Kicinski 380ab3f0063SJakub 
Kicinski int bpf_prog_offload_compile(struct bpf_prog *prog) 381ab3f0063SJakub Kicinski { 382ab3f0063SJakub Kicinski prog->bpf_func = bpf_prog_warn_on_exec; 383ab3f0063SJakub Kicinski 384ab3f0063SJakub Kicinski return bpf_prog_offload_translate(prog); 385ab3f0063SJakub Kicinski } 386ab3f0063SJakub Kicinski 387675fc275SJakub Kicinski struct ns_get_path_bpf_prog_args { 388675fc275SJakub Kicinski struct bpf_prog *prog; 389675fc275SJakub Kicinski struct bpf_prog_info *info; 390675fc275SJakub Kicinski }; 391675fc275SJakub Kicinski 392675fc275SJakub Kicinski static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data) 393675fc275SJakub Kicinski { 394675fc275SJakub Kicinski struct ns_get_path_bpf_prog_args *args = private_data; 395675fc275SJakub Kicinski struct bpf_prog_aux *aux = args->prog->aux; 396675fc275SJakub Kicinski struct ns_common *ns; 397675fc275SJakub Kicinski struct net *net; 398675fc275SJakub Kicinski 399675fc275SJakub Kicinski rtnl_lock(); 400675fc275SJakub Kicinski down_read(&bpf_devs_lock); 401675fc275SJakub Kicinski 402675fc275SJakub Kicinski if (aux->offload) { 403675fc275SJakub Kicinski args->info->ifindex = aux->offload->netdev->ifindex; 404675fc275SJakub Kicinski net = dev_net(aux->offload->netdev); 405675fc275SJakub Kicinski get_net(net); 406675fc275SJakub Kicinski ns = &net->ns; 407675fc275SJakub Kicinski } else { 408675fc275SJakub Kicinski args->info->ifindex = 0; 409675fc275SJakub Kicinski ns = NULL; 410675fc275SJakub Kicinski } 411675fc275SJakub Kicinski 412675fc275SJakub Kicinski up_read(&bpf_devs_lock); 413675fc275SJakub Kicinski rtnl_unlock(); 414675fc275SJakub Kicinski 415675fc275SJakub Kicinski return ns; 416675fc275SJakub Kicinski } 417675fc275SJakub Kicinski 418675fc275SJakub Kicinski int bpf_prog_offload_info_fill(struct bpf_prog_info *info, 419675fc275SJakub Kicinski struct bpf_prog *prog) 420675fc275SJakub Kicinski { 421675fc275SJakub Kicinski struct ns_get_path_bpf_prog_args args = { 422675fc275SJakub Kicinski .prog = 
prog, 423675fc275SJakub Kicinski .info = info, 424675fc275SJakub Kicinski }; 425fcfb126dSJiong Wang struct bpf_prog_aux *aux = prog->aux; 426675fc275SJakub Kicinski struct inode *ns_inode; 427675fc275SJakub Kicinski struct path ns_path; 428fcfb126dSJiong Wang char __user *uinsns; 429ce623f89SAleksa Sarai int res; 430fcfb126dSJiong Wang u32 ulen; 431675fc275SJakub Kicinski 432675fc275SJakub Kicinski res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args); 433ce623f89SAleksa Sarai if (res) { 434675fc275SJakub Kicinski if (!info->ifindex) 435675fc275SJakub Kicinski return -ENODEV; 436ce623f89SAleksa Sarai return res; 437675fc275SJakub Kicinski } 438675fc275SJakub Kicinski 439fcfb126dSJiong Wang down_read(&bpf_devs_lock); 440fcfb126dSJiong Wang 441fcfb126dSJiong Wang if (!aux->offload) { 442fcfb126dSJiong Wang up_read(&bpf_devs_lock); 443fcfb126dSJiong Wang return -ENODEV; 444fcfb126dSJiong Wang } 445fcfb126dSJiong Wang 446fcfb126dSJiong Wang ulen = info->jited_prog_len; 447fcfb126dSJiong Wang info->jited_prog_len = aux->offload->jited_len; 448e20d3a05SJohannes Krude if (info->jited_prog_len && ulen) { 449fcfb126dSJiong Wang uinsns = u64_to_user_ptr(info->jited_prog_insns); 450fcfb126dSJiong Wang ulen = min_t(u32, info->jited_prog_len, ulen); 451fcfb126dSJiong Wang if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) { 452fcfb126dSJiong Wang up_read(&bpf_devs_lock); 453fcfb126dSJiong Wang return -EFAULT; 454fcfb126dSJiong Wang } 455fcfb126dSJiong Wang } 456fcfb126dSJiong Wang 457fcfb126dSJiong Wang up_read(&bpf_devs_lock); 458fcfb126dSJiong Wang 459675fc275SJakub Kicinski ns_inode = ns_path.dentry->d_inode; 460675fc275SJakub Kicinski info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev); 461675fc275SJakub Kicinski info->netns_ino = ns_inode->i_ino; 462675fc275SJakub Kicinski path_put(&ns_path); 463675fc275SJakub Kicinski 464675fc275SJakub Kicinski return 0; 465675fc275SJakub Kicinski } 466675fc275SJakub Kicinski 467ab3f0063SJakub Kicinski const 
struct bpf_prog_ops bpf_offload_prog_ops = { 468ab3f0063SJakub Kicinski }; 469ab3f0063SJakub Kicinski 470a3884572SJakub Kicinski struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) 471a3884572SJakub Kicinski { 472a3884572SJakub Kicinski struct net *net = current->nsproxy->net_ns; 4739fd7c555SJakub Kicinski struct bpf_offload_netdev *ondev; 474a3884572SJakub Kicinski struct bpf_offloaded_map *offmap; 475a3884572SJakub Kicinski int err; 476a3884572SJakub Kicinski 477a3884572SJakub Kicinski if (!capable(CAP_SYS_ADMIN)) 478a3884572SJakub Kicinski return ERR_PTR(-EPERM); 4797a0ef693SJakub Kicinski if (attr->map_type != BPF_MAP_TYPE_ARRAY && 4807a0ef693SJakub Kicinski attr->map_type != BPF_MAP_TYPE_HASH) 481a3884572SJakub Kicinski return ERR_PTR(-EINVAL); 482a3884572SJakub Kicinski 48373cf09a3SYafang Shao offmap = bpf_map_area_alloc(sizeof(*offmap), NUMA_NO_NODE); 484a3884572SJakub Kicinski if (!offmap) 485a3884572SJakub Kicinski return ERR_PTR(-ENOMEM); 486a3884572SJakub Kicinski 487a3884572SJakub Kicinski bpf_map_init_from_attr(&offmap->map, attr); 488a3884572SJakub Kicinski 489a3884572SJakub Kicinski rtnl_lock(); 490a3884572SJakub Kicinski down_write(&bpf_devs_lock); 491a3884572SJakub Kicinski offmap->netdev = __dev_get_by_index(net, attr->map_ifindex); 492a3884572SJakub Kicinski err = bpf_dev_offload_check(offmap->netdev); 493a3884572SJakub Kicinski if (err) 494a3884572SJakub Kicinski goto err_unlock; 495a3884572SJakub Kicinski 4969fd7c555SJakub Kicinski ondev = bpf_offload_find_netdev(offmap->netdev); 4979fd7c555SJakub Kicinski if (!ondev) { 4989fd7c555SJakub Kicinski err = -EINVAL; 4999fd7c555SJakub Kicinski goto err_unlock; 5009fd7c555SJakub Kicinski } 5019fd7c555SJakub Kicinski 502a3884572SJakub Kicinski err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC); 503a3884572SJakub Kicinski if (err) 504a3884572SJakub Kicinski goto err_unlock; 505a3884572SJakub Kicinski 5069fd7c555SJakub Kicinski list_add_tail(&offmap->offloads, &ondev->maps); 
507a3884572SJakub Kicinski up_write(&bpf_devs_lock); 508a3884572SJakub Kicinski rtnl_unlock(); 509a3884572SJakub Kicinski 510a3884572SJakub Kicinski return &offmap->map; 511a3884572SJakub Kicinski 512a3884572SJakub Kicinski err_unlock: 513a3884572SJakub Kicinski up_write(&bpf_devs_lock); 514a3884572SJakub Kicinski rtnl_unlock(); 51573cf09a3SYafang Shao bpf_map_area_free(offmap); 516a3884572SJakub Kicinski return ERR_PTR(err); 517a3884572SJakub Kicinski } 518a3884572SJakub Kicinski 519a3884572SJakub Kicinski void bpf_map_offload_map_free(struct bpf_map *map) 520a3884572SJakub Kicinski { 521a3884572SJakub Kicinski struct bpf_offloaded_map *offmap = map_to_offmap(map); 522a3884572SJakub Kicinski 523a3884572SJakub Kicinski rtnl_lock(); 524a3884572SJakub Kicinski down_write(&bpf_devs_lock); 525a3884572SJakub Kicinski if (offmap->netdev) 526a3884572SJakub Kicinski __bpf_map_offload_destroy(offmap); 527a3884572SJakub Kicinski up_write(&bpf_devs_lock); 528a3884572SJakub Kicinski rtnl_unlock(); 529a3884572SJakub Kicinski 53073cf09a3SYafang Shao bpf_map_area_free(offmap); 531a3884572SJakub Kicinski } 532a3884572SJakub Kicinski 533a3884572SJakub Kicinski int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value) 534a3884572SJakub Kicinski { 535a3884572SJakub Kicinski struct bpf_offloaded_map *offmap = map_to_offmap(map); 536a3884572SJakub Kicinski int ret = -ENODEV; 537a3884572SJakub Kicinski 538a3884572SJakub Kicinski down_read(&bpf_devs_lock); 539a3884572SJakub Kicinski if (offmap->netdev) 540a3884572SJakub Kicinski ret = offmap->dev_ops->map_lookup_elem(offmap, key, value); 541a3884572SJakub Kicinski up_read(&bpf_devs_lock); 542a3884572SJakub Kicinski 543a3884572SJakub Kicinski return ret; 544a3884572SJakub Kicinski } 545a3884572SJakub Kicinski 546a3884572SJakub Kicinski int bpf_map_offload_update_elem(struct bpf_map *map, 547a3884572SJakub Kicinski void *key, void *value, u64 flags) 548a3884572SJakub Kicinski { 549a3884572SJakub Kicinski struct 
bpf_offloaded_map *offmap = map_to_offmap(map); 550a3884572SJakub Kicinski int ret = -ENODEV; 551a3884572SJakub Kicinski 552a3884572SJakub Kicinski if (unlikely(flags > BPF_EXIST)) 553a3884572SJakub Kicinski return -EINVAL; 554a3884572SJakub Kicinski 555a3884572SJakub Kicinski down_read(&bpf_devs_lock); 556a3884572SJakub Kicinski if (offmap->netdev) 557a3884572SJakub Kicinski ret = offmap->dev_ops->map_update_elem(offmap, key, value, 558a3884572SJakub Kicinski flags); 559a3884572SJakub Kicinski up_read(&bpf_devs_lock); 560a3884572SJakub Kicinski 561a3884572SJakub Kicinski return ret; 562a3884572SJakub Kicinski } 563a3884572SJakub Kicinski 564a3884572SJakub Kicinski int bpf_map_offload_delete_elem(struct bpf_map *map, void *key) 565a3884572SJakub Kicinski { 566a3884572SJakub Kicinski struct bpf_offloaded_map *offmap = map_to_offmap(map); 567a3884572SJakub Kicinski int ret = -ENODEV; 568a3884572SJakub Kicinski 569a3884572SJakub Kicinski down_read(&bpf_devs_lock); 570a3884572SJakub Kicinski if (offmap->netdev) 571a3884572SJakub Kicinski ret = offmap->dev_ops->map_delete_elem(offmap, key); 572a3884572SJakub Kicinski up_read(&bpf_devs_lock); 573a3884572SJakub Kicinski 574a3884572SJakub Kicinski return ret; 575a3884572SJakub Kicinski } 576a3884572SJakub Kicinski 577a3884572SJakub Kicinski int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key) 578a3884572SJakub Kicinski { 579a3884572SJakub Kicinski struct bpf_offloaded_map *offmap = map_to_offmap(map); 580a3884572SJakub Kicinski int ret = -ENODEV; 581a3884572SJakub Kicinski 582a3884572SJakub Kicinski down_read(&bpf_devs_lock); 583a3884572SJakub Kicinski if (offmap->netdev) 584a3884572SJakub Kicinski ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key); 585a3884572SJakub Kicinski up_read(&bpf_devs_lock); 586a3884572SJakub Kicinski 587a3884572SJakub Kicinski return ret; 588a3884572SJakub Kicinski } 589a3884572SJakub Kicinski 59052775b33SJakub Kicinski struct ns_get_path_bpf_map_args { 
	struct bpf_offloaded_map *offmap;	/* map whose netns is queried */
	struct bpf_map_info *info;		/* info struct being filled in */
};

/* ns_get_path_cb() callback: report the net namespace of the map's
 * backing netdev.  Fills args->info->ifindex as a side effect.
 * Takes RTNL before bpf_devs_lock -- the reverse order is forbidden
 * (see the comment above bpf_devs_lock).
 * Returns the netns (with a reference taken via get_net()) or NULL --
 * and ifindex 0 -- when the netdev is gone, i.e. the map is orphaned.
 */
static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		/* Device has been unregistered from under the map. */
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

/* Fill the netns-related fields (ifindex, netns_dev, netns_ino) of
 * @info for an offloaded @map.
 * Returns 0 on success, -ENODEV if the map's netdev went away (no
 * namespace to report), or the error from ns_get_path_cb() otherwise.
 */
int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap = map_to_offmap(map),
		.info = info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	int res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (res) {
		/* Zero ifindex means the fill callback found no netdev. */
		if (!info->ifindex)
			return -ENODEV;
		return res;
	}

	/* Identify the namespace to user space via the nsfs inode. */
	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

/* Check whether @prog can be used on @netdev: either the program is
 * bound to this exact netdev, or both netdevs are registered to the
 * same offload device (e.g. presumably ports of one ASIC -- driver
 * defined).  Must be called with bpf_devs_lock held.
 */
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

/* Locked wrapper around __bpf_offload_dev_match() for drivers. */
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

/* Check whether @prog may use @map.  Maps which are not offloaded are
 * allowed only if they pass bpf_map_offload_neutral(); offloaded maps
 * must live on a netdev matching the program's (same device or same
 * offload device).
 */
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_offloaded(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

/* Register @netdev with @offdev; locked wrapper around the __ variant
 * (defined elsewhere in this file).  Called by offload-capable drivers.
 */
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	int err;

	down_write(&bpf_devs_lock);
	err = __bpf_offload_dev_netdev_register(offdev, netdev);
	up_write(&bpf_devs_lock);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

/* Unregister @netdev from @offdev; locked wrapper around the __ variant
 * (defined elsewhere in this file).
 */
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	down_write(&bpf_devs_lock);
	__bpf_offload_dev_netdev_unregister(offdev, netdev);
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);

/* Allocate an offload device handle for a driver.
 * @ops:  verification/translation callbacks the BPF core will invoke
 * @priv: opaque driver data, retrievable via bpf_offload_dev_priv()
 * Returns the new device or ERR_PTR(-ENOMEM).
 */
struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv)
{
	struct bpf_offload_dev *offdev;

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	offdev->ops = ops;
	offdev->priv = priv;
	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

/* Free an offload device.  All netdevs must have been unregistered
 * from it first (hence the WARN_ON on a non-empty netdev list).
 */
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);

/* Accessor for the driver-private pointer passed to bpf_offload_dev_create(). */
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev)
{
	return offdev->priv;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_priv);

/* Tear down tracking state for a bound-only netdev (one with no offload
 * device, see bpf_offload_netdev::offdev) when the netdev unregisters.
 * Netdevs registered through bpf_offload_dev_netdev_register() are the
 * owning driver's responsibility and are left alone here.
 * Runs under RTNL (asserted below).
 */
void bpf_dev_bound_netdev_unregister(struct net_device *dev)
{
	struct bpf_offload_netdev *ondev;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(dev);
	if (ondev && !ondev->offdev)
		__bpf_offload_dev_netdev_unregister(NULL, ondev->netdev);
	up_write(&bpf_devs_lock);
}

/* Verifier check: XDP metadata kfuncs may only be used by device-bound
 * programs, and never by fully offloaded ones.  Logs the reason and
 * returns -EINVAL on violation, 0 if the program may use them.
 */
int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
			      struct bpf_prog_aux *prog_aux)
{
	if (!bpf_prog_is_dev_bound(prog_aux)) {
		bpf_log(log, "metadata kfuncs require device-bound program\n");
		return -EINVAL;
	}

	if (bpf_prog_is_offloaded(prog_aux)) {
		bpf_log(log, "metadata kfuncs can't be offloaded\n");
		return -EINVAL;
	}

	return 0;
}

/* Resolve a metadata kfunc @func_id to the bound netdev driver's
 * implementation.  Returns the function pointer, or NULL when the
 * device is gone or does not implement that kfunc.
 */
void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id)
{
	const struct xdp_metadata_ops *ops;
	void *p = NULL;

	/* We don't hold bpf_devs_lock while resolving several
	 * kfuncs and can race with the unregister_netdevice().
	 * We rely on bpf_dev_bound_match() check at attach
	 * to render this program unusable.
	 */
	down_read(&bpf_devs_lock);
	if (!prog->aux->offload)
		goto out;

	ops = prog->aux->offload->netdev->xdp_metadata_ops;
	if (!ops)
		goto out;

	if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_TIMESTAMP))
		p = ops->xmo_rx_timestamp;
	else if (func_id == bpf_xdp_metadata_kfunc_id(XDP_METADATA_KFUNC_RX_HASH))
		p = ops->xmo_rx_hash;
out:
	up_read(&bpf_devs_lock);

	return p;
}

/* Set up the netdev -> bpf_offload_netdev hash table.  late_initcall so
 * it runs before drivers/netdevs can register.
 */
static int __init bpf_offload_init(void)
{
	return rhashtable_init(&offdevs, &offdevs_params);
}

late_initcall(bpf_offload_init);