/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects bpf_prog_offload_devs, bpf_map_offload_devs and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);
static LIST_HEAD(bpf_prog_offload_devs);
static LIST_HEAD(bpf_map_offload_devs);

static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	if (offload->netdev->reg_state != NETREG_REGISTERED) {
		err = -EINVAL;
		goto err_unlock;
	}
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}

static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
			     struct netdev_bpf *data)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct net_device *netdev;

	ASSERT_RTNL();

	if (!offload)
		return -ENODEV;
	netdev = offload->netdev;

	data->command = cmd;

	return netdev->netdev_ops->ndo_bpf(netdev, data);
}
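/* Verification of a device-bound program is driven by the host verifier but
 * performed by the device.  BPF_OFFLOAD_VERIFIER_PREP hands the program to
 * the driver, which fills in the per-device verifier callbacks (dev_ops)
 * that the verifier then invokes for every instruction.
 */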
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
	struct netdev_bpf data = {};
	int err;

	data.verifier.prog = env->prog;

	rtnl_lock();
	err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
	if (err)
		goto exit_unlock;

	env->prog->aux->offload->dev_ops = data.verifier.ops;
	env->prog->aux->offload->dev_state = true;
exit_unlock:
	rtnl_unlock();
	return err;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};

	data.offload.prog = prog;

	if (offload->dev_state)
		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct netdev_bpf data = {};
	int ret;

	data.offload.prog = prog;

	rtnl_lock();
	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
	rtnl_unlock();

	return ret;
}
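/* Device-bound programs must never run on the host.  Instead of a JITed or
 * interpreted image, bpf_func is pointed at a stub so that an accidental
 * host-side invocation warns loudly rather than executing anything.
 */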
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	void *res;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};
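/* Offloaded maps mirror the program path.  Allocation and freeing go to the
 * device via ndo_bpf and therefore require RTNL; element accesses use the
 * dev_ops the driver installs at allocation time and take bpf_devs_lock only.
 */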
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = kzalloc(sizeof(*offmap), GFP_USER);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &bpf_map_offload_devs);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	kfree(offmap);
	return ERR_PTR(err);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	kfree(offmap);
}
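/* The element accessors below return -ENODEV once offmap->netdev has been
 * cleared, i.e. after the device disappeared and the map was orphaned by the
 * netdev notifier.
 */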
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}
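/* Programs and maps may only be mixed if they are either both host objects
 * or both bound to the same netdev.  The comparison runs under bpf_devs_lock
 * so the binding can't be torn down mid-check.
 */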
bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	struct bpf_prog_offload *offload;
	bool ret;

	if (!!bpf_prog_is_dev_bound(prog->aux) != !!bpf_map_is_dev_bound(map))
		return false;
	if (!bpf_prog_is_dev_bound(prog->aux))
		return true;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	offmap = map_to_offmap(map);

	ret = offload && offload->netdev == offmap->netdev;
	up_read(&bpf_devs_lock);

	return ret;
}

static void bpf_offload_orphan_all_progs(struct net_device *netdev)
{
	struct bpf_prog_offload *offload, *tmp;

	list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, offloads)
		if (offload->netdev == netdev)
			__bpf_prog_offload_destroy(offload->prog);
}

static void bpf_offload_orphan_all_maps(struct net_device *netdev)
{
	struct bpf_offloaded_map *offmap, *tmp;

	list_for_each_entry_safe(offmap, tmp, &bpf_map_offload_devs, offloads)
		if (offmap->netdev == netdev)
			__bpf_map_offload_destroy(offmap);
}

static int bpf_offload_notification(struct notifier_block *notifier,
				    ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_UNREGISTER:
		/* ignore namespace changes */
		if (netdev->reg_state != NETREG_UNREGISTERING)
			break;

		down_write(&bpf_devs_lock);
		bpf_offload_orphan_all_progs(netdev);
		bpf_offload_orphan_all_maps(netdev);
		up_write(&bpf_devs_lock);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block bpf_offload_notifier = {
	.notifier_call = bpf_offload_notification,
};

static int __init bpf_offload_init(void)
{
	register_netdevice_notifier(&bpf_offload_notifier);
	return 0;
}

subsys_initcall(bpf_offload_init);