/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects bpf_prog_offload_devs, bpf_map_offload_devs and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);
static LIST_HEAD(bpf_prog_offload_devs);
static LIST_HEAD(bpf_map_offload_devs);

static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}
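/* Example (illustrative sketch only, not part of this file): a device
 * passes bpf_dev_offload_check() simply by wiring an ndo_bpf callback
 * into its netdev_ops. "exdev" is a hypothetical driver:
 *
 *	static int exdev_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return exdev_setup_xdp(dev, bpf);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 *
 *	static const struct net_device_ops exdev_netdev_ops = {
 *		.ndo_bpf	= exdev_bpf,
 *	};
 */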
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	if (offload->netdev->reg_state != NETREG_REGISTERED) {
		err = -EINVAL;
		goto err_unlock;
	}
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}
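/* Example (illustrative user space sketch, not part of this file): offload
 * is requested at BPF_PROG_LOAD time simply by setting prog_ifindex in the
 * load attributes. ptr_to_u64() is an assumed helper casting a pointer to
 * __u64:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.prog_type	  = BPF_PROG_TYPE_XDP;
 *	attr.insns	  = ptr_to_u64(insns);
 *	attr.insn_cnt	  = insn_cnt;
 *	attr.license	  = ptr_to_u64("GPL");
 *	attr.prog_ifindex = if_nametoindex("eth0");
 *
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */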
static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
			     struct netdev_bpf *data)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct net_device *netdev;

	ASSERT_RTNL();

	if (!offload)
		return -ENODEV;
	netdev = offload->netdev;

	data->command = cmd;

	return netdev->netdev_ops->ndo_bpf(netdev, data);
}

int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
	struct netdev_bpf data = {};
	int err;

	data.verifier.prog = env->prog;

	rtnl_lock();
	err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
	if (err)
		goto exit_unlock;

	env->prog->aux->offload->dev_ops = data.verifier.ops;
	env->prog->aux->offload->dev_state = true;
exit_unlock:
	rtnl_unlock();
	return err;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};

	data.offload.prog = prog;

	if (offload->dev_state)
		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct netdev_bpf data = {};
	int ret;

	data.offload.prog = prog;

	rtnl_lock();
	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
	rtnl_unlock();

	return ret;
}
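/* Example (illustrative sketch only, hypothetical "exdev" driver): on
 * BPF_OFFLOAD_VERIFIER_PREP the device hands back its bpf_prog_offload_ops,
 * whose insn_hook bpf_prog_offload_verify_insn() above then calls for each
 * instruction the verifier checks:
 *
 *	static int exdev_verify_insn(struct bpf_verifier_env *env,
 *				     int insn_idx, int prev_insn_idx)
 *	{
 *		// Reject instructions the hardware cannot run, while the
 *		// verifier state for insn_idx is still available.
 *		return 0;
 *	}
 *
 *	static const struct bpf_prog_offload_ops exdev_bpf_dev_ops = {
 *		.insn_hook	= exdev_verify_insn,
 *	};
 *
 *	// in the driver's ndo_bpf switch:
 *	case BPF_OFFLOAD_VERIFIER_PREP:
 *		bpf->verifier.ops = &exdev_bpf_dev_ops;
 *		return 0;
 */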
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog = prog,
		.info = info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	void *res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}
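/* Example (illustrative user space sketch, not part of this file): the
 * jited_prog_len/ulen handling above supports the usual two-call pattern --
 * query the length first, then fetch the JITed image into a buffer of that
 * size. ptr_to_u64() is an assumed helper:
 *
 *	struct bpf_prog_info info = {};
 *	union bpf_attr attr = {};
 *
 *	attr.info.bpf_fd   = prog_fd;
 *	attr.info.info	   = ptr_to_u64(&info);
 *	attr.info.info_len = sizeof(info);
 *	syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 *
 *	buf = malloc(info.jited_prog_len);
 *	info.jited_prog_insns = ptr_to_u64(buf);
 *	// jited_prog_len still holds the size reported by the first call
 *	syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
 */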
const struct bpf_prog_ops bpf_offload_prog_ops = {
};

static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = kzalloc(sizeof(*offmap), GFP_USER);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &bpf_map_offload_devs);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	kfree(offmap);
	return ERR_PTR(err);
}
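/* Example (illustrative user space sketch, not part of this file): an
 * offloaded map is created by adding map_ifindex to an otherwise ordinary
 * BPF_MAP_CREATE; only BPF_MAP_TYPE_HASH is accepted here:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.map_type	 = BPF_MAP_TYPE_HASH;
 *	attr.key_size	 = 4;
 *	attr.value_size  = 8;
 *	attr.max_entries = 1024;
 *	attr.map_ifindex = if_nametoindex("eth0");	// same netdev as prog
 *
 *	map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */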
static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	kfree(offmap);
}

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}
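/* Example (illustrative sketch only, hypothetical "exdev" driver): the four
 * wrappers above dispatch through offmap->dev_ops, which the driver installs
 * while handling BPF_OFFLOAD_MAP_ALLOC in its ndo_bpf callback:
 *
 *	static const struct bpf_map_dev_ops exdev_map_ops = {
 *		.map_get_next_key	= exdev_map_get_next_key,
 *		.map_lookup_elem	= exdev_map_lookup,
 *		.map_update_elem	= exdev_map_update,
 *		.map_delete_elem	= exdev_map_delete,
 *	};
 *
 *	// in the driver's ndo_bpf switch:
 *	case BPF_OFFLOAD_MAP_ALLOC:
 *		bpf->offmap->dev_ops = &exdev_map_ops;
 *		return exdev_map_alloc_hw(bpf->offmap);
 */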
bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	struct bpf_prog_offload *offload;
	bool ret;

	if (!bpf_prog_is_dev_bound(prog->aux) || !bpf_map_is_dev_bound(map))
		return false;

	down_read(&bpf_devs_lock);
	offload = prog->aux->offload;
	offmap = map_to_offmap(map);

	ret = offload && offload->netdev == offmap->netdev;
	up_read(&bpf_devs_lock);

	return ret;
}
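/* Example (illustrative sketch only): bpf_offload_dev_match() is what lets
 * a caller such as the verifier refuse a device-bound program that uses a
 * map offloaded to a different netdev, roughly:
 *
 *	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
 *	    !bpf_offload_dev_match(prog, map))
 *		return -EINVAL;
 */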
static void bpf_offload_orphan_all_progs(struct net_device *netdev)
{
	struct bpf_prog_offload *offload, *tmp;

	list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, offloads)
		if (offload->netdev == netdev)
			__bpf_prog_offload_destroy(offload->prog);
}

static void bpf_offload_orphan_all_maps(struct net_device *netdev)
{
	struct bpf_offloaded_map *offmap, *tmp;

	list_for_each_entry_safe(offmap, tmp, &bpf_map_offload_devs, offloads)
		if (offmap->netdev == netdev)
			__bpf_map_offload_destroy(offmap);
}

static int bpf_offload_notification(struct notifier_block *notifier,
				    ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_UNREGISTER:
		/* ignore namespace changes */
		if (netdev->reg_state != NETREG_UNREGISTERING)
			break;

		down_write(&bpf_devs_lock);
		bpf_offload_orphan_all_progs(netdev);
		bpf_offload_orphan_all_maps(netdev);
		up_write(&bpf_devs_lock);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block bpf_offload_notifier = {
	.notifier_call = bpf_offload_notification,
};

static int __init bpf_offload_init(void)
{
	register_netdevice_notifier(&bpf_offload_notifier);
	return 0;
}

subsys_initcall(bpf_offload_init);