#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rtnetlink.h>

/* protected by RTNL */
static LIST_HEAD(bpf_prog_offload_devs);

int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_dev_offload *offload;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;
	init_waitqueue_head(&offload->verifier_done);

	rtnl_lock();
	offload->netdev = __dev_get_by_index(net, attr->prog_ifindex);
	if (!offload->netdev) {
		rtnl_unlock();
		kfree(offload);
		return -EINVAL;
	}

	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
	rtnl_unlock();

	return 0;
}

static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
			     struct netdev_bpf *data)
{
	struct net_device *netdev = prog->aux->offload->netdev;

	ASSERT_RTNL();

	if (!netdev)
		return -ENODEV;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;

	data->command = cmd;

	return netdev->netdev_ops->ndo_bpf(netdev, data);
}

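/* Ask the device for its verifier callbacks (BPF_OFFLOAD_VERIFIER_PREP).
 * On success, record that device state exists and that verification is in
 * flight, so __bpf_prog_offload_destroy() knows to wait for it to finish.
 */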
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
	struct netdev_bpf data = {};
	int err;

	data.verifier.prog = env->prog;

	rtnl_lock();
	err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
	if (err)
		goto exit_unlock;

	env->dev_ops = data.verifier.ops;

	env->prog->aux->offload->dev_state = true;
	env->prog->aux->offload->verifier_running = true;
exit_unlock:
	rtnl_unlock();
	return err;
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_dev_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};

	/* Caution - if netdev is destroyed before the program, this function
	 * will be called twice.
	 */

	data.offload.prog = prog;

	if (offload->verifier_running)
		wait_event(offload->verifier_done, !offload->verifier_running);

	if (offload->dev_state)
		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

	offload->dev_state = false;
	list_del_init(&offload->offloads);
	offload->netdev = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_dev_offload *offload = prog->aux->offload;

	offload->verifier_running = false;
	wake_up(&offload->verifier_done);

	rtnl_lock();
	__bpf_prog_offload_destroy(prog);
	rtnl_unlock();

	kfree(offload);
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_dev_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};
	int ret;

	data.offload.prog = prog;

	offload->verifier_running = false;
	wake_up(&offload->verifier_done);

	rtnl_lock();
	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
	rtnl_unlock();

	return ret;
}

static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

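/* Report the ifindex of the netdev the program is bound to, or 0 if the
 * device has already gone away.
 */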
u32 bpf_prog_offload_ifindex(struct bpf_prog *prog)
{
	struct bpf_dev_offload *offload = prog->aux->offload;
	u32 ifindex;

	rtnl_lock();
	ifindex = offload->netdev ? offload->netdev->ifindex : 0;
	rtnl_unlock();

	return ifindex;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

static int bpf_offload_notification(struct notifier_block *notifier,
				    ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dev_offload *offload, *tmp;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_UNREGISTER:
		/* ignore namespace changes */
		if (netdev->reg_state != NETREG_UNREGISTERING)
			break;

		list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
					 offloads) {
			if (offload->netdev == netdev)
				__bpf_prog_offload_destroy(offload->prog);
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block bpf_offload_notifier = {
	.notifier_call = bpf_offload_notification,
};

static int __init bpf_offload_init(void)
{
	register_netdevice_notifier(&bpf_offload_notifier);
	return 0;
}

subsys_initcall(bpf_offload_init);