xref: /openbmc/linux/kernel/bpf/offload.c (revision ce3b9db4db0e0e2b9761c56d08615ea0159e4a1b)
1 /*
2  * Copyright (C) 2017 Netronome Systems, Inc.
3  *
4  * This software is licensed under the GNU General License Version 2,
5  * June 1991 as shown in the file COPYING in the top-level directory of this
6  * source tree.
7  *
8  * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
9  * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
10  * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
11  * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
12  * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
13  * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
14  */
15 
16 #include <linux/bpf.h>
17 #include <linux/bpf_verifier.h>
18 #include <linux/bug.h>
19 #include <linux/list.h>
20 #include <linux/netdevice.h>
21 #include <linux/printk.h>
22 #include <linux/rtnetlink.h>
23 #include <linux/rwsem.h>
24 
/* Protects bpf_prog_offload_devs and offload members of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);
/* All live bpf_dev_offload instances, linked via their ->offloads member. */
static LIST_HEAD(bpf_prog_offload_devs);
30 
/* Set up offload state for @prog before verification/load.
 *
 * Validates that the program type and flags permit offload, looks up the
 * target netdev from attr->prog_ifindex in the caller's netns, and links
 * the new bpf_dev_offload onto bpf_prog_offload_devs under bpf_devs_lock.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL for an
 * unsupported type/flags or a missing/not-registered netdev.
 */
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_dev_offload *offload;

	/* Only cls_bpf (SCHED_CLS) and XDP programs can be offloaded. */
	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	if (!offload->netdev)
		goto err_free;

	down_write(&bpf_devs_lock);
	/* Device may have started unregistering since the lookup. */
	if (offload->netdev->reg_state != NETREG_REGISTERED)
		goto err_unlock;
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
	/* Drop the dev reference while still holding bpf_devs_lock: no
	 * reference is kept past this point — the NETDEV_UNREGISTER
	 * notifier tears this entry down before the netdev goes away.
	 */
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
	dev_put(offload->netdev);
err_free:
	kfree(offload);
	return -EINVAL;
}
69 
70 static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
71 			     struct netdev_bpf *data)
72 {
73 	struct bpf_dev_offload *offload = prog->aux->offload;
74 	struct net_device *netdev;
75 
76 	ASSERT_RTNL();
77 
78 	if (!offload)
79 		return -ENODEV;
80 	netdev = offload->netdev;
81 	if (!netdev->netdev_ops->ndo_bpf)
82 		return -EOPNOTSUPP;
83 
84 	data->command = cmd;
85 
86 	return netdev->netdev_ops->ndo_bpf(netdev, data);
87 }
88 
89 int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
90 {
91 	struct netdev_bpf data = {};
92 	int err;
93 
94 	data.verifier.prog = env->prog;
95 
96 	rtnl_lock();
97 	err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
98 	if (err)
99 		goto exit_unlock;
100 
101 	env->prog->aux->offload->dev_ops = data.verifier.ops;
102 	env->prog->aux->offload->dev_state = true;
103 exit_unlock:
104 	rtnl_unlock();
105 	return err;
106 }
107 
108 int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
109 				 int insn_idx, int prev_insn_idx)
110 {
111 	struct bpf_dev_offload *offload;
112 	int ret = -ENODEV;
113 
114 	down_read(&bpf_devs_lock);
115 	offload = env->prog->aux->offload;
116 	if (offload)
117 		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
118 	up_read(&bpf_devs_lock);
119 
120 	return ret;
121 }
122 
123 static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
124 {
125 	struct bpf_dev_offload *offload = prog->aux->offload;
126 	struct netdev_bpf data = {};
127 
128 	data.offload.prog = prog;
129 
130 	if (offload->dev_state)
131 		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));
132 
133 	list_del_init(&offload->offloads);
134 	kfree(offload);
135 	prog->aux->offload = NULL;
136 }
137 
/* Public teardown entry point, called when the prog is freed.
 * Takes RTNL first (required by __bpf_offload_ndo) and then
 * bpf_devs_lock — the reverse order is forbidden (see lock comment
 * at the top of this file).
 */
void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	rtnl_lock();
	down_write(&bpf_devs_lock);
	/* May already be NULL if the netdev notifier tore it down. */
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}
147 
148 static int bpf_prog_offload_translate(struct bpf_prog *prog)
149 {
150 	struct netdev_bpf data = {};
151 	int ret;
152 
153 	data.offload.prog = prog;
154 
155 	rtnl_lock();
156 	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
157 	rtnl_unlock();
158 
159 	return ret;
160 }
161 
/* Stub installed as bpf_func for offloaded programs: the program only
 * exists on the device, so any host-side invocation is a bug.
 */
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}
168 
/* "JIT" step for offloaded programs: point the host entry at the
 * warn-on-exec stub and hand the program to the device for translation.
 */
int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}
175 
/* Offloaded programs have no host-side prog ops; an empty table is
 * installed so prog-type dispatch still has something to point at.
 */
const struct bpf_prog_ops bpf_offload_prog_ops = {
};
178 
179 static int bpf_offload_notification(struct notifier_block *notifier,
180 				    ulong event, void *ptr)
181 {
182 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
183 	struct bpf_dev_offload *offload, *tmp;
184 
185 	ASSERT_RTNL();
186 
187 	switch (event) {
188 	case NETDEV_UNREGISTER:
189 		/* ignore namespace changes */
190 		if (netdev->reg_state != NETREG_UNREGISTERING)
191 			break;
192 
193 		down_write(&bpf_devs_lock);
194 		list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
195 					 offloads) {
196 			if (offload->netdev == netdev)
197 				__bpf_prog_offload_destroy(offload->prog);
198 		}
199 		up_write(&bpf_devs_lock);
200 		break;
201 	default:
202 		break;
203 	}
204 	return NOTIFY_OK;
205 }
206 
/* Notifier block hooking bpf_offload_notification into netdev events. */
static struct notifier_block bpf_offload_notifier = {
	.notifier_call = bpf_offload_notification,
};
210 
211 static int __init bpf_offload_init(void)
212 {
213 	register_netdevice_notifier(&bpf_offload_notifier);
214 	return 0;
215 }
216 
217 subsys_initcall(bpf_offload_init);
218