offload.c: 3d76a4d3d4e591af3e789698affaad88a5a8e8ab (old) -> fd7c211d6875013f81acc09868effe199b5d2c0c (new)
/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"

--- 173 unchanged lines hidden ---

                __bpf_map_offload_destroy(offmap);
        }

        WARN_ON(!list_empty(&ondev->progs));
        WARN_ON(!list_empty(&ondev->maps));
        kfree(ondev);
}

Old version (3d76a4d3d4e5):

int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
{
        struct bpf_offload_netdev *ondev;
        struct bpf_prog_offload *offload;
        int err;

        if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
            attr->prog_type != BPF_PROG_TYPE_XDP)
                return -EINVAL;

        if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY)
                return -EINVAL;

        if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
            attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)
                return -EINVAL;

        offload = kzalloc(sizeof(*offload), GFP_USER);
        if (!offload)
                return -ENOMEM;

        offload->prog = prog;

        offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
                                           attr->prog_ifindex);
        err = bpf_dev_offload_check(offload->netdev);
        if (err)
                goto err_maybe_put;

        prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);

        down_write(&bpf_devs_lock);
        ondev = bpf_offload_find_netdev(offload->netdev);
        if (!ondev) {
                if (bpf_prog_is_offloaded(prog->aux)) {
                        err = -EINVAL;
                        goto err_unlock;
                }

                /* When only binding to the device, explicitly
                 * create an entry in the hashtable.
                 */
                err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
                if (err)
                        goto err_unlock;
                ondev = bpf_offload_find_netdev(offload->netdev);
        }
        offload->offdev = ondev->offdev;
        prog->aux->offload = offload;
        list_add_tail(&offload->offloads, &ondev->progs);
        dev_put(offload->netdev);
        up_write(&bpf_devs_lock);

        return 0;
err_unlock:
        up_write(&bpf_devs_lock);
err_maybe_put:
        if (offload->netdev)
                dev_put(offload->netdev);
        kfree(offload);
        return err;
}

New version (fd7c211d6875):

static int __bpf_prog_dev_bound_init(struct bpf_prog *prog, struct net_device *netdev)
{
        struct bpf_offload_netdev *ondev;
        struct bpf_prog_offload *offload;
        int err;

        offload = kzalloc(sizeof(*offload), GFP_USER);
        if (!offload)
                return -ENOMEM;

        offload->prog = prog;
        offload->netdev = netdev;

        ondev = bpf_offload_find_netdev(offload->netdev);
        if (!ondev) {
                if (bpf_prog_is_offloaded(prog->aux)) {
                        err = -EINVAL;
                        goto err_free;
                }

                /* When only binding to the device, explicitly
                 * create an entry in the hashtable.
                 */
                err = __bpf_offload_dev_netdev_register(NULL, offload->netdev);
                if (err)
                        goto err_free;
                ondev = bpf_offload_find_netdev(offload->netdev);
        }
        offload->offdev = ondev->offdev;
        prog->aux->offload = offload;
        list_add_tail(&offload->offloads, &ondev->progs);

        return 0;
err_free:
        kfree(offload);
        return err;
}

int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr)
{
        struct net_device *netdev;
        int err;

        if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
            attr->prog_type != BPF_PROG_TYPE_XDP)
                return -EINVAL;

        if (attr->prog_flags & ~BPF_F_XDP_DEV_BOUND_ONLY)
                return -EINVAL;

        if (attr->prog_type == BPF_PROG_TYPE_SCHED_CLS &&
            attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY)
                return -EINVAL;

        netdev = dev_get_by_index(current->nsproxy->net_ns, attr->prog_ifindex);
        if (!netdev)
                return -EINVAL;

        err = bpf_dev_offload_check(netdev);
        if (err)
                goto out;

        prog->aux->offload_requested = !(attr->prog_flags & BPF_F_XDP_DEV_BOUND_ONLY);

        down_write(&bpf_devs_lock);
        err = __bpf_prog_dev_bound_init(prog, netdev);
        up_write(&bpf_devs_lock);

out:
        dev_put(netdev);
        return err;
}
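
The attr fields consumed by bpf_prog_dev_bound_init() are set by userspace at BPF_PROG_LOAD time. Below is a minimal sketch of a loader that binds (but does not offload) an XDP program to a device through the raw bpf(2) syscall, assuming a 64-bit build and UAPI headers that define BPF_F_XDP_DEV_BOUND_ONLY; the function name load_dev_bound_xdp and the raw-instruction program are illustrative, not part of this file, and whether the load succeeds still depends on the driver passing bpf_dev_offload_check().

#include <linux/bpf.h>
#include <net/if.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: load "r0 = XDP_PASS; exit" as a dev-bound-only XDP program.
 * prog_ifindex names the netdev to bind to; BPF_F_XDP_DEV_BOUND_ONLY
 * requests binding without offload, so offload_requested stays false.
 */
static int load_dev_bound_xdp(const char *ifname)
{
        struct bpf_insn insns[] = {
                { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = XDP_PASS },
                { .code = BPF_JMP | BPF_EXIT },
        };
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.prog_type    = BPF_PROG_TYPE_XDP;
        attr.insns        = (__u64)(unsigned long)insns;
        attr.insn_cnt     = sizeof(insns) / sizeof(insns[0]);
        attr.license      = (__u64)(unsigned long)"GPL";
        attr.prog_ifindex = if_nametoindex(ifname);     /* checked in bpf_prog_dev_bound_init() */
        attr.prog_flags   = BPF_F_XDP_DEV_BOUND_ONLY;   /* dev-bound, not offloaded */

        return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}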

int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog)
{
        int err;

        if (!bpf_prog_is_dev_bound(old_prog->aux))
                return 0;

        if (bpf_prog_is_offloaded(old_prog->aux))
                return -EINVAL;

        new_prog->aux->dev_bound = old_prog->aux->dev_bound;
        new_prog->aux->offload_requested = old_prog->aux->offload_requested;

        down_write(&bpf_devs_lock);
        if (!old_prog->aux->offload) {
                err = -EINVAL;
                goto out;
        }

        err = __bpf_prog_dev_bound_init(new_prog, old_prog->aux->offload->netdev);

out:
        up_write(&bpf_devs_lock);
        return err;
}
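
bpf_prog_dev_bound_inherit() reuses the helper above to copy an existing program's device binding onto a newly loaded program, without reading attr or taking a fresh netdev reference of its own. The caller lives outside this file; the following is only a hypothetical sketch of its return convention, with the function and parameter names invented for illustration.

/* Hypothetical sketch, not part of this diff: before verifying new_prog
 * (e.g. a BPF_PROG_TYPE_EXT program extending tgt_prog), copy tgt_prog's
 * device binding onto it.
 */
static int inherit_binding_example(struct bpf_prog *new_prog,
                                   struct bpf_prog *tgt_prog)
{
        int err;

        err = bpf_prog_dev_bound_inherit(new_prog, tgt_prog);
        if (err)
                return err;     /* target is offloaded, or its offload state is gone */

        /* If tgt_prog is not dev-bound at all, the call above is a no-op
         * that returns 0, so no special-casing is needed here.
         */
        return 0;
}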

Both versions continue identically:

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
        struct bpf_prog_offload *offload;
        int ret = -ENODEV;

        down_read(&bpf_devs_lock);
        offload = prog->aux->offload;
        if (offload) {

--- 409 unchanged lines hidden ---

        down_read(&bpf_devs_lock);
        ret = __bpf_offload_dev_match(prog, netdev);
        up_read(&bpf_devs_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);
677
289int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
290{
291 struct bpf_prog_offload *offload;
292 int ret = -ENODEV;
293
294 down_read(&bpf_devs_lock);
295 offload = prog->aux->offload;
296 if (offload) {

--- 409 unchanged lines hidden (view full) ---

706 down_read(&bpf_devs_lock);
707 ret = __bpf_offload_dev_match(prog, netdev);
708 up_read(&bpf_devs_lock);
709
710 return ret;
711}
712EXPORT_SYMBOL_GPL(bpf_offload_dev_match);
713

New version only (fd7c211d6875):

bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
{
        bool ret;

        if (bpf_prog_is_offloaded(lhs->aux) != bpf_prog_is_offloaded(rhs->aux))
                return false;

        down_read(&bpf_devs_lock);
        ret = lhs->aux->offload && rhs->aux->offload &&
              lhs->aux->offload->netdev &&
              lhs->aux->offload->netdev == rhs->aux->offload->netdev;
        up_read(&bpf_devs_lock);

        return ret;
}
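
bpf_prog_dev_bound_match() reports whether two programs agree on their offload status and are bound to the same netdev. A caller-side check might look like the hypothetical sketch below; the function name and error code are illustrative, not taken from this diff.

/* Hypothetical sketch, not part of this diff: refuse to pair a dev-bound
 * program with one bound to a different device (or not bound at all).
 */
static int check_same_device(const struct bpf_prog *a, const struct bpf_prog *b)
{
        if (bpf_prog_is_dev_bound(a->aux) && !bpf_prog_dev_bound_match(a, b))
                return -EINVAL;
        return 0;
}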

Both versions continue identically:

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
        struct bpf_offloaded_map *offmap;
        bool ret;

        if (!bpf_map_is_offloaded(map))
                return bpf_map_offload_neutral(map);
        offmap = map_to_offmap(map);

--- 122 unchanged lines hidden ---