// SPDX-License-Identifier: GPL-2.0-only
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Can be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control, as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 */
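
/* Illustrative usage from the tc(8) command line (device and object
 * file names are hypothetical, shown only for context):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf da obj cls.o sec classifier
 *
 * where "da" (direct-action) corresponds to TCA_BPF_FLAG_ACT_DIRECT
 * below and lets the BPF program return the TC verdict itself.
 */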

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/tc_wrapper.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)
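
/* Of the generic classifier flags, only the offload control bits
 * (skip_hw/skip_sw) may be set from userspace on a cls_bpf filter;
 * cls_bpf_set_parms() rejects anything else.
 */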

struct cls_bpf_head {
	struct list_head plist;		/* list of installed filters */
	struct idr handle_idr;		/* filter handle allocation/lookup */
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;	/* the attached (e)BPF program */
	struct list_head link;		/* membership in head->plist */
	struct tcf_result res;		/* default result (TCA_BPF_CLASSID) */
	bool exts_integrated;		/* direct-action mode */
	u32 gen_flags;			/* offload flags (skip_hw/skip_sw) */
	unsigned int in_hw_count;	/* number of offloaded instances */
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;		/* classic BPF: number of insns */
	struct sock_filter *bpf_ops;	/* classic BPF: insns kept for dump */
	const char *bpf_name;		/* eBPF: name from TCA_BPF_NAME */
	struct tcf_proto *tp;
	struct rcu_work rwork;		/* deferred destruction */
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

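/* In direct-action mode the program's return value is the TC verdict;
 * map anything outside the known opcodes to TC_ACT_UNSPEC, which means
 * "continue with the next filter in the chain".
 */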
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

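/* Main classification path, called from the qdisc layer under RCU.
 * In direct-action mode the program returns a verdict itself; a minimal
 * sketch of such a program (section and function names hypothetical):
 *
 *   SEC("classifier")
 *   int cls_main(struct __sk_buff *skb)
 *   {
 *           return TC_ACT_OK;
 *   }
 *
 * In legacy mode the return value is a classid (0 = mismatch, -1 = use
 * the default from TCA_BPF_CLASSID) and attached actions run separately.
 */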
TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb,
				       const struct tcf_proto *tp,
				       struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			/* Hardware-only filter: nothing to run here */
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = bpf_prog_run(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = bpf_prog_run(prog->filter, skb);
		}
		/* If the program zeroed skb->tstamp, the delivery-time
		 * flag no longer applies and must be cleared with it.
		 */
		if (unlikely(!skb->tstamp && skb->mono_delivery_time))
			skb->mono_delivery_time = 0;

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		/* Legacy mode: 0 means no match, -1 selects the default
		 * classid, any other value is the classid itself.
		 */
		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

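/* Issue a single offload request to the block's callbacks: replace when
 * both prog and oldprog are given, add when only prog is, destroy when
 * only oldprog is. On failure of an add/replace, roll back by calling
 * ourselves with the arguments swapped; with skip_sw set, a filter that
 * did not make it into hardware is a hard error.
 */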
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog && prog)
		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count,
					  &prog->gen_flags, &prog->in_hw_count,
					  true);
	else if (prog)
		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
				      skip_sw, &prog->gen_flags,
				      &prog->in_hw_count, true);
	else
		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count, true);

	if (prog && err) {
		cls_bpf_offload_cmd(tp, oldprog, prog, extack);
		return err;
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

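/* Decide whether hardware needs to be told about this change at all:
 * skip_hw filters are treated as absent on the hw side, and the
 * offload control flags must not change across a replace.
 */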
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
						 struct cls_bpf_prog,
						 rwork);
	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

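/* Unlink a filter under RTNL; the actual freeing is deferred through
 * tcf_queue_work() so it happens after an RCU grace period, once
 * concurrent cls_bpf_classify() readers can no longer see the filter.
 */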
static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

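/* Classic BPF path: TCA_BPF_OPS carries a raw array of struct
 * sock_filter instructions which bpf_prog_create() validates and
 * converts into an internal (and possibly JITed) program; the raw
 * copy is kept around so the filter can be dumped back to userspace.
 */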
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

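/* eBPF path: TCA_BPF_FD refers to a program already loaded via bpf(2);
 * take a reference on it, checking that it is of type
 * BPF_PROG_TYPE_SCHED_CLS. skip_sw additionally permits device-bound
 * (offloaded) programs.
 */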
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}

static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, u32 flags,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, flags,
				extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

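/* Create or replace a filter. A handle of 0 asks for allocation of the
 * lowest free one from the IDR; an explicit handle is reserved unless
 * this is a replace, in which case the existing IDR slot is simply
 * pointed at the new program once setup and offload have succeeded.
 */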
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, u32 flags,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
					  bpf_policy, NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], flags,
				extack);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

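/* Dump one filter as netlink attributes, mirroring what
 * cls_bpf_change() accepts; hardware stats are refreshed first via
 * TC_CLSBPF_STATS so the reported counters include offloaded hits.
 */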
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct cls_bpf_prog *prog = fh;

	tc_cls_bind_class(classid, cl, q, &prog->res, base);
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (!tc_cls_stats_dump(tp, arg, prog))
			break;
	}
}

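/* Replay all hw-capable filters to a single callback, e.g. when a
 * block is (un)bound from an offloading device, so that the hardware
 * table stays in sync with the software state.
 */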
static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			     void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *prog;
	int err;

	list_for_each_entry(prog, &head->plist, link) {
		if (tc_skip_hw(prog->gen_flags))
			continue;

		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
					   extack);
		cls_bpf.command = TC_CLSBPF_OFFLOAD;
		cls_bpf.exts = &prog->exts;
		cls_bpf.prog = add ? prog->filter : NULL;
		cls_bpf.oldprog = add ? NULL : prog->filter;
		cls_bpf.name = prog->bpf_name;
		cls_bpf.exts_integrated = prog->exts_integrated;

		err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
					    &cls_bpf, cb_priv, &prog->gen_flags,
					    &prog->in_hw_count);
		if (err)
			return err;
	}

	return 0;
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.reoffload	=	cls_bpf_reoffload,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);