xref: /openbmc/linux/net/sched/cls_bpf.c (revision 978d13d6)
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
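
/* Example usage (an illustrative sketch, not part of the original file):
 * this classifier is typically attached from userspace via tc, e.g.
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress bpf da obj cls.o sec classifier
 *
 * where cls.o contains a BPF_PROG_TYPE_SCHED_CLS program and "da"
 * selects direct-action mode (TCA_BPF_FLAG_ACT_DIRECT below).
 */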

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	bool offloaded;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
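
/* Note (summarizing the checks in cls_bpf_modify_existing() below): the
 * two configuration modes are mutually exclusive. Classic BPF supplies
 * TCA_BPF_OPS_LEN plus TCA_BPF_OPS with an array of struct sock_filter,
 * while eBPF supplies TCA_BPF_FD (and optionally TCA_BPF_NAME)
 * referring to an already loaded program.
 */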

/* Restrict direct-action return codes to opcodes the caller can act
 * upon; anything else maps to TC_ACT_UNSPEC, i.e. continue with the
 * next filter in the list.
 */
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}
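
/* For illustration (a hedged sketch, not kernel code): a minimal
 * BPF_PROG_TYPE_SCHED_CLS program honoring the conventions above. In
 * direct-action mode the return value is a TC_ACT_* opcode; otherwise
 * 0 means mismatch, -1 selects the configured classid, and any other
 * value is taken as the classid itself.
 *
 *	#include <linux/bpf.h>
 *	#include <linux/pkt_cls.h>
 *
 *	__attribute__((section("classifier"), used))
 *	int cls_main(struct __sk_buff *skb)
 *	{
 *		return TC_ACT_OK;
 *	}
 *
 * A real filter would inspect skb fields or call BPF helpers before
 * deciding on a verdict.
 */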

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_bpf_offload bpf_offload = {};
	struct tc_to_netdev offload;
	int err;

	offload.type = TC_SETUP_CLSBPF;
	offload.cls_bpf = &bpf_offload;

	bpf_offload.command = cmd;
	bpf_offload.exts = &prog->exts;
	bpf_offload.prog = prog->filter;
	bpf_offload.name = prog->bpf_name;
	bpf_offload.exts_integrated = prog->exts_integrated;
	bpf_offload.gen_flags = prog->gen_flags;

	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					    tp->chain->index,
					    tp->protocol, &offload);

	if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE))
		prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;

	return err;
}

static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;
	bool skip_sw;
	int ret;

	skip_sw = tc_skip_sw(prog->gen_flags) ||
		(oldprog && tc_skip_sw(oldprog->gen_flags));

	if (oldprog && oldprog->offloaded) {
		if (tc_should_offload(dev, tp, prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else if (!tc_skip_sw(prog->gen_flags)) {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		} else {
			return -EINVAL;
		}
	} else {
		if (!tc_should_offload(dev, tp, prog->gen_flags))
			return skip_sw ? -EINVAL : 0;
		cmd = TC_CLSBPF_ADD;
	}

	ret = cls_bpf_offload_cmd(tp, obj, cmd);
	if (ret)
		return skip_sw ? ret : 0;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;

	return 0;
}
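
/* Usage note (illustrative, not from the original source): the
 * skip_sw/skip_hw generic flags map to tc command line flags, e.g.
 *
 *	tc filter add dev eth0 ingress bpf da obj cls.o skip_sw
 *
 * requests hardware-only operation; per the logic above it fails with
 * -EINVAL when the device cannot offload the program.
 */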

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	if (!prog->offloaded)
		return;

	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
	__cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu));
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, (struct cls_bpf_prog *) arg);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog);

	kfree_rcu(head, rcu);
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}
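
/* Note on the typical userspace flow (an assumption, not taken from
 * this file): TCA_BPF_FD carries a file descriptor the caller obtained
 * from bpf(BPF_PROG_LOAD, ...) with prog_type BPF_PROG_TYPE_SCHED_CLS;
 * tc performs that load internally when given "bpf obj <file>".
 */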

static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	struct tcf_exts exts;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		return ret;
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			ret = -EINVAL;
			goto errout;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags)) {
			ret = -EINVAL;
			goto errout;
		}
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	tcf_exts_change(tp, &prog->exts, &exts);
	return 0;

errout:
	tcf_exts_destroy(&exts);
	return ret;
}

/* Walk the handle space [1, 0x7FFFFFFE], wrapping around, until an
 * unused handle is found; 0 signals exhaustion to the caller.
 */
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}
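
/* Usage note (illustrative assumption): this helper runs only when the
 * request carries no handle, e.g.
 *
 *	tc filter add dev eth0 ingress bpf da obj cls.o
 *
 * whereas an explicit "handle 0x1" on the tc command line bypasses it.
 */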

static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
			       NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
				      ovr);
	if (ret < 0)
		goto errout;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret) {
		__cls_bpf_delete_prog(prog);
		return ret;
	}

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;

errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}
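
/* Observable effect (an illustration; the exact format is not
 * guaranteed): tc renders these attributes in "tc filter show" output
 * roughly as
 *
 *	bpf cls.o:[classifier] ... id 25 tag c2b9aa2a0ba8a4f9
 */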

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);