xref: /openbmc/linux/net/sched/cls_bpf.c (revision cc8bbe1a)
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256

struct cls_bpf_head {
	struct list_head plist;	/* list of attached cls_bpf_prog filters */
	u32 hgen;		/* last auto-generated filter handle */
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;	/* classic or extended BPF program */
	struct list_head link;		/* membership in cls_bpf_head::plist */
	struct tcf_result res;
	bool exts_integrated;		/* true if TCA_BPF_FLAG_ACT_DIRECT was set */
	struct tcf_exts exts;
	u32 handle;
	union {
		u32 bpf_fd;		/* eBPF: fd the program was loaded from */
		u16 bpf_num_ops;	/* classic BPF: number of instructions */
	};
	struct sock_filter *bpf_ops;	/* classic BPF only, NULL for eBPF */
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

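/* Map a program's return code to a TC action. Codes the classifier does
 * not understand are clamped to TC_ACT_UNSPEC, i.e. "continue with the
 * next filter in the chain".
 */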
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

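/* Fast path: run each attached BPF program over the skb until one matches.
 * In integrated (direct-action) mode the program's return code is the TC
 * action itself; otherwise a non-zero return selects this filter's result
 * and its attached actions, with any value other than -1 overriding the
 * configured classid.
 */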
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	if (unlikely(!skb_mac_header_was_set(skb)))
		return -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class = prog->res.class;
			res->classid = qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;

		*res = prog->res;
		if (filter_res != -1)
			res->classid = filter_res;

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

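/* An eBPF program is referenced by fd and carries no sock_filter ops,
 * so a NULL bpf_ops pointer distinguishes eBPF from classic BPF.
 */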
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

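/* Release everything a filter owns: its actions, the (classic or extended)
 * BPF program, and the auxiliary buffers. Only called once no reader can
 * reach the filter anymore, see __cls_bpf_delete_prog() below.
 */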
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

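/* RCU callback: by the time this runs, a grace period has elapsed since the
 * filter was unlinked, so no reader on the classify path can still see it.
 */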
static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	cls_bpf_delete_prog(prog->tp, prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, __cls_bpf_delete_prog);

	return 0;
}

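/* Tear down the whole classifier instance. Unless forced, refuse while
 * filters are still attached so the caller can fail gracefully.
 */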
static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	if (!force && !list_empty(&head->plist))
		return false;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		list_del_rcu(&prog->link);
		tcf_unbind_filter(tp, &prog->res);
		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
	}

	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}

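/* Look up a filter by handle; returns it as an opaque cookie for the
 * caller, or 0 if no filter with that handle exists.
 */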
static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

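/* Classic BPF case: validate the instruction count advertised in
 * TCA_BPF_OPS_LEN against the actual TCA_BPF_OPS payload size, copy the
 * instructions and hand them to bpf_prog_create() for verification and
 * possible JIT compilation.
 */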
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

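/* eBPF case: take a reference on an already loaded program via its fd and
 * reject anything that was not loaded as BPF_PROG_TYPE_SCHED_CLS. If the
 * program may dereference skb dst entries on egress (fp->dst_needed),
 * make the underlying device keep them.
 */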
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get(bpf_fd);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (fp->type != BPF_PROG_TYPE_SCHED_CLS) {
		bpf_prog_put(fp);
		return -EINVAL;
	}

	if (tb[TCA_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
			       nla_len(tb[TCA_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_fd = bpf_fd;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

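/* Common configuration path for both create and replace: exactly one of
 * the classic (TCA_BPF_OPS/OPS_LEN) and extended (TCA_BPF_FD) attribute
 * sets must be present. TCA_BPF_FLAG_ACT_DIRECT switches the filter into
 * integrated mode, where the program's return code is the TC action.
 */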
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	struct tcf_exts exts;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			tcf_exts_destroy(&exts);
			return -EINVAL;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}

	prog->exts_integrated = have_exts;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0) {
		tcf_exts_destroy(&exts);
		return ret;
	}

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	tcf_exts_change(tp, &prog->exts, &exts);
	return 0;
}

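/* Allocate an unused handle when userspace did not request a specific one.
 * Handles wrap within [1, 0x7FFFFFFE]; after 0x80000000 failed probes the
 * space is considered exhausted and 0 is returned.
 */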
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

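/* Create a new filter or replace an existing one. A replacement is fully
 * set up first and then swapped in via list_replace_rcu(), so readers on
 * the fast path always see either the old or the new filter, never a
 * half-initialized one; the old filter is freed after a grace period.
 */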
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;
errout:
	kfree(prog);

	return ret;
}

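/* Netlink dump helpers: classic BPF filters are dumped as raw sock_filter
 * instructions, eBPF filters as the originating fd plus an optional name.
 */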
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

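/* Dump one filter's configuration back to userspace as a nested
 * TCA_OPTIONS attribute, mirroring the layout accepted by bpf_policy.
 */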
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

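/* Iterate over all filters for a dump, honouring the walker's skip/count
 * bookkeeping so interrupted dumps can resume where they left off.
 */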
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

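/* Classifier operations registered with the TC core under the "bpf" kind;
 * a "tc filter ... bpf ..." request is dispatched to these callbacks.
 */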
static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);