xref: /openbmc/linux/net/sched/cls_bpf.c (revision a03a8dbe20eff6d57aae3147577bf84b52aba4e6)
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
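
/*
 * Two program flavors are handled below: classic BPF, passed from user
 * space as an array of struct sock_filter via TCA_BPF_OPS/TCA_BPF_OPS_LEN,
 * and eBPF, passed as a file descriptor of an already loaded program via
 * TCA_BPF_FD. Illustrative tc invocations (a sketch only; the exact
 * front-end syntax depends on the iproute2 version in use):
 *
 *   tc filter add dev eth0 parent 1: bpf bytecode '...' flowid 1:1
 *   tc filter add dev eth0 parent 1: bpf obj cls.o flowid 1:1
 */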

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256

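/* Per-tcf_proto state: the RCU-protected list of attached programs and a
 * rolling generator for automatically assigned handles.
 */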
struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

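/* One filter instance: either a classic BPF program (bpf_ops holds the
 * original sock_filter array, bpf_num_ops its length) or an eBPF program
 * (bpf_ops is NULL, bpf_fd/bpf_name identify it); filter points to the
 * loaded bpf_prog in both cases.
 */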
struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	struct tcf_exts exts;
	u32 handle;
	union {
		u32 bpf_fd;
		u16 bpf_num_ops;
	};
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

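/* Run each attached program against the skb in list order. A return value
 * of 0 means no match (try the next program), -1 means match using the
 * classid configured in prog->res, and any other value overrides the
 * classid directly before the attached actions are executed.
 */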
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	struct cls_bpf_prog *prog;
	int ret;

	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res = BPF_PROG_RUN(prog->filter, skb);

		if (filter_res == 0)
			continue;

		*res = prog->res;
		if (filter_res != -1)
			res->classid = filter_res;

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		return ret;
	}

	return -1;
}

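/* eBPF programs are loaded by file descriptor and carry no sock_filter
 * copy, so a NULL bpf_ops distinguishes them from classic BPF.
 */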
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

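/* Allocate the per-tcf_proto head and publish it under RCU. */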
static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

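/* Release everything a program owns: its actions, the loaded bpf_prog
 * (dropped with bpf_prog_put() for eBPF, bpf_prog_destroy() for classic
 * BPF) and the duplicated ops/name buffers.
 */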
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

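/* RCU callback: the actual teardown runs after a grace period, once no
 * reader can still reach the program on the classify path.
 */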
static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	cls_bpf_delete_prog(prog->tp, prog);
}

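/* Unlink one program from the list and defer its destruction via RCU. */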
static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, __cls_bpf_delete_prog);

	return 0;
}

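/* Tear down the whole classifier. Without force this only succeeds when no
 * programs are attached; otherwise every program is unlinked and freed via
 * RCU along with the head.
 */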
static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	if (!force && !list_empty(&head->plist))
		return false;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		list_del_rcu(&prog->link);
		tcf_unbind_filter(tp, &prog->res);
		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
	}

	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}

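/* Look up a program by handle; returns it cast to unsigned long, or 0 if
 * not found.
 */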
static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

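/* Build a classic BPF program from TCA_BPF_OPS/TCA_BPF_OPS_LEN: validate
 * the instruction count, duplicate the sock_filter array and let
 * bpf_prog_create() translate it into an internal bpf_prog.
 */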
static int cls_bpf_prog_from_ops(struct nlattr **tb,
				 struct cls_bpf_prog *prog, u32 classid)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;

	prog->filter = fp;
	prog->res.classid = classid;

	return 0;
}

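/* Take a reference on an already loaded eBPF program via its file
 * descriptor (TCA_BPF_FD) and reject anything that is not of type
 * BPF_PROG_TYPE_SCHED_CLS; an optional TCA_BPF_NAME is duplicated for
 * later dumping.
 */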
static int cls_bpf_prog_from_efd(struct nlattr **tb,
				 struct cls_bpf_prog *prog, u32 classid)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get(bpf_fd);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (fp->type != BPF_PROG_TYPE_SCHED_CLS) {
		bpf_prog_put(fp);
		return -EINVAL;
	}

	if (tb[TCA_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
			       nla_len(tb[TCA_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_fd = bpf_fd;
	prog->bpf_name = name;

	prog->filter = fp;
	prog->res.classid = classid;

	return 0;
}

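/* Common configuration path for new and replaced filters: exactly one of
 * the classic (ops) or eBPF (fd) attribute sets plus TCA_BPF_CLASSID must
 * be present; actions are validated first, then the program is set up and
 * bound to its class.
 */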
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	struct tcf_exts exts;
	bool is_bpf, is_ebpf;
	u32 classid;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];

	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
	    !tb[TCA_BPF_CLASSID])
		return -EINVAL;

	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		return ret;

	classid = nla_get_u32(tb[TCA_BPF_CLASSID]);

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog, classid) :
		       cls_bpf_prog_from_efd(tb, prog, classid);
	if (ret < 0) {
		tcf_exts_destroy(&exts);
		return ret;
	}

	tcf_bind_filter(tp, &prog->res, base);
	tcf_exts_change(tp, &prog->exts, &exts);

	return 0;
}

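/* Pick the next unused handle from the rolling generator, wrapping back to
 * 1 before reaching 0x7FFFFFFF; returns 0 if the handle space is exhausted.
 */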
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

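/* Create or replace a filter instance. The new program is fully set up
 * before it is linked in; on replace the old program is swapped out under
 * RCU and freed after a grace period.
 */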
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		/* Swap the old program for the new one in place so readers
		 * on the classify path always see a valid entry.
		 */
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;
errout:
	kfree(prog);

	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

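/* Dump one filter back to user space: handle, classid, the flavor-specific
 * attributes and the attached actions with their statistics.
 */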
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

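/* Iterate over all programs for dump/walk operations, honouring the
 * caller's skip/count bookkeeping.
 */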
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);