xref: /openbmc/linux/net/sched/cls_bpf.c (revision 88d5e520)
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
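
/* Usage sketch (not part of this file, exact option names depend on the
 * iproute2 version): a classic BPF filter could be attached with the
 * cls_bpf frontend roughly like so:
 *
 *   tc filter add dev eth0 parent 1: bpf bytecode '1,6 0 0 4294967295,' \
 *      flowid 1:1
 *
 * The single RET instruction above returns -1, meaning the configured
 * classid (1:1) is used; see cls_bpf_classify() below.
 */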

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

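/* Per-classifier state: the list of attached programs and the last
 * auto-generated handle (hgen).
 */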
struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
};

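/* One attached filter instance: the compiled BPF program, a copy of the
 * original classic BPF opcodes (kept for dumping), attached actions and
 * classification result, plus the list linkage and user-visible handle.
 */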
struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct sock_filter *bpf_ops;
	struct tcf_exts exts;
	struct tcf_result res;
	struct list_head link;
	u32 handle;
	u16 bpf_len;
};

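/* Netlink attribute policy: classid, opcode count and the raw classic
 * BPF opcodes, bounded by BPF_MAXINSNS instructions.
 */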
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

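/* Run each attached program against the skb until one matches. A filter
 * return value of 0 means no match, -1 means "use the configured
 * classid", and any other value overrides the classid. Attached actions
 * may still veto the match by returning a negative result.
 */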
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog;
	int ret;

	list_for_each_entry(prog, &head->plist, link) {
		int filter_res = BPF_PROG_RUN(prog->filter, skb);

		if (filter_res == 0)
			continue;

		*res = prog->res;
		if (filter_res != -1)
			res->classid = filter_res;

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		return ret;
	}

	return -1;
}

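/* Allocate the (initially empty) list of programs for this classifier. */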
static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD(&head->plist);
	tp->root = head;

	return 0;
}

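/* Release everything a program owns: result binding, actions, the
 * compiled filter and the opcode copy.
 */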
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_unbind_filter(tp, &prog->res);
	tcf_exts_destroy(tp, &prog->exts);

	bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_ops);
	kfree(prog);
}

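/* Unlink the given program under the tree lock and free it. */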
static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog == todel) {
			tcf_tree_lock(tp);
			list_del(&prog->link);
			tcf_tree_unlock(tp);

			cls_bpf_delete_prog(tp, prog);
			return 0;
		}
	}

	return -ENOENT;
}

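/* Tear down the whole classifier: free every program and the head. */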
static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		list_del(&prog->link);
		cls_bpf_delete_prog(tp, prog);
	}

	kfree(head);
}

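/* Look up a program by handle; returns it as an opaque cookie, or 0. */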
static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

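/* No per-filter reference counting, so there is nothing to release. */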
static void cls_bpf_put(struct tcf_proto *tp, unsigned long f)
{
}

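/* Validate the netlink attributes, build a new classic BPF program and
 * swap it into @prog under the tree lock; the old program and opcode
 * copy are released only after the swap.
 */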
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	struct sock_filter *bpf_ops, *bpf_old;
	struct tcf_exts exts;
	struct sock_fprog_kern tmp;
	struct bpf_prog *fp, *fp_old;
	u16 bpf_size, bpf_len;
	u32 classid;
	int ret;

	if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID])
		return -EINVAL;

	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		return ret;

	classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
	bpf_len = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_len > BPF_MAXINSNS || bpf_len == 0) {
		ret = -EINVAL;
		goto errout;
	}

	bpf_size = bpf_len * sizeof(*bpf_ops);
	/* The opcode count must agree with the actual attribute payload,
	 * otherwise the memcpy below would read past the end of it.
	 */
	if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
		ret = -EINVAL;
		goto errout;
	}

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL) {
		ret = -ENOMEM;
		goto errout;
	}

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	tmp.len = bpf_len;
	tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &tmp);
	if (ret)
		goto errout_free;

	tcf_tree_lock(tp);
	fp_old = prog->filter;
	bpf_old = prog->bpf_ops;

	prog->bpf_len = bpf_len;
	prog->bpf_ops = bpf_ops;
	prog->filter = fp;
	prog->res.classid = classid;
	tcf_tree_unlock(tp);

	tcf_bind_filter(tp, &prog->res, base);
	tcf_exts_change(tp, &prog->exts, &exts);

	if (fp_old)
		bpf_prog_destroy(fp_old);
	if (bpf_old)
		kfree(bpf_old);

	return 0;

errout_free:
	kfree(bpf_ops);
errout:
	tcf_exts_destroy(tp, &exts);
	return ret;
}

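/* Pick the next unused handle in [1, 0x7FFFFFFE], giving up (and
 * returning 0) after a bounded number of attempts.
 */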
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

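/* Create a new filter or modify an existing one. With a non-NULL *arg,
 * the existing program is updated in place; otherwise a new program is
 * allocated, given a handle and linked into the list under the tree lock.
 */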
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	if (prog != NULL) {
		if (handle && prog->handle != handle)
			return -EINVAL;
		return cls_bpf_modify_existing(net, tp, prog, base, tb,
					       tca[TCA_RATE], ovr);
	}

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (prog == NULL)
		return -ENOBUFS;

	tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	tcf_tree_lock(tp);
	list_add(&prog->link, &head->plist);
	tcf_tree_unlock(tp);

	*arg = (unsigned long) prog;

	return 0;
errout:
	if (*arg == 0UL && prog)
		kfree(prog);

	return ret;
}

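/* Dump one filter back to userspace: handle, classid, the raw opcode
 * array and any attached extensions.
 */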
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest, *nla;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;
	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_len))
		goto nla_put_failure;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_len *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		goto nla_put_failure;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

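/* Iterate over all programs for a dump-all walk, honoring skip/count. */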
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.put		=	cls_bpf_put,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);