/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
   1. For now we assume that route tags < 256.
      This allows direct table lookups instead of hash tables.
   2. For now we assume that "from TAG" and "fromdev DEV" statements
      are mutually exclusive.
   3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
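
/*
 * Summary (added for clarity): the lookup key is the routing realm
 * pair that the IPv4 routing code left in skb->dst->tclassid -- the
 * destination ("to") realm in the low 16 bits and the source ("from")
 * realm in the high 16 bits -- plus, optionally, the input interface.
 *
 * Roughly, from userspace (illustrative iproute2 invocation, not part
 * of this file):
 *
 *	ip route add 10.0.0.0/24 dev eth1 realm 10
 *	tc filter add dev eth0 parent 1: protocol ip prio 100 \
 *		route from 10 classid 1:10
 */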

struct route4_fastmap
{
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

struct route4_head
{
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256+1];
};
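
/*
 * Top-level state: table[] has one bucket per destination realm
 * (0..255) plus slot 256 for filters without a destination realm
 * ("to ANY"); fastmap[] caches recent (tclassid, iif) -> filter
 * lookups, indexed by the low four bits of the tclassid.
 */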

struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16+16+1];
};

struct route4_filter
{
	struct route4_filter	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
};

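/*
 * Negative-cache sentinel stored in the fastmap: a cached miss lets
 * route4_classify() fail fast without rescanning the table.
 */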
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))

static struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};

static __inline__ int route4_fastmap_hash(u32 id, int iif)
{
	return id&0xF;
}

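/*
 * Rather than invalidating individual fastmap entries, writers take
 * the qdisc tree lock and wipe the whole cache whenever a filter is
 * changed or deleted.
 */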
static inline
void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
	qdisc_lock_tree(dev);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	qdisc_unlock_tree(dev);
}

static inline void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}

static __inline__ int route4_hash_to(u32 id)
{
	return id&0xFF;
}

static __inline__ int route4_hash_from(u32 id)
{
	return (id>>16)&0xF;
}

static __inline__ int route4_hash_iif(int iif)
{
	return 16 + ((iif>>16)&0xF);
}

static __inline__ int route4_hash_wild(void)
{
	return 32;
}

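/*
 * Common tail for every match in route4_classify(): copy the filter
 * result, run the attached extensions/actions and return their
 * verdict; a negative verdict means "keep scanning" and suppresses
 * caching, while a plain match (no extensions) is cached in the
 * fastmap.
 */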
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}

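/*
 * Lookup order: fastmap, then the bucket for the packet's destination
 * realm (source-realm chain, input-interface chain, wildcard chain),
 * and finally the same chains in the "to ANY" bucket at index 256
 * with the destination realm masked off.  A complete miss is cached
 * as ROUTE4_FAILURE unless an earlier action result suppressed
 * caching.  With no filter table installed yet, tclassid is
 * interpreted directly as a class id (old_method).
 */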
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	if ((dst = skb->dst) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable*)dst)->fl.iif;

	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();

	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}

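/*
 * Filter handle layout, as built by route4_set_parms():
 *
 *	bits  0..7	destination realm ("to"), when one was given
 *	bit  15		set when no "to" realm was given
 *	bits 16..31	source realm ("from"), or 0x8000|ifindex for an
 *			interface match, or 0xFFFF when neither was given
 *
 * Examples: "to 4 from 10" -> 0x000a0004, "from 10" alone -> 0x000a8000.
 *
 * to_hash() picks the table[] bucket (256 for handles without a "to"
 * realm); from_hash() picks the chain inside the bucket: 0..15 for
 * source realms, 16..31 for interfaces, 32 for the wildcard chain.
 */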
static inline u32 to_hash(u32 id)
{
	u32 h = id&0xFF;
	if (id&0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id&0xF;
	}
	return 16 + (id&0xF);
}

static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle>>16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}

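/*
 * Filters are not reference counted here: ->get() merely looks the
 * handle up and ->put() has nothing to release.
 */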
static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}

static int route4_init(struct tcf_proto *tp)
{
	return 0;
}

static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}

static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = xchg(&tp->root, NULL);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1=0; h1<=256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2=0; h2<=32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}

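/*
 * Deleting a filter also garbage-collects its bucket: once all 33
 * chains are empty, the bucket is unlinked from head->table[] and
 * freed.
 */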
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q->dev, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */

			for (i=0; i<=32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}

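/*
 * Validate the attributes, derive the canonical handle from
 * TO/FROM/IIF (see the layout comment above to_hash()), allocate the
 * destination bucket on demand, reject a duplicate handle, and only
 * then update the filter fields under the tree lock.
 */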
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct rtattr **tb, struct rtattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_CLASSID-1])
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < sizeof(u32))
			goto errout;

	if (tb[TCA_ROUTE4_TO-1]) {
		if (new && handle & 0x8000)
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < sizeof(u32))
			goto errout;
		to = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM-1]) {
		if (tb[TCA_ROUTE4_IIF-1])
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF-1]) {
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);
		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO-1])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM-1])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF-1])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID-1]) {
		f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

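/*
 * ->change() covers both update and insert.  An existing filter is
 * re-parametrised and then re-linked, since a changed handle may move
 * it to a different chain; for a new filter the head itself is
 * allocated on first use.  Chains stay sorted by handle, and the
 * fastmap is flushed at the end.
 */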
static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct rtattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_ROUTE4_MAX];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse_nested(tb, TCA_ROUTE4_MAX, opt) < 0)
		return -EINVAL;

	if ((f = (struct route4_filter*)*arg) != NULL) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE-1], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE-1], 1);
	if (err < 0)
		goto errout;

reinsert:
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q->dev, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

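/*
 * Dump reverses the handle encoding: which of TO/FROM/IIF to emit is
 * decided from the handle bits, with the values taken from f->id and
 * f->iif.
 */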
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (!(f->handle&0x8000)) {
		id = f->id&0xFF;
		RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
	}
	if (f->handle&0x80000000) {
		if ((f->handle>>16) != 0xFFFF)
			RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
	} else {
		id = f->id>>16;
		RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
	}
	if (f->res.classid)
		RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	rta->rta_len = skb_tail_pointer(skb) - b;

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_route4_ops = {
	.next		=	NULL,
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");