xref: /openbmc/linux/net/sched/cls_route.c (revision 1da177e4)
1 /*
2  * net/sched/cls_route.c	ROUTE4 classifier.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10  */
11 
12 #include <linux/module.h>
13 #include <linux/config.h>
14 #include <asm/uaccess.h>
15 #include <asm/system.h>
16 #include <linux/bitops.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/string.h>
21 #include <linux/mm.h>
22 #include <linux/socket.h>
23 #include <linux/sockios.h>
24 #include <linux/in.h>
25 #include <linux/errno.h>
26 #include <linux/interrupt.h>
27 #include <linux/if_ether.h>
28 #include <linux/inet.h>
29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h>
31 #include <linux/notifier.h>
32 #include <net/ip.h>
33 #include <net/route.h>
34 #include <linux/skbuff.h>
35 #include <net/sock.h>
36 #include <net/act_api.h>
37 #include <net/pkt_cls.h>
38 
39 /*
40    1. For now we assume that route tags < 256.
41       This allows direct table lookups instead of hash tables.
42    2. For now we assume that "from TAG" and "fromdev DEV" statements
43       are mutually exclusive.
44    3. "to TAG from ANY" takes priority over "to ANY from XXX"
45  */
46 
/* One slot of the per-head result cache: the last filter (or the
 * ROUTE4_FAILURE sentinel) resolved for this (id, iif) pair.
 */
47 struct route4_fastmap
48 {
49 	struct route4_filter	*filter;
50 	u32			id;
51 	int			iif;
52 };
53 
/* Root of the classifier: a 16-entry result cache plus 256 buckets
 * indexed by the TO realm and one extra bucket (index 256) for
 * filters configured without a TO id.
 */
54 struct route4_head
55 {
56 	struct route4_fastmap	fastmap[16];
57 	struct route4_bucket	*table[256+1];
58 };
59 
/* Second-level table inside a TO bucket; slot chosen by from_hash()
 * on insert and by the route4_hash_*() helpers on lookup.
 */
60 struct route4_bucket
61 {
62 	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
63 	struct route4_filter	*ht[16+16+1];
64 };
65 
/* A single route classifier rule, chained within its bucket slot. */
66 struct route4_filter
67 {
68 	struct route4_filter	*next;
69 	u32			id;		/* TO realm | FROM realm << 16 (see route4_set_parms) */
70 	int			iif;		/* ifindex for "fromdev" rules */
71 
72 	struct tcf_result	res;		/* class to deliver on match */
73 	struct tcf_exts		exts;		/* attached actions/policer */
74 	u32			handle;		/* canonical encoded handle */
75 	struct route4_bucket	*bkt;		/* bucket this filter hangs off */
76 };
77 
/* Sentinel cached in the fastmap to remember a lookup that failed. */
78 #define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
79 
/* Attribute slots used by the generic extension (action/police) code. */
80 static struct tcf_ext_map route_ext_map = {
81 	.police = TCA_ROUTE4_POLICE,
82 	.action = TCA_ROUTE4_ACT
83 };
84 
/* Fastmap slot for (id, iif): only the low 4 bits of the realm id
 * are used; iif is currently ignored by the hash.
 */
85 static __inline__ int route4_fastmap_hash(u32 id, int iif)
86 {
87 	return id&0xF;
88 }
89 
/* Invalidate the whole result cache.  Taking the device queue_lock
 * keeps the dequeue/classify path out while the memset runs.
 * The id argument is currently unused (the entire map is wiped).
 */
90 static inline
91 void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
92 {
93 	spin_lock_bh(&dev->queue_lock);
94 	memset(head->fastmap, 0, sizeof(head->fastmap));
95 	spin_unlock_bh(&dev->queue_lock);
96 }
97 
98 static void __inline__
99 route4_set_fastmap(struct route4_head *head, u32 id, int iif,
100 		   struct route4_filter *f)
101 {
102 	int h = route4_fastmap_hash(id, iif);
103 	head->fastmap[h].id = id;
104 	head->fastmap[h].iif = iif;
105 	head->fastmap[h].filter = f;
106 }
107 
/* Lookup-side bucket index (0..255): low byte of the realm id. */
108 static __inline__ int route4_hash_to(u32 id)
109 {
110 	return id&0xFF;
111 }
112 
/* Lookup-side slot (0..15) for the FROM realm in bits 16..19 of id. */
113 static __inline__ int route4_hash_from(u32 id)
114 {
115 	return (id>>16)&0xF;
116 }
117 
/* Lookup-side slot (16..31) for the input interface.
 * NOTE(review): this hashes bits 16..19 of the ifindex, while the
 * insert path (from_hash() on handle>>16) places "fromdev" filters by
 * the LOW nibble of the ifindex — for typical small ifindexes lookups
 * always probe slot 16.  Verify these two hashes are meant to agree.
 */
118 static __inline__ int route4_hash_iif(int iif)
119 {
120 	return 16 + ((iif>>16)&0xF);
121 }
122 
/* Slot 32 holds wildcard filters ("to TAG" with no from/fromdev). */
123 static __inline__ int route4_hash_wild(void)
124 {
125 	return 32;
126 }
127 
/* Common tail of the search loops in route4_classify().  Copies the
 * match into *res, runs any attached actions and returns their verdict;
 * a negative verdict skips this filter and suppresses fastmap caching
 * for the rest of the lookup.  With no actions attached, the match is
 * cached on the fast path (unless dont_cache is set) and 0 is returned.
 * Expands in place and relies on route4_classify()'s locals:
 * skb, res, head, id, iif, dont_cache, f.
 */
128 #define ROUTE4_APPLY_RESULT()					\
129 {								\
130 	*res = f->res;						\
131 	if (tcf_exts_is_available(&f->exts)) {			\
132 		int r = tcf_exts_exec(skb, &f->exts, res);	\
133 		if (r < 0) {					\
134 			dont_cache = 1;				\
135 			continue;				\
136 		}						\
137 		return r;					\
138 	} else if (!dont_cache)					\
139 		route4_set_fastmap(head, id, iif, f);		\
140 	return 0;						\
141 }
142 
/* Classify a packet by its routing decision (realms and input device).
 * Returns 0 with *res filled in on a match, -1 otherwise.  Hits and
 * misses are cached in head->fastmap (ROUTE4_FAILURE marks a miss).
 */
143 static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
144 			   struct tcf_result *res)
145 {
146 	struct route4_head *head = (struct route4_head*)tp->root;
147 	struct dst_entry *dst;
148 	struct route4_bucket *b;
149 	struct route4_filter *f;
150 	u32 id, h;
151 	int iif, dont_cache = 0;
152 
153 	if ((dst = skb->dst) == NULL)
154 		goto failure;
155 
156 	id = dst->tclassid;	/* realms: FROM in upper 16 bits, TO in lower */
157 	if (head == NULL)
158 		goto old_method;
159 
160 	iif = ((struct rtable*)dst)->fl.iif;
161 
	/* Fast path: per-head cache keyed on (id, iif). */
162 	h = route4_fastmap_hash(id, iif);
163 	if (id == head->fastmap[h].id &&
164 	    iif == head->fastmap[h].iif &&
165 	    (f = head->fastmap[h].filter) != NULL) {
166 		if (f == ROUTE4_FAILURE)
167 			goto failure;
168 
169 		*res = f->res;
170 		return 0;
171 	}
172 
173 	h = route4_hash_to(id);
174 
175 restart:
176 	if ((b = head->table[h]) != NULL) {
		/* Priority: exact FROM match, then IIF match, then wildcard. */
177 		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
178 			if (f->id == id)
179 				ROUTE4_APPLY_RESULT();
180 
181 		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
182 			if (f->iif == iif)
183 				ROUTE4_APPLY_RESULT();
184 
185 		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
186 			ROUTE4_APPLY_RESULT();
187 
188 	}
	/* Second pass: bucket 256 holds filters with no TO id; strip the
	 * low 16 bits so FROM-only ids still match.
	 */
189 	if (h < 256) {
190 		h = 256;
191 		id &= ~0xFFFF;
192 		goto restart;
193 	}
194 
195 	if (!dont_cache)
196 		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);	/* cache the miss */
197 failure:
198 	return -1;
199 
	/* No filter table yet: interpret tclassid directly as a classid. */
200 old_method:
201 	if (id && (TC_H_MAJ(id) == 0 ||
202 		   !(TC_H_MAJ(id^tp->q->handle)))) {
203 		res->classid = id;
204 		res->class = 0;
205 		return 0;
206 	}
207 	return -1;
208 }
209 
210 static inline u32 to_hash(u32 id)
211 {
212 	u32 h = id&0xFF;
213 	if (id&0x8000)
214 		h += 256;
215 	return h;
216 }
217 
218 static inline u32 from_hash(u32 id)
219 {
220 	id &= 0xFFFF;
221 	if (id == 0xFFFF)
222 		return 32;
223 	if (!(id & 0x8000)) {
224 		if (id > 255)
225 			return 256;
226 		return id&0xF;
227 	}
228 	return 16 + (id&0xF);
229 }
230 
231 static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
232 {
233 	struct route4_head *head = (struct route4_head*)tp->root;
234 	struct route4_bucket *b;
235 	struct route4_filter *f;
236 	unsigned h1, h2;
237 
238 	if (!head)
239 		return 0;
240 
241 	h1 = to_hash(handle);
242 	if (h1 > 256)
243 		return 0;
244 
245 	h2 = from_hash(handle>>16);
246 	if (h2 > 32)
247 		return 0;
248 
249 	if ((b = head->table[h1]) != NULL) {
250 		for (f = b->ht[h2]; f; f = f->next)
251 			if (f->handle == handle)
252 				return (unsigned long)f;
253 	}
254 	return 0;
255 }
256 
/* Filters are not reference counted here; releasing a get is a no-op. */
257 static void route4_put(struct tcf_proto *tp, unsigned long f)
258 {
259 }
260 
/* Nothing to set up: route4_change() allocates the head lazily. */
261 static int route4_init(struct tcf_proto *tp)
262 {
263 	return 0;
264 }
265 
/* Release one filter: drop its class binding, destroy attached
 * extensions (actions/policer) and free the memory.
 */
266 static inline void
267 route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
268 {
269 	tcf_unbind_filter(tp, &f->res);
270 	tcf_exts_destroy(tp, &f->exts);
271 	kfree(f);
272 }
273 
/* Tear down the entire classifier.  The root pointer is detached
 * first with xchg() so lookups stop finding it, then every chain,
 * every bucket and finally the head are freed.
 */
274 static void route4_destroy(struct tcf_proto *tp)
275 {
276 	struct route4_head *head = xchg(&tp->root, NULL);
277 	int h1, h2;
278 
279 	if (head == NULL)
280 		return;
281 
	/* 256 TO buckets plus the extra no-TO bucket at index 256 */
282 	for (h1=0; h1<=256; h1++) {
283 		struct route4_bucket *b;
284 
285 		if ((b = head->table[h1]) != NULL) {
286 			for (h2=0; h2<=32; h2++) {
287 				struct route4_filter *f;
288 
289 				while ((f = b->ht[h2]) != NULL) {
290 					b->ht[h2] = f->next;
291 					route4_delete_filter(tp, f);
292 				}
293 			}
294 			kfree(b);
295 		}
296 	}
297 	kfree(head);
298 }
299 
/* Unlink the filter passed in arg from its bucket chain and free it.
 * When this empties the whole bucket, the bucket is detached from the
 * head and freed as well.  The fastmap is reset first since it may
 * still cache the dying filter.
 */
300 static int route4_delete(struct tcf_proto *tp, unsigned long arg)
301 {
302 	struct route4_head *head = (struct route4_head*)tp->root;
303 	struct route4_filter **fp, *f = (struct route4_filter*)arg;
304 	unsigned h = 0;
305 	struct route4_bucket *b;
306 	int i;
307 
308 	if (!head || !f)
309 		return -EINVAL;
310 
311 	h = f->handle;
312 	b = f->bkt;
313 
314 	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
315 		if (*fp == f) {
			/* unlink under the tree lock, free outside it */
316 			tcf_tree_lock(tp);
317 			*fp = f->next;
318 			tcf_tree_unlock(tp);
319 
320 			route4_reset_fastmap(tp->q->dev, head, f->id);
321 			route4_delete_filter(tp, f);
322 
323 			/* Strip tree */
324 
			/* any populated slot keeps the bucket alive */
325 			for (i=0; i<=32; i++)
326 				if (b->ht[i])
327 					return 0;
328 
329 			/* OK, session has no flows */
330 			tcf_tree_lock(tp);
331 			head->table[to_hash(h)] = NULL;
332 			tcf_tree_unlock(tp);
333 
334 			kfree(b);
335 			return 0;
336 		}
337 	}
338 	return 0;
339 }
340 
/* Validate the netlink attributes in tb[] and (re)configure filter f.
 *
 * The canonical handle (nhandle) is built as:
 *   bits 0..7   TO realm; an absent TO is encoded as bit 15
 *               (the 0x8000 default below)
 *   bits 16..31 FROM realm, or ifindex|0x8000, or 0xFFFF for neither
 * The destination bucket is allocated on demand; f's id/iif/handle/bkt
 * are published under the tree lock.  Returns 0 or a negative errno;
 * on failure the already-validated extensions in e are destroyed.
 */
341 static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
342 	struct route4_filter *f, u32 handle, struct route4_head *head,
343 	struct rtattr **tb, struct rtattr *est, int new)
344 {
345 	int err;
346 	u32 id = 0, to = 0, nhandle = 0x8000;
347 	struct route4_filter *fp;
348 	unsigned int h1;
349 	struct route4_bucket *b;
350 	struct tcf_exts e;
351 
352 	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
353 	if (err < 0)
354 		return err;
355 
356 	err = -EINVAL;
357 	if (tb[TCA_ROUTE4_CLASSID-1])
358 		if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < sizeof(u32))
359 			goto errout;
360 
361 	if (tb[TCA_ROUTE4_TO-1]) {
		/* an explicit TO contradicts a handle claiming "no TO" */
362 		if (new && handle & 0x8000)
363 			goto errout;
364 		if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < sizeof(u32))
365 			goto errout;
366 		to = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
367 		if (to > 0xFF)
368 			goto errout;
369 		nhandle = to;
370 	}
371 
	/* FROM realm and IIF are mutually exclusive (see file header). */
372 	if (tb[TCA_ROUTE4_FROM-1]) {
373 		if (tb[TCA_ROUTE4_IIF-1])
374 			goto errout;
375 		if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < sizeof(u32))
376 			goto errout;
377 		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]);
378 		if (id > 0xFF)
379 			goto errout;
380 		nhandle |= id << 16;
381 	} else if (tb[TCA_ROUTE4_IIF-1]) {
382 		if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < sizeof(u32))
383 			goto errout;
384 		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
385 		if (id > 0x7FFF)
386 			goto errout;
387 		nhandle |= (id | 0x8000) << 16;
388 	} else
389 		nhandle |= 0xFFFF << 16;
390 
	/* A caller-supplied handle must agree with the attributes; only
	 * bits 0x7F00 may carry extra distinguishing information.
	 */
391 	if (handle && new) {
392 		nhandle |= handle & 0x7F00;
393 		if (nhandle != handle)
394 			goto errout;
395 	}
396 
	/* Find or create the destination bucket. */
397 	h1 = to_hash(nhandle);
398 	if ((b = head->table[h1]) == NULL) {
399 		err = -ENOBUFS;
400 		b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL);
401 		if (b == NULL)
402 			goto errout;
403 		memset(b, 0, sizeof(*b));
404 
405 		tcf_tree_lock(tp);
406 		head->table[h1] = b;
407 		tcf_tree_unlock(tp);
408 	} else {
409 		unsigned int h2 = from_hash(nhandle >> 16);
		/* NOTE(review): f->handle is still 0 for a brand-new filter at
		 * this point, so this duplicate check can only match a stored
		 * handle of 0 — it looks like it was meant to compare nhandle;
		 * confirm before relying on -EEXIST here.
		 */
410 		err = -EEXIST;
411 		for (fp = b->ht[h2]; fp; fp = fp->next)
412 			if (fp->handle == f->handle)
413 				goto errout;
414 	}
415 
	/* Publish the new identity atomically w.r.t. the classify path. */
416 	tcf_tree_lock(tp);
417 	if (tb[TCA_ROUTE4_TO-1])
418 		f->id = to;
419 
420 	if (tb[TCA_ROUTE4_FROM-1])
421 		f->id = to | id<<16;
422 	else if (tb[TCA_ROUTE4_IIF-1])
423 		f->iif = id;
424 
425 	f->handle = nhandle;
426 	f->bkt = b;
427 	tcf_tree_unlock(tp);
428 
429 	if (tb[TCA_ROUTE4_CLASSID-1]) {
430 		f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
431 		tcf_bind_filter(tp, &f->res, base);
432 	}
433 
434 	tcf_exts_change(tp, &f->exts, &e);
435 
436 	return 0;
437 errout:
438 	tcf_exts_destroy(tp, &e);
439 	return err;
440 }
441 
/* Create a new filter, or reconfigure the existing one passed via
 * *arg.  The head is allocated lazily on first use.  After
 * route4_set_parms() the filter is inserted into its chain in
 * ascending-handle order; if a reconfigure changed the handle, the
 * stale link at the old position is removed afterwards.  Finally the
 * fastmap is reset because it may cache results this change
 * invalidated.
 */
442 static int route4_change(struct tcf_proto *tp, unsigned long base,
443 		       u32 handle,
444 		       struct rtattr **tca,
445 		       unsigned long *arg)
446 {
447 	struct route4_head *head = tp->root;
448 	struct route4_filter *f, *f1, **fp;
449 	struct route4_bucket *b;
450 	struct rtattr *opt = tca[TCA_OPTIONS-1];
451 	struct rtattr *tb[TCA_ROUTE4_MAX];
452 	unsigned int h, th;
453 	u32 old_handle = 0;
454 	int err;
455 
456 	if (opt == NULL)
457 		return handle ? -EINVAL : 0;
458 
459 	if (rtattr_parse_nested(tb, TCA_ROUTE4_MAX, opt) < 0)
460 		return -EINVAL;
461 
	/* Replace path: reuse the existing filter object. */
462 	if ((f = (struct route4_filter*)*arg) != NULL) {
463 		if (f->handle != handle && handle)
464 			return -EINVAL;
465 
466 		if (f->bkt)
467 			old_handle = f->handle;
468 
469 		err = route4_set_parms(tp, base, f, handle, head, tb,
470 			tca[TCA_RATE-1], 0);
471 		if (err < 0)
472 			return err;
473 
474 		goto reinsert;
475 	}
476 
	/* Create path: allocate head (first filter ever) and filter. */
477 	err = -ENOBUFS;
478 	if (head == NULL) {
479 		head = kmalloc(sizeof(struct route4_head), GFP_KERNEL);
480 		if (head == NULL)
481 			goto errout;
482 		memset(head, 0, sizeof(struct route4_head));
483 
484 		tcf_tree_lock(tp);
485 		tp->root = head;
486 		tcf_tree_unlock(tp);
487 	}
488 
489 	f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL);
490 	if (f == NULL)
491 		goto errout;
492 	memset(f, 0, sizeof(*f));
493 
494 	err = route4_set_parms(tp, base, f, handle, head, tb,
495 		tca[TCA_RATE-1], 1);
496 	if (err < 0)
497 		goto errout;
498 
499 reinsert:
	/* insert keeping the chain sorted by ascending handle */
500 	h = from_hash(f->handle >> 16);
501 	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
502 		if (f->handle < f1->handle)
503 			break;
504 
505 	f->next = f1;
506 	tcf_tree_lock(tp);
507 	*fp = f;
508 
	/* A changed handle leaves f linked at its old slot as well;
	 * hunt that link down and remove it under the same lock.
	 */
509 	if (old_handle && f->handle != old_handle) {
510 		th = to_hash(old_handle);
511 		h = from_hash(old_handle >> 16);
512 		if ((b = head->table[th]) != NULL) {
513 			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
514 				if (*fp == f) {
515 					*fp = f->next;
516 					break;
517 				}
518 			}
519 		}
520 	}
521 	tcf_tree_unlock(tp);
522 
523 	route4_reset_fastmap(tp->q->dev, head, f->id);
524 	*arg = (unsigned long)f;
525 	return 0;
526 
527 errout:
528 	if (f)
529 		kfree(f);
530 	return err;
531 }
532 
533 static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
534 {
535 	struct route4_head *head = tp->root;
536 	unsigned h, h1;
537 
538 	if (head == NULL)
539 		arg->stop = 1;
540 
541 	if (arg->stop)
542 		return;
543 
544 	for (h = 0; h <= 256; h++) {
545 		struct route4_bucket *b = head->table[h];
546 
547 		if (b) {
548 			for (h1 = 0; h1 <= 32; h1++) {
549 				struct route4_filter *f;
550 
551 				for (f = b->ht[h1]; f; f = f->next) {
552 					if (arg->count < arg->skip) {
553 						arg->count++;
554 						continue;
555 					}
556 					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
557 						arg->stop = 1;
558 						return;
559 					}
560 					arg->count++;
561 				}
562 			}
563 		}
564 	}
565 }
566 
/* Emit one filter's configuration as nested rtattrs for tc.  RTA_PUT
 * jumps to rtattr_failure when the skb runs out of room, where all
 * partially written output is trimmed away again.
 */
567 static int route4_dump(struct tcf_proto *tp, unsigned long fh,
568 		       struct sk_buff *skb, struct tcmsg *t)
569 {
570 	struct route4_filter *f = (struct route4_filter*)fh;
571 	unsigned char	 *b = skb->tail;
572 	struct rtattr *rta;
573 	u32 id;
574 
575 	if (f == NULL)
576 		return skb->len;
577 
578 	t->tcm_handle = f->handle;
579 
580 	rta = (struct rtattr*)b;
581 	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);	/* length patched below */
582 
	/* bit 15 clear means an explicit TO realm was configured */
583 	if (!(f->handle&0x8000)) {
584 		id = f->id&0xFF;
585 		RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
586 	}
	/* bit 31 distinguishes "fromdev" (iif) from "from" (realm);
	 * 0xFFFF in the high half means neither was given
	 */
587 	if (f->handle&0x80000000) {
588 		if ((f->handle>>16) != 0xFFFF)
589 			RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
590 	} else {
591 		id = f->id>>16;
592 		RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
593 	}
594 	if (f->res.classid)
595 		RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);
596 
597 	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
598 		goto rtattr_failure;
599 
600 	rta->rta_len = skb->tail - b;
601 
602 	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
603 		goto rtattr_failure;
604 
605 	return skb->len;
606 
607 rtattr_failure:
608 	skb_trim(skb, b - skb->data);
609 	return -1;
610 }
611 
/* Classifier hooks registered with the packet scheduler core;
 * "route" is the name used by tc filter commands.
 */
612 static struct tcf_proto_ops cls_route4_ops = {
613 	.next		=	NULL,
614 	.kind		=	"route",
615 	.classify	=	route4_classify,
616 	.init		=	route4_init,
617 	.destroy	=	route4_destroy,
618 	.get		=	route4_get,
619 	.put		=	route4_put,
620 	.change		=	route4_change,
621 	.delete		=	route4_delete,
622 	.walk		=	route4_walk,
623 	.dump		=	route4_dump,
624 	.owner		=	THIS_MODULE,
625 };
626 
/* Module load: register the "route" classifier. */
627 static int __init init_route4(void)
628 {
629 	return register_tcf_proto_ops(&cls_route4_ops);
630 }
631 
/* Module unload: unregister the "route" classifier. */
632 static void __exit exit_route4(void)
633 {
634 	unregister_tcf_proto_ops(&cls_route4_ops);
635 }
636 
/* Standard module entry/exit registration. */
637 module_init(init_route4)
638 module_exit(exit_route4)
639 MODULE_LICENSE("GPL");
640