xref: /openbmc/linux/net/sched/cls_route.c (revision 64c70b1c)
1 /*
2  * net/sched/cls_route.c	ROUTE4 classifier.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10  */
11 
12 #include <linux/module.h>
13 #include <asm/uaccess.h>
14 #include <asm/system.h>
15 #include <linux/bitops.h>
16 #include <linux/types.h>
17 #include <linux/kernel.h>
18 #include <linux/string.h>
19 #include <linux/mm.h>
20 #include <linux/socket.h>
21 #include <linux/sockios.h>
22 #include <linux/in.h>
23 #include <linux/errno.h>
24 #include <linux/interrupt.h>
25 #include <linux/if_ether.h>
26 #include <linux/inet.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/notifier.h>
30 #include <net/ip.h>
31 #include <net/netlink.h>
32 #include <net/route.h>
33 #include <linux/skbuff.h>
34 #include <net/sock.h>
35 #include <net/act_api.h>
36 #include <net/pkt_cls.h>
37 
38 /*
39    1. For now we assume that route tags < 256.
40       It allows to use direct table lookups, instead of hash tables.
41    2. For now we assume that "from TAG" and "fromdev DEV" statements
42       are mutually  exclusive.
43    3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
44  */
45 
/*
 * One slot of the direct-mapped result cache: remembers the filter that
 * matched (or ROUTE4_FAILURE) for a given (route tag, input interface)
 * pair so route4_classify() can skip the full table walk on a hit.
 */
struct route4_fastmap
{
	struct route4_filter	*filter;	/* cached result, or ROUTE4_FAILURE */
	u32			id;		/* dst->tclassid this entry caches */
	int			iif;		/* input interface index it caches */
};
52 
/*
 * Per-classifier root: a small lookup cache plus the first-level table,
 * one bucket per "to" tag (0..255) and slot 256 for the "to ANY" case.
 */
struct route4_head
{
	struct route4_fastmap	fastmap[16];	/* direct-mapped result cache */
	struct route4_bucket	*table[256+1];	/* indexed by to_hash(handle) */
};
58 
/* Second lookup level; chain selected by from_hash(handle >> 16). */
struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16+16+1];
};
64 
/* A single ROUTE4 filter instance, linked into one bucket chain. */
struct route4_filter
{
	struct route4_filter	*next;	/* chain link, kept sorted by handle */
	u32			id;	/* route tag to match: to | (from << 16) */
	int			iif;	/* input interface to match (IIF mode) */

	struct tcf_result	res;	/* classification result (classid) */
	struct tcf_exts		exts;	/* attached actions / policer */
	u32			handle;	/* encodes to, from/iif and mode bits */
	struct route4_bucket	*bkt;	/* back-pointer to the owning bucket */
};
76 
/* Sentinel stored in the fastmap to cache a negative lookup result. */
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))

/* Maps the generic tcf extension slots to ROUTE4's netlink attributes. */
static struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};
83 
84 static __inline__ int route4_fastmap_hash(u32 id, int iif)
85 {
86 	return id&0xF;
87 }
88 
/*
 * Invalidate the whole fastmap under the qdisc tree lock.  Called after
 * any filter insertion or removal; @id is unused — the entire cache is
 * wiped rather than just the slots the changed tag could occupy.
 */
static inline
void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
	qdisc_lock_tree(dev);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	qdisc_unlock_tree(dev);
}
96 
97 static inline void
98 route4_set_fastmap(struct route4_head *head, u32 id, int iif,
99 		   struct route4_filter *f)
100 {
101 	int h = route4_fastmap_hash(id, iif);
102 	head->fastmap[h].id = id;
103 	head->fastmap[h].iif = iif;
104 	head->fastmap[h].filter = f;
105 }
106 
107 static __inline__ int route4_hash_to(u32 id)
108 {
109 	return id&0xFF;
110 }
111 
112 static __inline__ int route4_hash_from(u32 id)
113 {
114 	return (id>>16)&0xF;
115 }
116 
/* IIF chains live in bucket slots 16..31. */
static inline int route4_hash_iif(int iif)
{
	int nibble = (iif >> 16) & 0xF;

	return 16 + nibble;
}
121 
/* The single wildcard chain sits just past the FROM and IIF slots. */
static inline int route4_hash_wild(void)
{
	return 16 + 16;
}
126 
/*
 * Expanded inside the lookup loops of route4_classify(): copies the
 * filter's result into *res and runs its extensions.  On a negative
 * extension verdict it disables fastmap caching and continues scanning
 * the chain; on a positive verdict it returns it; with no extensions it
 * caches the hit in the fastmap (unless caching was disabled) and
 * returns 0.  Relies on res, skb, head, id, iif and dont_cache being in
 * scope at the expansion site.
 */
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
141 
/*
 * Classify a packet by its routing information: the route tag stored in
 * dst->tclassid plus the input interface.  Order of attempts:
 *   1. fastmap cache hit on the exact (id, iif) pair;
 *   2. the "to X" bucket: FROM chain, then IIF chain, then wildcard;
 *   3. restart against bucket 256 ("to ANY") with the tag bits cleared,
 *      so "to TAG from ANY" beats "to ANY from XXX" (see file header).
 * Returns 0 with *res filled on a match, -1 otherwise.  A miss is also
 * cached as ROUTE4_FAILURE unless an extension forbade caching.
 */
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	/* No route decision attached to the skb: nothing to classify on. */
	if ((dst = skb->dst) == NULL)
		goto failure;

	id = dst->tclassid;
	/* No table allocated yet: fall back to interpreting the tag
	 * directly as a classid (legacy behaviour). */
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable*)dst)->fl.iif;

	/* Fast path: one-entry cache keyed on (id, iif). */
	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		/* Most specific first: matching FROM tag ... */
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		/* ... then matching input interface ... */
		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		/* ... then the wildcard chain of this bucket. */
		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();

	}
	/* Second pass: the "to ANY" bucket, with the FROM/to bits of the
	 * tag masked off so wildcard filters can match. */
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	/* Legacy mode: a non-zero tag whose major part is empty or equals
	 * this qdisc's handle is used directly as the classid. */
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
208 
209 static inline u32 to_hash(u32 id)
210 {
211 	u32 h = id&0xFF;
212 	if (id&0x8000)
213 		h += 256;
214 	return h;
215 }
216 
217 static inline u32 from_hash(u32 id)
218 {
219 	id &= 0xFFFF;
220 	if (id == 0xFFFF)
221 		return 32;
222 	if (!(id & 0x8000)) {
223 		if (id > 255)
224 			return 256;
225 		return id&0xF;
226 	}
227 	return 16 + (id&0xF);
228 }
229 
230 static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
231 {
232 	struct route4_head *head = (struct route4_head*)tp->root;
233 	struct route4_bucket *b;
234 	struct route4_filter *f;
235 	unsigned h1, h2;
236 
237 	if (!head)
238 		return 0;
239 
240 	h1 = to_hash(handle);
241 	if (h1 > 256)
242 		return 0;
243 
244 	h2 = from_hash(handle>>16);
245 	if (h2 > 32)
246 		return 0;
247 
248 	if ((b = head->table[h1]) != NULL) {
249 		for (f = b->ht[h2]; f; f = f->next)
250 			if (f->handle == handle)
251 				return (unsigned long)f;
252 	}
253 	return 0;
254 }
255 
/* Filters are not reference counted: releasing a get() is a no-op. */
static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}
259 
/* Nothing to set up at attach time; the head is allocated lazily by
 * route4_change() when the first filter is inserted. */
static int route4_init(struct tcf_proto *tp)
{
	return 0;
}
264 
/* Release everything a filter owns: unbind its class, destroy its
 * extensions (actions/policer), then free the filter itself. */
static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}
272 
273 static void route4_destroy(struct tcf_proto *tp)
274 {
275 	struct route4_head *head = xchg(&tp->root, NULL);
276 	int h1, h2;
277 
278 	if (head == NULL)
279 		return;
280 
281 	for (h1=0; h1<=256; h1++) {
282 		struct route4_bucket *b;
283 
284 		if ((b = head->table[h1]) != NULL) {
285 			for (h2=0; h2<=32; h2++) {
286 				struct route4_filter *f;
287 
288 				while ((f = b->ht[h2]) != NULL) {
289 					b->ht[h2] = f->next;
290 					route4_delete_filter(tp, f);
291 				}
292 			}
293 			kfree(b);
294 		}
295 	}
296 	kfree(head);
297 }
298 
/*
 * Delete one filter (passed as the opaque cookie from route4_get()).
 * Unlinks it from its bucket chain under the tree lock, wipes the
 * fastmap, destroys the filter, and frees the bucket too if this was
 * its last filter.  Returns 0 even if the filter was not found in its
 * chain; -EINVAL only for a missing head or NULL argument.
 */
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	/* Scan the chain the handle decodes to for this exact filter. */
	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			/* Unlink under the tree lock so concurrent
			 * classification never sees a half-removed chain. */
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q->dev, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */

			/* Keep the bucket if any chain is still populated. */
			for (i=0; i<=32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}
339 
/*
 * Validate the netlink attributes for a new or changed filter and apply
 * them to @f.  Computes the canonical handle (nhandle) from the TO /
 * FROM / IIF attributes, allocates the target bucket on demand, rejects
 * duplicates, and finally commits id/iif/handle/bkt under the tree
 * lock before binding the classid and swapping in the extensions.
 *
 * @new distinguishes creation (handle must agree with the encoded
 * attributes) from modification.  Returns 0 or a negative errno; on
 * error the validated extensions are destroyed again.
 */
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct rtattr **tb, struct rtattr *est, int new)
{
	int err;
	/* Bit 15 set = "to ANY" until a TO attribute overrides it. */
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_CLASSID-1])
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < sizeof(u32))
			goto errout;

	/* "to TAG": tag must fit in a byte (see file header note 1). */
	if (tb[TCA_ROUTE4_TO-1]) {
		if (new && handle & 0x8000)
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < sizeof(u32))
			goto errout;
		to = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	/* "from TAG" and "fromdev DEV" are mutually exclusive (note 2). */
	if (tb[TCA_ROUTE4_FROM-1]) {
		if (tb[TCA_ROUTE4_IIF-1])
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF-1]) {
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
		if (id > 0x7FFF)
			goto errout;
		/* Bit 31 of the handle marks IIF mode. */
		nhandle |= (id | 0x8000) << 16;
	} else
		/* Neither FROM nor IIF: wildcard source. */
		nhandle |= 0xFFFF << 16;

	/* A caller-supplied handle must be consistent with the encoding. */
	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);
		err = -EEXIST;
		/* Refuse a second filter with the same handle. */
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	/* Commit the new identity atomically w.r.t. classification. */
	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO-1])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM-1])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF-1])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID-1]) {
		f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
439 
/*
 * Create a new filter or modify an existing one (tc "change"/"add").
 * For a modification, *arg carries the existing filter; its parameters
 * are updated and, if the handle changed, the filter is re-inserted
 * into the chain its new handle decodes to and unlinked from the old
 * one — both under the tree lock.  For a creation, the head and the
 * filter are allocated (head lazily, on first use) before the same
 * insertion path runs.  The fastmap is wiped afterwards.
 */
static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct rtattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_ROUTE4_MAX];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse_nested(tb, TCA_ROUTE4_MAX, opt) < 0)
		return -EINVAL;

	/* Modification path: *arg is the existing filter. */
	if ((f = (struct route4_filter*)*arg) != NULL) {
		if (f->handle != handle && handle)
			return -EINVAL;

		/* Remember the old handle so the stale chain entry can be
		 * removed after re-insertion below. */
		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE-1], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	/* Creation path: allocate head on first use, then the filter. */
	err = -ENOBUFS;
	if (head == NULL) {
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE-1], 1);
	if (err < 0)
		goto errout;

reinsert:
	/* Insert into the (possibly new) chain, keeping it handle-sorted. */
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	/* If the handle changed, unlink the entry left in the old chain. */
	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q->dev, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}
527 
528 static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
529 {
530 	struct route4_head *head = tp->root;
531 	unsigned h, h1;
532 
533 	if (head == NULL)
534 		arg->stop = 1;
535 
536 	if (arg->stop)
537 		return;
538 
539 	for (h = 0; h <= 256; h++) {
540 		struct route4_bucket *b = head->table[h];
541 
542 		if (b) {
543 			for (h1 = 0; h1 <= 32; h1++) {
544 				struct route4_filter *f;
545 
546 				for (f = b->ht[h1]; f; f = f->next) {
547 					if (arg->count < arg->skip) {
548 						arg->count++;
549 						continue;
550 					}
551 					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
552 						arg->stop = 1;
553 						return;
554 					}
555 					arg->count++;
556 				}
557 			}
558 		}
559 	}
560 }
561 
/*
 * Serialize one filter back to netlink for "tc filter show".  Decodes
 * the handle bits to decide which of TO / FROM / IIF to emit, then the
 * classid and extension attributes.  On any RTA_PUT overflow the
 * message is trimmed back and -1 returned.
 */
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	/* Open a TCA_OPTIONS container; length is patched at the end. */
	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	/* Bit 15 clear means an explicit "to" tag was configured. */
	if (!(f->handle&0x8000)) {
		id = f->id&0xFF;
		RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
	}
	/* Bit 31 marks IIF mode; 0xFFFF upper half is the wildcard. */
	if (f->handle&0x80000000) {
		if ((f->handle>>16) != 0xFFFF)
			RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
	} else {
		id = f->id>>16;
		RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
	}
	if (f->res.classid)
		RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	/* Close the container now that its payload size is known. */
	rta->rta_len = skb_tail_pointer(skb) - b;

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}
606 
/* Registration record tying the "route" classifier name to its ops. */
static struct tcf_proto_ops cls_route4_ops = {
	.next		=	NULL,
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};
621 
/* Module entry point: register the "route" classifier with tc. */
static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}
626 
/* Module exit point: unregister the classifier again. */
static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}
631 
/* Wire the init/exit hooks into the module loader and declare license. */
module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");
635