xref: /openbmc/linux/net/sched/cls_u32.c (revision c21b37f6)
1 /*
2  * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10  *
11  *	The filters are packed to hash tables of key nodes
12  *	with a set of 32bit key/mask pairs at every node.
13  *	Nodes reference next level hash tables etc.
14  *
15  *	This scheme is the best universal classifier I managed to
16  *	invent; it is not super-fast, but it is not slow (provided you
17  *	program it correctly), and general enough.  And its relative
18  *	speed grows as the number of rules becomes larger.
19  *
20  *	It seems that it represents the best middle point between
21  *	speed and manageability both by human and by machine.
22  *
23  *	It is especially useful for link sharing combined with QoS;
24  *	pure RSVP doesn't need such a general approach and can use
25  *	much simpler (and faster) schemes, sort of cls_rsvp.c.
26  *
27  *	JHS: We should remove CONFIG_NET_CLS_IND from here
28  *	eventually, once the meta match extension is made available.
29  *
30  *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
31  */
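
/*
 *	For orientation, a simplified sketch of the per-key test performed
 *	by u32_classify() below (the off2/offmask adjustment is omitted):
 *	each key node carries an array of {val, mask, off} triples, and a
 *	packet word matches a key when
 *
 *		((*(u32 *)(ptr + key->off) ^ key->val) & key->mask) == 0
 *
 *	A node that links to a lower hash table selects its bucket by
 *	folding another packet word through the selector's hmask/fshift
 *	pair; see u32_hash_fold().
 */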
32 
33 #include <linux/module.h>
34 #include <linux/types.h>
35 #include <linux/kernel.h>
36 #include <linux/string.h>
37 #include <linux/errno.h>
38 #include <linux/rtnetlink.h>
39 #include <linux/skbuff.h>
40 #include <net/netlink.h>
41 #include <net/act_api.h>
42 #include <net/pkt_cls.h>
43 
44 struct tc_u_knode
45 {
46 	struct tc_u_knode	*next;
47 	u32			handle;
48 	struct tc_u_hnode	*ht_up;
49 	struct tcf_exts		exts;
50 #ifdef CONFIG_NET_CLS_IND
51 	char                     indev[IFNAMSIZ];
52 #endif
53 	u8			fshift;
54 	struct tcf_result	res;
55 	struct tc_u_hnode	*ht_down;
56 #ifdef CONFIG_CLS_U32_PERF
57 	struct tc_u32_pcnt	*pf;
58 #endif
59 #ifdef CONFIG_CLS_U32_MARK
60 	struct tc_u32_mark	mark;
61 #endif
62 	struct tc_u32_sel	sel;
63 };
64 
65 struct tc_u_hnode
66 {
67 	struct tc_u_hnode	*next;
68 	u32			handle;
69 	u32			prio;
70 	struct tc_u_common	*tp_c;
71 	int			refcnt;
72 	unsigned		divisor;
73 	struct tc_u_knode	*ht[1];
74 };
75 
76 struct tc_u_common
77 {
78 	struct tc_u_common	*next;
79 	struct tc_u_hnode	*hlist;
80 	struct Qdisc		*q;
81 	int			refcnt;
82 	u32			hgenerator;
83 };
84 
85 static struct tcf_ext_map u32_ext_map = {
86 	.action = TCA_U32_ACT,
87 	.police = TCA_U32_POLICE
88 };
89 
90 static struct tc_u_common *u32_list;
91 
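/*
 * Fold the chosen packet word down to a bucket index: mask it with the
 * selector's hmask and shift right by fshift, the position of the lowest
 * set bit of hmask (precomputed in u32_change()).
 */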
92 static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
93 {
94 	unsigned h = (key & sel->hmask)>>fshift;
95 
96 	return h;
97 }
98 
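/*
 * Main classification walk.  Starting at the root hash table, each key
 * node's selector keys are compared against 32-bit words of the packet.
 * A fully matching node either terminates (TC_U32_TERMINAL: run the
 * extensions and return the result) or links to a lower hash table, in
 * which case the node and packet pointer are pushed on a small stack
 * (bounded by TC_U32_MAXDEPTH) and the walk descends.  A miss in a lower
 * table pops back to the pushed node's terminal check.  Returns -1 when
 * nothing matches.
 */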
99 static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res)
100 {
101 	struct {
102 		struct tc_u_knode *knode;
103 		u8		  *ptr;
104 	} stack[TC_U32_MAXDEPTH];
105 
106 	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
107 	u8 *ptr = skb_network_header(skb);
108 	struct tc_u_knode *n;
109 	int sdepth = 0;
110 	int off2 = 0;
111 	int sel = 0;
112 #ifdef CONFIG_CLS_U32_PERF
113 	int j;
114 #endif
115 	int i, r;
116 
117 next_ht:
118 	n = ht->ht[sel];
119 
120 next_knode:
121 	if (n) {
122 		struct tc_u32_key *key = n->sel.keys;
123 
124 #ifdef CONFIG_CLS_U32_PERF
125 		n->pf->rcnt +=1;
126 		j = 0;
127 #endif
128 
129 #ifdef CONFIG_CLS_U32_MARK
130 		if ((skb->mark & n->mark.mask) != n->mark.val) {
131 			n = n->next;
132 			goto next_knode;
133 		} else {
134 			n->mark.success++;
135 		}
136 #endif
137 
138 		for (i = n->sel.nkeys; i>0; i--, key++) {
139 
140 			if ((*(u32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
141 				n = n->next;
142 				goto next_knode;
143 			}
144 #ifdef CONFIG_CLS_U32_PERF
145 			n->pf->kcnts[j] +=1;
146 			j++;
147 #endif
148 		}
149 		if (n->ht_down == NULL) {
150 check_terminal:
151 			if (n->sel.flags&TC_U32_TERMINAL) {
152 
153 				*res = n->res;
154 #ifdef CONFIG_NET_CLS_IND
155 				if (!tcf_match_indev(skb, n->indev)) {
156 					n = n->next;
157 					goto next_knode;
158 				}
159 #endif
160 #ifdef CONFIG_CLS_U32_PERF
161 				n->pf->rhit +=1;
162 #endif
163 				r = tcf_exts_exec(skb, &n->exts, res);
164 				if (r < 0) {
165 					n = n->next;
166 					goto next_knode;
167 				}
168 
169 				return r;
170 			}
171 			n = n->next;
172 			goto next_knode;
173 		}
174 
175 		/* PUSH */
176 		if (sdepth >= TC_U32_MAXDEPTH)
177 			goto deadloop;
178 		stack[sdepth].knode = n;
179 		stack[sdepth].ptr = ptr;
180 		sdepth++;
181 
182 		ht = n->ht_down;
183 		sel = 0;
184 		if (ht->divisor)
185 			sel = ht->divisor&u32_hash_fold(*(u32*)(ptr+n->sel.hoff), &n->sel,n->fshift);
186 
187 		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
188 			goto next_ht;
189 
190 		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
191 			off2 = n->sel.off + 3;
192 			if (n->sel.flags&TC_U32_VAROFFSET)
193 				off2 += ntohs(n->sel.offmask & *(u16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
194 			off2 &= ~3;
195 		}
196 		if (n->sel.flags&TC_U32_EAT) {
197 			ptr += off2;
198 			off2 = 0;
199 		}
200 
201 		if (ptr < skb_tail_pointer(skb))
202 			goto next_ht;
203 	}
204 
205 	/* POP */
206 	if (sdepth--) {
207 		n = stack[sdepth].knode;
208 		ht = n->ht_up;
209 		ptr = stack[sdepth].ptr;
210 		goto check_terminal;
211 	}
212 	return -1;
213 
214 deadloop:
215 	if (net_ratelimit())
216 		printk("cls_u32: dead loop\n");
217 	return -1;
218 }
219 
220 static __inline__ struct tc_u_hnode *
221 u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
222 {
223 	struct tc_u_hnode *ht;
224 
225 	for (ht = tp_c->hlist; ht; ht = ht->next)
226 		if (ht->handle == handle)
227 			break;
228 
229 	return ht;
230 }
231 
232 static __inline__ struct tc_u_knode *
233 u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
234 {
235 	unsigned sel;
236 	struct tc_u_knode *n = NULL;
237 
238 	sel = TC_U32_HASH(handle);
239 	if (sel > ht->divisor)
240 		goto out;
241 
242 	for (n = ht->ht[sel]; n; n = n->next)
243 		if (n->handle == handle)
244 			break;
245 out:
246 	return n;
247 }
248 
249 
250 static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
251 {
252 	struct tc_u_hnode *ht;
253 	struct tc_u_common *tp_c = tp->data;
254 
255 	if (TC_U32_HTID(handle) == TC_U32_ROOT)
256 		ht = tp->root;
257 	else
258 		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));
259 
260 	if (!ht)
261 		return 0;
262 
263 	if (TC_U32_KEY(handle) == 0)
264 		return (unsigned long)ht;
265 
266 	return (unsigned long)u32_lookup_key(ht, handle);
267 }
268 
269 static void u32_put(struct tcf_proto *tp, unsigned long f)
270 {
271 }
272 
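/*
 * Allocate an unused hash table id: the top 12 bits of the handle, with
 * bit 0x800 set.  Returns 0 if no free id can be found.
 */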
273 static u32 gen_new_htid(struct tc_u_common *tp_c)
274 {
275 	int i = 0x800;
276 
277 	do {
278 		if (++tp_c->hgenerator == 0x7FF)
279 			tp_c->hgenerator = 1;
280 	} while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
281 
282 	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
283 }
284 
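/*
 * Attach a new u32 instance to a qdisc: find (or create) the tc_u_common
 * block shared by all u32 filters on this qdisc, and allocate a root hash
 * table with a single bucket (divisor 0) for this priority.
 */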
285 static int u32_init(struct tcf_proto *tp)
286 {
287 	struct tc_u_hnode *root_ht;
288 	struct tc_u_common *tp_c;
289 
290 	for (tp_c = u32_list; tp_c; tp_c = tp_c->next)
291 		if (tp_c->q == tp->q)
292 			break;
293 
294 	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
295 	if (root_ht == NULL)
296 		return -ENOBUFS;
297 
298 	root_ht->divisor = 0;
299 	root_ht->refcnt++;
300 	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
301 	root_ht->prio = tp->prio;
302 
303 	if (tp_c == NULL) {
304 		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
305 		if (tp_c == NULL) {
306 			kfree(root_ht);
307 			return -ENOBUFS;
308 		}
309 		tp_c->q = tp->q;
310 		tp_c->next = u32_list;
311 		u32_list = tp_c;
312 	}
313 
314 	tp_c->refcnt++;
315 	root_ht->next = tp_c->hlist;
316 	tp_c->hlist = root_ht;
317 	root_ht->tp_c = tp_c;
318 
319 	tp->root = root_ht;
320 	tp->data = tp_c;
321 	return 0;
322 }
323 
324 static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
325 {
326 	tcf_unbind_filter(tp, &n->res);
327 	tcf_exts_destroy(tp, &n->exts);
328 	if (n->ht_down)
329 		n->ht_down->refcnt--;
330 #ifdef CONFIG_CLS_U32_PERF
331 	kfree(n->pf);
332 #endif
333 	kfree(n);
334 	return 0;
335 }
336 
337 static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
338 {
339 	struct tc_u_knode **kp;
340 	struct tc_u_hnode *ht = key->ht_up;
341 
342 	if (ht) {
343 		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
344 			if (*kp == key) {
345 				tcf_tree_lock(tp);
346 				*kp = key->next;
347 				tcf_tree_unlock(tp);
348 
349 				u32_destroy_key(tp, key);
350 				return 0;
351 			}
352 		}
353 	}
354 	BUG_TRAP(0);
355 	return 0;
356 }
357 
358 static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
359 {
360 	struct tc_u_knode *n;
361 	unsigned h;
362 
363 	for (h=0; h<=ht->divisor; h++) {
364 		while ((n = ht->ht[h]) != NULL) {
365 			ht->ht[h] = n->next;
366 
367 			u32_destroy_key(tp, n);
368 		}
369 	}
370 }
371 
372 static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
373 {
374 	struct tc_u_common *tp_c = tp->data;
375 	struct tc_u_hnode **hn;
376 
377 	BUG_TRAP(!ht->refcnt);
378 
379 	u32_clear_hnode(tp, ht);
380 
381 	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
382 		if (*hn == ht) {
383 			*hn = ht->next;
384 			kfree(ht);
385 			return 0;
386 		}
387 	}
388 
389 	BUG_TRAP(0);
390 	return -ENOENT;
391 }
392 
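/*
 * Tear down this filter instance: release the root hash table, and when
 * the last u32 instance on the qdisc is gone, unlink the shared
 * tc_u_common block and free all remaining hash tables.
 */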
393 static void u32_destroy(struct tcf_proto *tp)
394 {
395 	struct tc_u_common *tp_c = tp->data;
396 	struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);
397 
398 	BUG_TRAP(root_ht != NULL);
399 
400 	if (root_ht && --root_ht->refcnt == 0)
401 		u32_destroy_hnode(tp, root_ht);
402 
403 	if (--tp_c->refcnt == 0) {
404 		struct tc_u_hnode *ht;
405 		struct tc_u_common **tp_cp;
406 
407 		for (tp_cp = &u32_list; *tp_cp; tp_cp = &(*tp_cp)->next) {
408 			if (*tp_cp == tp_c) {
409 				*tp_cp = tp_c->next;
410 				break;
411 			}
412 		}
413 
414 		for (ht=tp_c->hlist; ht; ht = ht->next)
415 			u32_clear_hnode(tp, ht);
416 
417 		while ((ht = tp_c->hlist) != NULL) {
418 			tp_c->hlist = ht->next;
419 
420 			BUG_TRAP(ht->refcnt == 0);
421 
422 			kfree(ht);
423 		}
424 
425 		kfree(tp_c);
426 	}
427 
428 	tp->data = NULL;
429 }
430 
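/*
 * Delete a single element: a key node is unlinked from its bucket and
 * freed; a hash table is destroyed once its refcount drops to zero.  The
 * root hash table cannot be removed this way.
 */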
431 static int u32_delete(struct tcf_proto *tp, unsigned long arg)
432 {
433 	struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;
434 
435 	if (ht == NULL)
436 		return 0;
437 
438 	if (TC_U32_KEY(ht->handle))
439 		return u32_delete_key(tp, (struct tc_u_knode*)ht);
440 
441 	if (tp->root == ht)
442 		return -EINVAL;
443 
444 	if (--ht->refcnt == 0)
445 		u32_destroy_hnode(tp, ht);
446 
447 	return 0;
448 }
449 
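/*
 * Pick a node id inside the destination bucket: one past the largest id
 * already present (at least 0x800), capped at 0xFFF, OR-ed into the hash
 * table part of the handle.
 */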
450 static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
451 {
452 	struct tc_u_knode *n;
453 	unsigned i = 0x7FF;
454 
455 	for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
456 		if (i < TC_U32_NODE(n->handle))
457 			i = TC_U32_NODE(n->handle);
458 	i++;
459 
460 	return handle|(i>0xFFF ? 0xFFF : i);
461 }
462 
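/*
 * Validate and apply a key node's attributes: extensions (actions,
 * policing), an optional TCA_U32_LINK to a lower hash table (with
 * refcounting), the classid binding and, when configured, the input
 * device match.
 */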
463 static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
464 			 struct tc_u_hnode *ht,
465 			 struct tc_u_knode *n, struct rtattr **tb,
466 			 struct rtattr *est)
467 {
468 	int err;
469 	struct tcf_exts e;
470 
471 	err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map);
472 	if (err < 0)
473 		return err;
474 
475 	err = -EINVAL;
476 	if (tb[TCA_U32_LINK-1]) {
477 		u32 handle = *(u32*)RTA_DATA(tb[TCA_U32_LINK-1]);
478 		struct tc_u_hnode *ht_down = NULL;
479 
480 		if (TC_U32_KEY(handle))
481 			goto errout;
482 
483 		if (handle) {
484 			ht_down = u32_lookup_ht(ht->tp_c, handle);
485 
486 			if (ht_down == NULL)
487 				goto errout;
488 			ht_down->refcnt++;
489 		}
490 
491 		tcf_tree_lock(tp);
492 		ht_down = xchg(&n->ht_down, ht_down);
493 		tcf_tree_unlock(tp);
494 
495 		if (ht_down)
496 			ht_down->refcnt--;
497 	}
498 	if (tb[TCA_U32_CLASSID-1]) {
499 		n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
500 		tcf_bind_filter(tp, &n->res, base);
501 	}
502 
503 #ifdef CONFIG_NET_CLS_IND
504 	if (tb[TCA_U32_INDEV-1]) {
505 		int err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]);
506 		if (err < 0)
507 			goto errout;
508 	}
509 #endif
510 	tcf_exts_change(tp, &n->exts, &e);
511 
512 	return 0;
513 errout:
514 	tcf_exts_destroy(tp, &e);
515 	return err;
516 }
517 
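/*
 * Create or modify an element.  An existing key node only has its
 * parameters updated; TCA_U32_DIVISOR creates a new hash table; otherwise
 * a new key node is allocated (selector, optional perf counters and mark,
 * fshift) and inserted into the chosen table's bucket in node-id order.
 */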
518 static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
519 		      struct rtattr **tca,
520 		      unsigned long *arg)
521 {
522 	struct tc_u_common *tp_c = tp->data;
523 	struct tc_u_hnode *ht;
524 	struct tc_u_knode *n;
525 	struct tc_u32_sel *s;
526 	struct rtattr *opt = tca[TCA_OPTIONS-1];
527 	struct rtattr *tb[TCA_U32_MAX];
528 	u32 htid;
529 	int err;
530 
531 	if (opt == NULL)
532 		return handle ? -EINVAL : 0;
533 
534 	if (rtattr_parse_nested(tb, TCA_U32_MAX, opt) < 0)
535 		return -EINVAL;
536 
537 	if ((n = (struct tc_u_knode*)*arg) != NULL) {
538 		if (TC_U32_KEY(n->handle) == 0)
539 			return -EINVAL;
540 
541 		return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE-1]);
542 	}
543 
544 	if (tb[TCA_U32_DIVISOR-1]) {
545 		unsigned divisor = *(unsigned*)RTA_DATA(tb[TCA_U32_DIVISOR-1]);
546 
547 		if (--divisor > 0x100)
548 			return -EINVAL;
549 		if (TC_U32_KEY(handle))
550 			return -EINVAL;
551 		if (handle == 0) {
552 			handle = gen_new_htid(tp->data);
553 			if (handle == 0)
554 				return -ENOMEM;
555 		}
556 		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
557 		if (ht == NULL)
558 			return -ENOBUFS;
559 		ht->tp_c = tp_c;
560 		ht->refcnt = 0;
561 		ht->divisor = divisor;
562 		ht->handle = handle;
563 		ht->prio = tp->prio;
564 		ht->next = tp_c->hlist;
565 		tp_c->hlist = ht;
566 		*arg = (unsigned long)ht;
567 		return 0;
568 	}
569 
570 	if (tb[TCA_U32_HASH-1]) {
571 		htid = *(unsigned*)RTA_DATA(tb[TCA_U32_HASH-1]);
572 		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
573 			ht = tp->root;
574 			htid = ht->handle;
575 		} else {
576 			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
577 			if (ht == NULL)
578 				return -EINVAL;
579 		}
580 	} else {
581 		ht = tp->root;
582 		htid = ht->handle;
583 	}
584 
585 	if (ht->divisor < TC_U32_HASH(htid))
586 		return -EINVAL;
587 
588 	if (handle) {
589 		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
590 			return -EINVAL;
591 		handle = htid | TC_U32_NODE(handle);
592 	} else
593 		handle = gen_new_kid(ht, htid);
594 
595 	if (tb[TCA_U32_SEL-1] == 0 ||
596 	    RTA_PAYLOAD(tb[TCA_U32_SEL-1]) < sizeof(struct tc_u32_sel))
597 		return -EINVAL;
598 
599 	s = RTA_DATA(tb[TCA_U32_SEL-1]);
600 
601 	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
602 	if (n == NULL)
603 		return -ENOBUFS;
604 
605 #ifdef CONFIG_CLS_U32_PERF
606 	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
607 	if (n->pf == NULL) {
608 		kfree(n);
609 		return -ENOBUFS;
610 	}
611 #endif
612 
613 	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
614 	n->ht_up = ht;
615 	n->handle = handle;
616 {
617 	u8 i = 0;
618 	u32 mask = s->hmask;
619 	if (mask) {
620 		while (!(mask & 1)) {
621 			i++;
622 			mask>>=1;
623 		}
624 	}
625 	n->fshift = i;
626 }
627 
628 #ifdef CONFIG_CLS_U32_MARK
629 	if (tb[TCA_U32_MARK-1]) {
630 		struct tc_u32_mark *mark;
631 
632 		if (RTA_PAYLOAD(tb[TCA_U32_MARK-1]) < sizeof(struct tc_u32_mark)) {
633 #ifdef CONFIG_CLS_U32_PERF
634 			kfree(n->pf);
635 #endif
636 			kfree(n);
637 			return -EINVAL;
638 		}
639 		mark = RTA_DATA(tb[TCA_U32_MARK-1]);
640 		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
641 		n->mark.success = 0;
642 	}
643 #endif
644 
645 	err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE-1]);
646 	if (err == 0) {
647 		struct tc_u_knode **ins;
648 		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
649 			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
650 				break;
651 
652 		n->next = *ins;
653 		wmb();
654 		*ins = n;
655 
656 		*arg = (unsigned long)n;
657 		return 0;
658 	}
659 #ifdef CONFIG_CLS_U32_PERF
660 	kfree(n->pf);
661 #endif
662 	kfree(n);
663 	return err;
664 }
665 
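/*
 * Iterate over every hash table of this priority and every key node in
 * each bucket, invoking the walker callback with the usual skip/count
 * bookkeeping.
 */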
666 static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
667 {
668 	struct tc_u_common *tp_c = tp->data;
669 	struct tc_u_hnode *ht;
670 	struct tc_u_knode *n;
671 	unsigned h;
672 
673 	if (arg->stop)
674 		return;
675 
676 	for (ht = tp_c->hlist; ht; ht = ht->next) {
677 		if (ht->prio != tp->prio)
678 			continue;
679 		if (arg->count >= arg->skip) {
680 			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
681 				arg->stop = 1;
682 				return;
683 			}
684 		}
685 		arg->count++;
686 		for (h = 0; h <= ht->divisor; h++) {
687 			for (n = ht->ht[h]; n; n = n->next) {
688 				if (arg->count < arg->skip) {
689 					arg->count++;
690 					continue;
691 				}
692 				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
693 					arg->stop = 1;
694 					return;
695 				}
696 				arg->count++;
697 			}
698 		}
699 	}
700 }
701 
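/*
 * Dump one element as netlink attributes: just the divisor for a hash
 * table; the selector, hash table id, classid, link, mark, extensions,
 * input device and performance counters for a key node.
 */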
702 static int u32_dump(struct tcf_proto *tp, unsigned long fh,
703 		     struct sk_buff *skb, struct tcmsg *t)
704 {
705 	struct tc_u_knode *n = (struct tc_u_knode*)fh;
706 	unsigned char *b = skb_tail_pointer(skb);
707 	struct rtattr *rta;
708 
709 	if (n == NULL)
710 		return skb->len;
711 
712 	t->tcm_handle = n->handle;
713 
714 	rta = (struct rtattr*)b;
715 	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
716 
717 	if (TC_U32_KEY(n->handle) == 0) {
718 		struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
719 		u32 divisor = ht->divisor+1;
720 		RTA_PUT(skb, TCA_U32_DIVISOR, 4, &divisor);
721 	} else {
722 		RTA_PUT(skb, TCA_U32_SEL,
723 			sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
724 			&n->sel);
725 		if (n->ht_up) {
726 			u32 htid = n->handle & 0xFFFFF000;
727 			RTA_PUT(skb, TCA_U32_HASH, 4, &htid);
728 		}
729 		if (n->res.classid)
730 			RTA_PUT(skb, TCA_U32_CLASSID, 4, &n->res.classid);
731 		if (n->ht_down)
732 			RTA_PUT(skb, TCA_U32_LINK, 4, &n->ht_down->handle);
733 
734 #ifdef CONFIG_CLS_U32_MARK
735 		if (n->mark.val || n->mark.mask)
736 			RTA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
737 #endif
738 
739 		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
740 			goto rtattr_failure;
741 
742 #ifdef CONFIG_NET_CLS_IND
743 		if (strlen(n->indev))
744 			RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
745 #endif
746 #ifdef CONFIG_CLS_U32_PERF
747 		RTA_PUT(skb, TCA_U32_PCNT,
748 		sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
749 			n->pf);
750 #endif
751 	}
752 
753 	rta->rta_len = skb_tail_pointer(skb) - b;
754 	if (TC_U32_KEY(n->handle))
755 		if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
756 			goto rtattr_failure;
757 	return skb->len;
758 
759 rtattr_failure:
760 	nlmsg_trim(skb, b);
761 	return -1;
762 }
763 
764 static struct tcf_proto_ops cls_u32_ops = {
765 	.next		=	NULL,
766 	.kind		=	"u32",
767 	.classify	=	u32_classify,
768 	.init		=	u32_init,
769 	.destroy	=	u32_destroy,
770 	.get		=	u32_get,
771 	.put		=	u32_put,
772 	.change		=	u32_change,
773 	.delete		=	u32_delete,
774 	.walk		=	u32_walk,
775 	.dump		=	u32_dump,
776 	.owner		=	THIS_MODULE,
777 };
778 
779 static int __init init_u32(void)
780 {
781 	printk("u32 classifier\n");
782 #ifdef CONFIG_CLS_U32_PERF
783 	printk("    Performance counters on\n");
784 #endif
785 #ifdef CONFIG_NET_CLS_IND
786 	printk("    input device check on \n");
787 #endif
788 #ifdef CONFIG_NET_CLS_ACT
789 	printk("    Actions configured \n");
790 #endif
791 	return register_tcf_proto_ops(&cls_u32_ops);
792 }
793 
794 static void __exit exit_u32(void)
795 {
796 	unregister_tcf_proto_ops(&cls_u32_ops);
797 }
798 
799 module_init(init_u32)
800 module_exit(exit_u32)
801 MODULE_LICENSE("GPL");
802