xref: /openbmc/linux/net/sched/cls_u32.c (revision aac5987a)
1 /*
2  * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10  *
11  *	The filters are packed into hash tables of key nodes,
12  *	with a set of 32-bit key/mask pairs at every node.
13  *	Nodes may reference next-level hash tables, and so on.
14  *
15  *	This scheme is the best universal classifier I managed to
16  *	invent; it is not super-fast, but it is not slow (provided you
17  *	program it correctly), and it is general enough.  Its relative
18  *	speed also improves as the number of rules grows.
19  *
20  *	It seems to represent the best middle ground between
21  *	speed and manageability, both by humans and by machines.
22  *
23  *	It is especially useful for link sharing combined with QoS;
24  *	pure RSVP doesn't need such a general approach and can use
25  *	much simpler (and faster) schemes, such as cls_rsvp.c.
26  *
27  *	JHS: We should remove CONFIG_NET_CLS_IND from here
28  *	eventually, once the meta match extension is made available.
29  *
30  *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
31  */
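/* A filter of the kind implemented here is typically installed from user
 * space with iproute2, e.g. roughly (device, parent and class ids below are
 * placeholders):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 10 u32 \
 *		match ip dst 10.0.0.0/8 flowid 1:10
 *
 * which creates a single key node in the root hash table of this
 * classifier.
 */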
32 
33 #include <linux/module.h>
34 #include <linux/slab.h>
35 #include <linux/types.h>
36 #include <linux/kernel.h>
37 #include <linux/string.h>
38 #include <linux/errno.h>
39 #include <linux/percpu.h>
40 #include <linux/rtnetlink.h>
41 #include <linux/skbuff.h>
42 #include <linux/bitmap.h>
43 #include <net/netlink.h>
44 #include <net/act_api.h>
45 #include <net/pkt_cls.h>
46 #include <linux/netdevice.h>
47 
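/* A key node: one filter entry. Key nodes are chained per hash bucket via
 * 'next', carry their 32-bit key/mask pairs in the variable-length 'sel'
 * at the end of the structure, and may point to a lower-level hash table
 * through 'ht_down'.
 */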
48 struct tc_u_knode {
49 	struct tc_u_knode __rcu	*next;
50 	u32			handle;
51 	struct tc_u_hnode __rcu	*ht_up;
52 	struct tcf_exts		exts;
53 #ifdef CONFIG_NET_CLS_IND
54 	int			ifindex;
55 #endif
56 	u8			fshift;
57 	struct tcf_result	res;
58 	struct tc_u_hnode __rcu	*ht_down;
59 #ifdef CONFIG_CLS_U32_PERF
60 	struct tc_u32_pcnt __percpu *pf;
61 #endif
62 	u32			flags;
63 #ifdef CONFIG_CLS_U32_MARK
64 	u32			val;
65 	u32			mask;
66 	u32 __percpu		*pcpu_success;
67 #endif
68 	struct tcf_proto	*tp;
69 	struct rcu_head		rcu;
70 	/* The 'sel' field MUST be the last field in the structure to allow
71 	 * for the tc_u32_key entries allocated at the end of the structure.
72 	 */
73 	struct tc_u32_sel	sel;
74 };
75 
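/* A hash table node: a header followed by 'divisor' + 1 buckets of key
 * nodes. The root table of an instance and any tables created via
 * TCA_U32_DIVISOR are linked into the per-qdisc list in tc_u_common.
 */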
76 struct tc_u_hnode {
77 	struct tc_u_hnode __rcu	*next;
78 	u32			handle;
79 	u32			prio;
80 	struct tc_u_common	*tp_c;
81 	int			refcnt;
82 	unsigned int		divisor;
83 	struct rcu_head		rcu;
84 	/* The 'ht' field MUST be the last field in the structure to allow
85 	 * for additional bucket entries allocated at the end of the structure.
86 	 */
87 	struct tc_u_knode __rcu	*ht[1];
88 };
89 
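/* State shared by all u32 instances attached to the same qdisc: the list
 * of hash tables and the generator used for new hash table handles.
 */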
90 struct tc_u_common {
91 	struct tc_u_hnode __rcu	*hlist;
92 	struct Qdisc		*q;
93 	int			refcnt;
94 	u32			hgenerator;
95 	struct rcu_head		rcu;
96 };
97 
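/* Fold the 32-bit word selected for hashing into a bucket index: mask it
 * with sel->hmask and shift right by the shift precomputed from that mask.
 */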
98 static inline unsigned int u32_hash_fold(__be32 key,
99 					 const struct tc_u32_sel *sel,
100 					 u8 fshift)
101 {
102 	unsigned int h = ntohl(key & sel->hmask) >> fshift;
103 
104 	return h;
105 }
106 
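/* The classification walk. Starting from the root hash table, compare each
 * key node's key/mask pairs against the packet at the current offset. On a
 * full match, either return the terminal result (if the node's actions
 * accept the packet) or descend into the linked hash table, pushing the
 * current node and offset on a small stack so outer nodes can be retried
 * when an inner table yields no terminal match.
 */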
107 static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
108 			struct tcf_result *res)
109 {
110 	struct {
111 		struct tc_u_knode *knode;
112 		unsigned int	  off;
113 	} stack[TC_U32_MAXDEPTH];
114 
115 	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
116 	unsigned int off = skb_network_offset(skb);
117 	struct tc_u_knode *n;
118 	int sdepth = 0;
119 	int off2 = 0;
120 	int sel = 0;
121 #ifdef CONFIG_CLS_U32_PERF
122 	int j;
123 #endif
124 	int i, r;
125 
126 next_ht:
127 	n = rcu_dereference_bh(ht->ht[sel]);
128 
129 next_knode:
130 	if (n) {
131 		struct tc_u32_key *key = n->sel.keys;
132 
133 #ifdef CONFIG_CLS_U32_PERF
134 		__this_cpu_inc(n->pf->rcnt);
135 		j = 0;
136 #endif
137 
138 		if (tc_skip_sw(n->flags)) {
139 			n = rcu_dereference_bh(n->next);
140 			goto next_knode;
141 		}
142 
143 #ifdef CONFIG_CLS_U32_MARK
144 		if ((skb->mark & n->mask) != n->val) {
145 			n = rcu_dereference_bh(n->next);
146 			goto next_knode;
147 		} else {
148 			__this_cpu_inc(*n->pcpu_success);
149 		}
150 #endif
151 
152 		for (i = n->sel.nkeys; i > 0; i--, key++) {
153 			int toff = off + key->off + (off2 & key->offmask);
154 			__be32 *data, hdata;
155 
156 			if (skb_headroom(skb) + toff > INT_MAX)
157 				goto out;
158 
159 			data = skb_header_pointer(skb, toff, 4, &hdata);
160 			if (!data)
161 				goto out;
162 			if ((*data ^ key->val) & key->mask) {
163 				n = rcu_dereference_bh(n->next);
164 				goto next_knode;
165 			}
166 #ifdef CONFIG_CLS_U32_PERF
167 			__this_cpu_inc(n->pf->kcnts[j]);
168 			j++;
169 #endif
170 		}
171 
172 		ht = rcu_dereference_bh(n->ht_down);
173 		if (!ht) {
174 check_terminal:
175 			if (n->sel.flags & TC_U32_TERMINAL) {
176 
177 				*res = n->res;
178 #ifdef CONFIG_NET_CLS_IND
179 				if (!tcf_match_indev(skb, n->ifindex)) {
180 					n = rcu_dereference_bh(n->next);
181 					goto next_knode;
182 				}
183 #endif
184 #ifdef CONFIG_CLS_U32_PERF
185 				__this_cpu_inc(n->pf->rhit);
186 #endif
187 				r = tcf_exts_exec(skb, &n->exts, res);
188 				if (r < 0) {
189 					n = rcu_dereference_bh(n->next);
190 					goto next_knode;
191 				}
192 
193 				return r;
194 			}
195 			n = rcu_dereference_bh(n->next);
196 			goto next_knode;
197 		}
198 
199 		/* PUSH */
200 		if (sdepth >= TC_U32_MAXDEPTH)
201 			goto deadloop;
202 		stack[sdepth].knode = n;
203 		stack[sdepth].off = off;
204 		sdepth++;
205 
206 		ht = rcu_dereference_bh(n->ht_down);
207 		sel = 0;
208 		if (ht->divisor) {
209 			__be32 *data, hdata;
210 
211 			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
212 						  &hdata);
213 			if (!data)
214 				goto out;
215 			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
216 							  n->fshift);
217 		}
218 		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
219 			goto next_ht;
220 
221 		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
222 			off2 = n->sel.off + 3;
223 			if (n->sel.flags & TC_U32_VAROFFSET) {
224 				__be16 *data, hdata;
225 
226 				data = skb_header_pointer(skb,
227 							  off + n->sel.offoff,
228 							  2, &hdata);
229 				if (!data)
230 					goto out;
231 				off2 += ntohs(n->sel.offmask & *data) >>
232 					n->sel.offshift;
233 			}
234 			off2 &= ~3;
235 		}
236 		if (n->sel.flags & TC_U32_EAT) {
237 			off += off2;
238 			off2 = 0;
239 		}
240 
241 		if (off < skb->len)
242 			goto next_ht;
243 	}
244 
245 	/* POP */
246 	if (sdepth--) {
247 		n = stack[sdepth].knode;
248 		ht = rcu_dereference_bh(n->ht_up);
249 		off = stack[sdepth].off;
250 		goto check_terminal;
251 	}
252 out:
253 	return -1;
254 
255 deadloop:
256 	net_warn_ratelimited("cls_u32: dead loop\n");
257 	return -1;
258 }
259 
260 static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
261 {
262 	struct tc_u_hnode *ht;
263 
264 	for (ht = rtnl_dereference(tp_c->hlist);
265 	     ht;
266 	     ht = rtnl_dereference(ht->next))
267 		if (ht->handle == handle)
268 			break;
269 
270 	return ht;
271 }
272 
273 static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
274 {
275 	unsigned int sel;
276 	struct tc_u_knode *n = NULL;
277 
278 	sel = TC_U32_HASH(handle);
279 	if (sel > ht->divisor)
280 		goto out;
281 
282 	for (n = rtnl_dereference(ht->ht[sel]);
283 	     n;
284 	     n = rtnl_dereference(n->next))
285 		if (n->handle == handle)
286 			break;
287 out:
288 	return n;
289 }
290 
291 
292 static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
293 {
294 	struct tc_u_hnode *ht;
295 	struct tc_u_common *tp_c = tp->data;
296 
297 	if (TC_U32_HTID(handle) == TC_U32_ROOT)
298 		ht = rtnl_dereference(tp->root);
299 	else
300 		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));
301 
302 	if (!ht)
303 		return 0;
304 
305 	if (TC_U32_KEY(handle) == 0)
306 		return (unsigned long)ht;
307 
308 	return (unsigned long)u32_lookup_key(ht, handle);
309 }
310 
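/* Pick an unused hash table id. The generated id is or'ed with 0x800 and
 * shifted into the htid part (top 12 bits) of the handle; 0 is returned
 * when no free id can be found.
 */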
311 static u32 gen_new_htid(struct tc_u_common *tp_c)
312 {
313 	int i = 0x800;
314 
315 	/* hgenerator is only used under the rtnl lock, so it is safe to
316 	 * increment it without read-copy-update semantics.
317 	 */
318 	do {
319 		if (++tp_c->hgenerator == 0x7FF)
320 			tp_c->hgenerator = 1;
321 	} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
322 
323 	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
324 }
325 
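/* Set up a new u32 instance: allocate its root hash table and, for the
 * first instance on a qdisc, the shared tc_u_common as well.
 */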
326 static int u32_init(struct tcf_proto *tp)
327 {
328 	struct tc_u_hnode *root_ht;
329 	struct tc_u_common *tp_c;
330 
331 	tp_c = tp->q->u32_node;
332 
333 	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
334 	if (root_ht == NULL)
335 		return -ENOBUFS;
336 
337 	root_ht->refcnt++;
338 	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
339 	root_ht->prio = tp->prio;
340 
341 	if (tp_c == NULL) {
342 		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
343 		if (tp_c == NULL) {
344 			kfree(root_ht);
345 			return -ENOBUFS;
346 		}
347 		tp_c->q = tp->q;
348 		tp->q->u32_node = tp_c;
349 	}
350 
351 	tp_c->refcnt++;
352 	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
353 	rcu_assign_pointer(tp_c->hlist, root_ht);
354 	root_ht->tp_c = tp_c;
355 
356 	rcu_assign_pointer(tp->root, root_ht);
357 	tp->data = tp_c;
358 	return 0;
359 }
360 
361 static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
362 			   bool free_pf)
363 {
364 	tcf_exts_destroy(&n->exts);
365 	if (n->ht_down)
366 		n->ht_down->refcnt--;
367 #ifdef CONFIG_CLS_U32_PERF
368 	if (free_pf)
369 		free_percpu(n->pf);
370 #endif
371 #ifdef CONFIG_CLS_U32_MARK
372 	if (free_pf)
373 		free_percpu(n->pcpu_success);
374 #endif
375 	kfree(n);
376 	return 0;
377 }
378 
379 /* u32_delete_key_rcu should be called when freeing a copied
380  * version of a tc_u_knode obtained from u32_init_knode(). When
381  * copies are obtained from u32_init_knode(), the statistics are
382  * shared between the old and new copies so that readers can
383  * continue to update the statistics during the copy. To support
384  * this, the u32_delete_key_rcu variant does not free the percpu
385  * statistics.
386  */
387 static void u32_delete_key_rcu(struct rcu_head *rcu)
388 {
389 	struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
390 
391 	u32_destroy_key(key->tp, key, false);
392 }
393 
394 /* u32_delete_key_freepf_rcu is the rcu callback variant
395  * that frees the entire structure, including the percpu
396  * statistics variables. Only use this if the key is not a copy
397  * returned by u32_init_knode(). See u32_delete_key_rcu()
398  * for the variant that should be used with keys returned from
399  * u32_init_knode().
400  */
401 static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
402 {
403 	struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
404 
405 	u32_destroy_key(key->tp, key, true);
406 }
407 
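/* Unlink a key node from its hash bucket and schedule it for freeing once
 * RCU readers are done with it.
 */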
408 static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
409 {
410 	struct tc_u_knode __rcu **kp;
411 	struct tc_u_knode *pkp;
412 	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);
413 
414 	if (ht) {
415 		kp = &ht->ht[TC_U32_HASH(key->handle)];
416 		for (pkp = rtnl_dereference(*kp); pkp;
417 		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
418 			if (pkp == key) {
419 				RCU_INIT_POINTER(*kp, key->next);
420 
421 				tcf_unbind_filter(tp, &key->res);
422 				call_rcu(&key->rcu, u32_delete_key_freepf_rcu);
423 				return 0;
424 			}
425 		}
426 	}
427 	WARN_ON(1);
428 	return 0;
429 }
430 
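/* Hardware offload helpers: the functions below mirror hash table and key
 * node add/delete operations to the underlying device via ndo_setup_tc
 * when the device is willing to offload this classifier.
 */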
431 static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
432 {
433 	struct net_device *dev = tp->q->dev_queue->dev;
434 	struct tc_cls_u32_offload u32_offload = {0};
435 	struct tc_to_netdev offload;
436 
437 	offload.type = TC_SETUP_CLSU32;
438 	offload.cls_u32 = &u32_offload;
439 
440 	if (tc_should_offload(dev, tp, 0)) {
441 		offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
442 		offload.cls_u32->knode.handle = handle;
443 		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
444 					      tp->protocol, &offload);
445 	}
446 }
447 
448 static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
449 				u32 flags)
450 {
451 	struct net_device *dev = tp->q->dev_queue->dev;
452 	struct tc_cls_u32_offload u32_offload = {0};
453 	struct tc_to_netdev offload;
454 	int err;
455 
456 	if (!tc_should_offload(dev, tp, flags))
457 		return tc_skip_sw(flags) ? -EINVAL : 0;
458 
459 	offload.type = TC_SETUP_CLSU32;
460 	offload.cls_u32 = &u32_offload;
461 
462 	offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
463 	offload.cls_u32->hnode.divisor = h->divisor;
464 	offload.cls_u32->hnode.handle = h->handle;
465 	offload.cls_u32->hnode.prio = h->prio;
466 
467 	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
468 					    tp->protocol, &offload);
469 	if (tc_skip_sw(flags))
470 		return err;
471 
472 	return 0;
473 }
474 
475 static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
476 {
477 	struct net_device *dev = tp->q->dev_queue->dev;
478 	struct tc_cls_u32_offload u32_offload = {0};
479 	struct tc_to_netdev offload;
480 
481 	offload.type = TC_SETUP_CLSU32;
482 	offload.cls_u32 = &u32_offload;
483 
484 	if (tc_should_offload(dev, tp, 0)) {
485 		offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
486 		offload.cls_u32->hnode.divisor = h->divisor;
487 		offload.cls_u32->hnode.handle = h->handle;
488 		offload.cls_u32->hnode.prio = h->prio;
489 
490 		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
491 					      tp->protocol, &offload);
492 	}
493 }
494 
495 static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
496 				u32 flags)
497 {
498 	struct net_device *dev = tp->q->dev_queue->dev;
499 	struct tc_cls_u32_offload u32_offload = {0};
500 	struct tc_to_netdev offload;
501 	int err;
502 
503 	offload.type = TC_SETUP_CLSU32;
504 	offload.cls_u32 = &u32_offload;
505 
506 	if (!tc_should_offload(dev, tp, flags))
507 		return tc_skip_sw(flags) ? -EINVAL : 0;
508 
509 	offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
510 	offload.cls_u32->knode.handle = n->handle;
511 	offload.cls_u32->knode.fshift = n->fshift;
512 #ifdef CONFIG_CLS_U32_MARK
513 	offload.cls_u32->knode.val = n->val;
514 	offload.cls_u32->knode.mask = n->mask;
515 #else
516 	offload.cls_u32->knode.val = 0;
517 	offload.cls_u32->knode.mask = 0;
518 #endif
519 	offload.cls_u32->knode.sel = &n->sel;
520 	offload.cls_u32->knode.exts = &n->exts;
521 	if (n->ht_down)
522 		offload.cls_u32->knode.link_handle = n->ht_down->handle;
523 
524 	err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
525 					    tp->protocol, &offload);
526 
527 	if (!err)
528 		n->flags |= TCA_CLS_FLAGS_IN_HW;
529 
530 	if (tc_skip_sw(flags))
531 		return err;
532 
533 	return 0;
534 }
535 
536 static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
537 {
538 	struct tc_u_knode *n;
539 	unsigned int h;
540 
541 	for (h = 0; h <= ht->divisor; h++) {
542 		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
543 			RCU_INIT_POINTER(ht->ht[h],
544 					 rtnl_dereference(n->next));
545 			tcf_unbind_filter(tp, &n->res);
546 			u32_remove_hw_knode(tp, n->handle);
547 			call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
548 		}
549 	}
550 }
551 
552 static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
553 {
554 	struct tc_u_common *tp_c = tp->data;
555 	struct tc_u_hnode __rcu **hn;
556 	struct tc_u_hnode *phn;
557 
558 	WARN_ON(ht->refcnt);
559 
560 	u32_clear_hnode(tp, ht);
561 
562 	hn = &tp_c->hlist;
563 	for (phn = rtnl_dereference(*hn);
564 	     phn;
565 	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
566 		if (phn == ht) {
567 			u32_clear_hw_hnode(tp, ht);
568 			RCU_INIT_POINTER(*hn, ht->next);
569 			kfree_rcu(ht, rcu);
570 			return 0;
571 		}
572 	}
573 
574 	return -ENOENT;
575 }
576 
577 static bool ht_empty(struct tc_u_hnode *ht)
578 {
579 	unsigned int h;
580 
581 	for (h = 0; h <= ht->divisor; h++)
582 		if (rcu_access_pointer(ht->ht[h]))
583 			return false;
584 
585 	return true;
586 }
587 
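/* Destroy the whole instance. When not forced, back off (return false) as
 * long as other references exist or any hash table still contains entries.
 */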
588 static bool u32_destroy(struct tcf_proto *tp, bool force)
589 {
590 	struct tc_u_common *tp_c = tp->data;
591 	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
592 
593 	WARN_ON(root_ht == NULL);
594 
595 	if (!force) {
596 		if (root_ht) {
597 			if (root_ht->refcnt > 1)
598 				return false;
599 			if (root_ht->refcnt == 1) {
600 				if (!ht_empty(root_ht))
601 					return false;
602 			}
603 		}
604 
605 		if (tp_c->refcnt > 1)
606 			return false;
607 
608 		if (tp_c->refcnt == 1) {
609 			struct tc_u_hnode *ht;
610 
611 			for (ht = rtnl_dereference(tp_c->hlist);
612 			     ht;
613 			     ht = rtnl_dereference(ht->next))
614 				if (!ht_empty(ht))
615 					return false;
616 		}
617 	}
618 
619 	if (root_ht && --root_ht->refcnt == 0)
620 		u32_destroy_hnode(tp, root_ht);
621 
622 	if (--tp_c->refcnt == 0) {
623 		struct tc_u_hnode *ht;
624 
625 		tp->q->u32_node = NULL;
626 
627 		for (ht = rtnl_dereference(tp_c->hlist);
628 		     ht;
629 		     ht = rtnl_dereference(ht->next)) {
630 			ht->refcnt--;
631 			u32_clear_hnode(tp, ht);
632 		}
633 
634 		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
635 			RCU_INIT_POINTER(tp_c->hlist, ht->next);
636 			kfree_rcu(ht, rcu);
637 		}
638 
639 		kfree(tp_c);
640 	}
641 
642 	tp->data = NULL;
643 	return true;
644 }
645 
646 static int u32_delete(struct tcf_proto *tp, unsigned long arg)
647 {
648 	struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
649 	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
650 
651 	if (ht == NULL)
652 		return 0;
653 
654 	if (TC_U32_KEY(ht->handle)) {
655 		u32_remove_hw_knode(tp, ht->handle);
656 		return u32_delete_key(tp, (struct tc_u_knode *)ht);
657 	}
658 
659 	if (root_ht == ht)
660 		return -EINVAL;
661 
662 	if (ht->refcnt == 1) {
663 		ht->refcnt--;
664 		u32_destroy_hnode(tp, ht);
665 	} else {
666 		return -EBUSY;
667 	}
668 
669 	return 0;
670 }
671 
672 #define NR_U32_NODE (1<<12)
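/* Pick an unused node id within the destination hash bucket: mark the ids
 * already in use in a bitmap and return the first free one, preferring ids
 * at or above 0x800; 0xFFF is used as a last resort.
 */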
673 static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
674 {
675 	struct tc_u_knode *n;
676 	unsigned long i;
677 	unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
678 					GFP_KERNEL);
679 	if (!bitmap)
680 		return handle | 0xFFF;
681 
682 	for (n = rtnl_dereference(ht->ht[TC_U32_HASH(handle)]);
683 	     n;
684 	     n = rtnl_dereference(n->next))
685 		set_bit(TC_U32_NODE(n->handle), bitmap);
686 
687 	i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
688 	if (i >= NR_U32_NODE)
689 		i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);
690 
691 	kfree(bitmap);
692 	return handle | (i >= NR_U32_NODE ? 0xFFF : i);
693 }
694 
695 static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
696 	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
697 	[TCA_U32_HASH]		= { .type = NLA_U32 },
698 	[TCA_U32_LINK]		= { .type = NLA_U32 },
699 	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
700 	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
701 	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
702 	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
703 	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
704 };
705 
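/* Apply the changeable attributes of a filter: its actions, an optional
 * link to a lower hash table (TCA_U32_LINK), the class binding
 * (TCA_U32_CLASSID) and the input device match (TCA_U32_INDEV).
 */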
706 static int u32_set_parms(struct net *net, struct tcf_proto *tp,
707 			 unsigned long base, struct tc_u_hnode *ht,
708 			 struct tc_u_knode *n, struct nlattr **tb,
709 			 struct nlattr *est, bool ovr)
710 {
711 	struct tcf_exts e;
712 	int err;
713 
714 	err = tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
715 	if (err < 0)
716 		return err;
717 	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
718 	if (err < 0)
719 		goto errout;
720 
721 	err = -EINVAL;
722 	if (tb[TCA_U32_LINK]) {
723 		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
724 		struct tc_u_hnode *ht_down = NULL, *ht_old;
725 
726 		if (TC_U32_KEY(handle))
727 			goto errout;
728 
729 		if (handle) {
730 			ht_down = u32_lookup_ht(ht->tp_c, handle);
731 
732 			if (ht_down == NULL)
733 				goto errout;
734 			ht_down->refcnt++;
735 		}
736 
737 		ht_old = rtnl_dereference(n->ht_down);
738 		rcu_assign_pointer(n->ht_down, ht_down);
739 
740 		if (ht_old)
741 			ht_old->refcnt--;
742 	}
743 	if (tb[TCA_U32_CLASSID]) {
744 		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
745 		tcf_bind_filter(tp, &n->res, base);
746 	}
747 
748 #ifdef CONFIG_NET_CLS_IND
749 	if (tb[TCA_U32_INDEV]) {
750 		int ret;
751 		ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
752 		if (ret < 0)
753 			goto errout;
754 		n->ifindex = ret;
755 	}
756 #endif
757 	tcf_exts_change(tp, &n->exts, &e);
758 
759 	return 0;
760 errout:
761 	tcf_exts_destroy(&e);
762 	return err;
763 }
764 
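/* Swap an updated copy of a key node into its hash bucket in place of the
 * original node, which is expected to be present.
 */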
765 static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
766 			      struct tc_u_knode *n)
767 {
768 	struct tc_u_knode __rcu **ins;
769 	struct tc_u_knode *pins;
770 	struct tc_u_hnode *ht;
771 
772 	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
773 		ht = rtnl_dereference(tp->root);
774 	else
775 		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));
776 
777 	ins = &ht->ht[TC_U32_HASH(n->handle)];
778 
779 	/* The node must always exist for it to be replaced; if this is not
780 	 * the case, then something went very wrong elsewhere.
781 	 */
782 	for (pins = rtnl_dereference(*ins); ;
783 	     ins = &pins->next, pins = rtnl_dereference(*ins))
784 		if (pins->handle == n->handle)
785 			break;
786 
787 	RCU_INIT_POINTER(n->next, pins->next);
788 	rcu_assign_pointer(*ins, n);
789 }
790 
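/* Make a copy of an existing key node for a replace operation. The per-cpu
 * statistics pointers are shared with the original so concurrent readers
 * keep updating them; whichever of the two nodes is discarded must use the
 * destroy variant that leaves the per-cpu data alone.
 */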
791 static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
792 					 struct tc_u_knode *n)
793 {
794 	struct tc_u_knode *new;
795 	struct tc_u32_sel *s = &n->sel;
796 
797 	new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
798 		      GFP_KERNEL);
799 
800 	if (!new)
801 		return NULL;
802 
803 	RCU_INIT_POINTER(new->next, n->next);
804 	new->handle = n->handle;
805 	RCU_INIT_POINTER(new->ht_up, n->ht_up);
806 
807 #ifdef CONFIG_NET_CLS_IND
808 	new->ifindex = n->ifindex;
809 #endif
810 	new->fshift = n->fshift;
811 	new->res = n->res;
812 	new->flags = n->flags;
813 	RCU_INIT_POINTER(new->ht_down, n->ht_down);
814 
815 	/* bump the reference count as long as we hold a pointer to the structure */
816 	if (new->ht_down)
817 		new->ht_down->refcnt++;
818 
819 #ifdef CONFIG_CLS_U32_PERF
820 	/* Statistics may be incremented by readers during the update,
821 	 * so we must keep them intact. When the node is later destroyed,
822 	 * a special destroy call must be made so the pf memory is not freed.
823 	 */
824 	new->pf = n->pf;
825 #endif
826 
827 #ifdef CONFIG_CLS_U32_MARK
828 	new->val = n->val;
829 	new->mask = n->mask;
830 	/* Similarly, the success statistics must be moved over as pointers */
831 	new->pcpu_success = n->pcpu_success;
832 #endif
833 	new->tp = tp;
834 	memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
835 
836 	if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) {
837 		kfree(new);
838 		return NULL;
839 	}
840 
841 	return new;
842 }
843 
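/* Create or replace a filter. Depending on the attributes this either
 * replaces an existing key node with an updated copy, creates a new hash
 * table (TCA_U32_DIVISOR), or allocates a new key node and inserts it into
 * the selected hash table in ascending node id order.
 */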
844 static int u32_change(struct net *net, struct sk_buff *in_skb,
845 		      struct tcf_proto *tp, unsigned long base, u32 handle,
846 		      struct nlattr **tca, unsigned long *arg, bool ovr)
847 {
848 	struct tc_u_common *tp_c = tp->data;
849 	struct tc_u_hnode *ht;
850 	struct tc_u_knode *n;
851 	struct tc_u32_sel *s;
852 	struct nlattr *opt = tca[TCA_OPTIONS];
853 	struct nlattr *tb[TCA_U32_MAX + 1];
854 	u32 htid, flags = 0;
855 	int err;
856 #ifdef CONFIG_CLS_U32_PERF
857 	size_t size;
858 #endif
859 
860 	if (opt == NULL)
861 		return handle ? -EINVAL : 0;
862 
863 	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
864 	if (err < 0)
865 		return err;
866 
867 	if (tb[TCA_U32_FLAGS]) {
868 		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
869 		if (!tc_flags_valid(flags))
870 			return -EINVAL;
871 	}
872 
873 	n = (struct tc_u_knode *)*arg;
874 	if (n) {
875 		struct tc_u_knode *new;
876 
877 		if (TC_U32_KEY(n->handle) == 0)
878 			return -EINVAL;
879 
880 		if (n->flags != flags)
881 			return -EINVAL;
882 
883 		new = u32_init_knode(tp, n);
884 		if (!new)
885 			return -ENOMEM;
886 
887 		err = u32_set_parms(net, tp, base,
888 				    rtnl_dereference(n->ht_up), new, tb,
889 				    tca[TCA_RATE], ovr);
890 
891 		if (err) {
892 			u32_destroy_key(tp, new, false);
893 			return err;
894 		}
895 
896 		err = u32_replace_hw_knode(tp, new, flags);
897 		if (err) {
898 			u32_destroy_key(tp, new, false);
899 			return err;
900 		}
901 
902 		if (!tc_in_hw(new->flags))
903 			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
904 
905 		u32_replace_knode(tp, tp_c, new);
906 		tcf_unbind_filter(tp, &n->res);
907 		call_rcu(&n->rcu, u32_delete_key_rcu);
908 		return 0;
909 	}
910 
911 	if (tb[TCA_U32_DIVISOR]) {
912 		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
913 
914 		if (--divisor > 0x100)
915 			return -EINVAL;
916 		if (TC_U32_KEY(handle))
917 			return -EINVAL;
918 		if (handle == 0) {
919 			handle = gen_new_htid(tp->data);
920 			if (handle == 0)
921 				return -ENOMEM;
922 		}
923 		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
924 		if (ht == NULL)
925 			return -ENOBUFS;
926 		ht->tp_c = tp_c;
927 		ht->refcnt = 1;
928 		ht->divisor = divisor;
929 		ht->handle = handle;
930 		ht->prio = tp->prio;
931 
932 		err = u32_replace_hw_hnode(tp, ht, flags);
933 		if (err) {
934 			kfree(ht);
935 			return err;
936 		}
937 
938 		RCU_INIT_POINTER(ht->next, tp_c->hlist);
939 		rcu_assign_pointer(tp_c->hlist, ht);
940 		*arg = (unsigned long)ht;
941 
942 		return 0;
943 	}
944 
945 	if (tb[TCA_U32_HASH]) {
946 		htid = nla_get_u32(tb[TCA_U32_HASH]);
947 		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
948 			ht = rtnl_dereference(tp->root);
949 			htid = ht->handle;
950 		} else {
951 			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
952 			if (ht == NULL)
953 				return -EINVAL;
954 		}
955 	} else {
956 		ht = rtnl_dereference(tp->root);
957 		htid = ht->handle;
958 	}
959 
960 	if (ht->divisor < TC_U32_HASH(htid))
961 		return -EINVAL;
962 
963 	if (handle) {
964 		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
965 			return -EINVAL;
966 		handle = htid | TC_U32_NODE(handle);
967 	} else
968 		handle = gen_new_kid(ht, htid);
969 
970 	if (tb[TCA_U32_SEL] == NULL)
971 		return -EINVAL;
972 
973 	s = nla_data(tb[TCA_U32_SEL]);
974 
975 	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
976 	if (n == NULL)
977 		return -ENOBUFS;
978 
979 #ifdef CONFIG_CLS_U32_PERF
980 	size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
981 	n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
982 	if (!n->pf) {
983 		kfree(n);
984 		return -ENOBUFS;
985 	}
986 #endif
987 
988 	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
989 	RCU_INIT_POINTER(n->ht_up, ht);
990 	n->handle = handle;
991 	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
992 	n->flags = flags;
993 	n->tp = tp;
994 
995 	err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
996 	if (err < 0)
997 		goto errout;
998 
999 #ifdef CONFIG_CLS_U32_MARK
1000 	n->pcpu_success = alloc_percpu(u32);
1001 	if (!n->pcpu_success) {
1002 		err = -ENOMEM;
1003 		goto errout;
1004 	}
1005 
1006 	if (tb[TCA_U32_MARK]) {
1007 		struct tc_u32_mark *mark;
1008 
1009 		mark = nla_data(tb[TCA_U32_MARK]);
1010 		n->val = mark->val;
1011 		n->mask = mark->mask;
1012 	}
1013 #endif
1014 
1015 	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
1016 	if (err == 0) {
1017 		struct tc_u_knode __rcu **ins;
1018 		struct tc_u_knode *pins;
1019 
1020 		err = u32_replace_hw_knode(tp, n, flags);
1021 		if (err)
1022 			goto errhw;
1023 
1024 		if (!tc_in_hw(n->flags))
1025 			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1026 
1027 		ins = &ht->ht[TC_U32_HASH(handle)];
1028 		for (pins = rtnl_dereference(*ins); pins;
1029 		     ins = &pins->next, pins = rtnl_dereference(*ins))
1030 			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
1031 				break;
1032 
1033 		RCU_INIT_POINTER(n->next, pins);
1034 		rcu_assign_pointer(*ins, n);
1035 		*arg = (unsigned long)n;
1036 		return 0;
1037 	}
1038 
1039 errhw:
1040 #ifdef CONFIG_CLS_U32_MARK
1041 	free_percpu(n->pcpu_success);
1042 #endif
1043 
1044 errout:
1045 	tcf_exts_destroy(&n->exts);
1046 #ifdef CONFIG_CLS_U32_PERF
1047 	free_percpu(n->pf);
1048 #endif
1049 	kfree(n);
1050 	return err;
1051 }
1052 
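/* Walk all hash tables and key nodes belonging to this instance's priority,
 * invoking the callback for each while honouring arg->skip and arg->count.
 */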
1053 static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
1054 {
1055 	struct tc_u_common *tp_c = tp->data;
1056 	struct tc_u_hnode *ht;
1057 	struct tc_u_knode *n;
1058 	unsigned int h;
1059 
1060 	if (arg->stop)
1061 		return;
1062 
1063 	for (ht = rtnl_dereference(tp_c->hlist);
1064 	     ht;
1065 	     ht = rtnl_dereference(ht->next)) {
1066 		if (ht->prio != tp->prio)
1067 			continue;
1068 		if (arg->count >= arg->skip) {
1069 			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
1070 				arg->stop = 1;
1071 				return;
1072 			}
1073 		}
1074 		arg->count++;
1075 		for (h = 0; h <= ht->divisor; h++) {
1076 			for (n = rtnl_dereference(ht->ht[h]);
1077 			     n;
1078 			     n = rtnl_dereference(n->next)) {
1079 				if (arg->count < arg->skip) {
1080 					arg->count++;
1081 					continue;
1082 				}
1083 				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
1084 					arg->stop = 1;
1085 					return;
1086 				}
1087 				arg->count++;
1088 			}
1089 		}
1090 	}
1091 }
1092 
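/* Dump one filter (a hash table or a key node) as netlink attributes,
 * summing the per-cpu hit counters when CONFIG_CLS_U32_PERF is enabled.
 */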
1093 static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
1094 		    struct sk_buff *skb, struct tcmsg *t)
1095 {
1096 	struct tc_u_knode *n = (struct tc_u_knode *)fh;
1097 	struct tc_u_hnode *ht_up, *ht_down;
1098 	struct nlattr *nest;
1099 
1100 	if (n == NULL)
1101 		return skb->len;
1102 
1103 	t->tcm_handle = n->handle;
1104 
1105 	nest = nla_nest_start(skb, TCA_OPTIONS);
1106 	if (nest == NULL)
1107 		goto nla_put_failure;
1108 
1109 	if (TC_U32_KEY(n->handle) == 0) {
1110 		struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
1111 		u32 divisor = ht->divisor + 1;
1112 
1113 		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
1114 			goto nla_put_failure;
1115 	} else {
1116 #ifdef CONFIG_CLS_U32_PERF
1117 		struct tc_u32_pcnt *gpf;
1118 		int cpu;
1119 #endif
1120 
1121 		if (nla_put(skb, TCA_U32_SEL,
1122 			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
1123 			    &n->sel))
1124 			goto nla_put_failure;
1125 
1126 		ht_up = rtnl_dereference(n->ht_up);
1127 		if (ht_up) {
1128 			u32 htid = n->handle & 0xFFFFF000;
1129 			if (nla_put_u32(skb, TCA_U32_HASH, htid))
1130 				goto nla_put_failure;
1131 		}
1132 		if (n->res.classid &&
1133 		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
1134 			goto nla_put_failure;
1135 
1136 		ht_down = rtnl_dereference(n->ht_down);
1137 		if (ht_down &&
1138 		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
1139 			goto nla_put_failure;
1140 
1141 		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
1142 			goto nla_put_failure;
1143 
1144 #ifdef CONFIG_CLS_U32_MARK
1145 		if ((n->val || n->mask)) {
1146 			struct tc_u32_mark mark = {.val = n->val,
1147 						   .mask = n->mask,
1148 						   .success = 0};
1149 			int cpum;
1150 
1151 			for_each_possible_cpu(cpum) {
1152 				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);
1153 
1154 				mark.success += cnt;
1155 			}
1156 
1157 			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
1158 				goto nla_put_failure;
1159 		}
1160 #endif
1161 
1162 		if (tcf_exts_dump(skb, &n->exts) < 0)
1163 			goto nla_put_failure;
1164 
1165 #ifdef CONFIG_NET_CLS_IND
1166 		if (n->ifindex) {
1167 			struct net_device *dev;
1168 			dev = __dev_get_by_index(net, n->ifindex);
1169 			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
1170 				goto nla_put_failure;
1171 		}
1172 #endif
1173 #ifdef CONFIG_CLS_U32_PERF
1174 		gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
1175 			      n->sel.nkeys * sizeof(u64),
1176 			      GFP_KERNEL);
1177 		if (!gpf)
1178 			goto nla_put_failure;
1179 
1180 		for_each_possible_cpu(cpu) {
1181 			int i;
1182 			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);
1183 
1184 			gpf->rcnt += pf->rcnt;
1185 			gpf->rhit += pf->rhit;
1186 			for (i = 0; i < n->sel.nkeys; i++)
1187 				gpf->kcnts[i] += pf->kcnts[i];
1188 		}
1189 
1190 		if (nla_put_64bit(skb, TCA_U32_PCNT,
1191 				  sizeof(struct tc_u32_pcnt) +
1192 				  n->sel.nkeys * sizeof(u64),
1193 				  gpf, TCA_U32_PAD)) {
1194 			kfree(gpf);
1195 			goto nla_put_failure;
1196 		}
1197 		kfree(gpf);
1198 #endif
1199 	}
1200 
1201 	nla_nest_end(skb, nest);
1202 
1203 	if (TC_U32_KEY(n->handle))
1204 		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
1205 			goto nla_put_failure;
1206 	return skb->len;
1207 
1208 nla_put_failure:
1209 	nla_nest_cancel(skb, nest);
1210 	return -1;
1211 }
1212 
1213 static struct tcf_proto_ops cls_u32_ops __read_mostly = {
1214 	.kind		=	"u32",
1215 	.classify	=	u32_classify,
1216 	.init		=	u32_init,
1217 	.destroy	=	u32_destroy,
1218 	.get		=	u32_get,
1219 	.change		=	u32_change,
1220 	.delete		=	u32_delete,
1221 	.walk		=	u32_walk,
1222 	.dump		=	u32_dump,
1223 	.owner		=	THIS_MODULE,
1224 };
1225 
1226 static int __init init_u32(void)
1227 {
1228 	pr_info("u32 classifier\n");
1229 #ifdef CONFIG_CLS_U32_PERF
1230 	pr_info("    Performance counters on\n");
1231 #endif
1232 #ifdef CONFIG_NET_CLS_IND
1233 	pr_info("    input device check on\n");
1234 #endif
1235 #ifdef CONFIG_NET_CLS_ACT
1236 	pr_info("    Actions configured\n");
1237 #endif
1238 	return register_tcf_proto_ops(&cls_u32_ops);
1239 }
1240 
1241 static void __exit exit_u32(void)
1242 {
1243 	unregister_tcf_proto_ops(&cls_u32_ops);
1244 }
1245 
1246 module_init(init_u32)
1247 module_exit(exit_u32)
1248 MODULE_LICENSE("GPL");
1249