/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>

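/* A u32 handle packs three fields (see the TC_U32_* macros in
 * include/uapi/linux/pkt_cls.h): bits 31..20 are the hash table id
 * (TC_U32_HTID), bits 19..12 select the bucket within that table
 * (TC_U32_HASH), and bits 11..0 identify the key node within the
 * bucket (TC_U32_NODE). TC_U32_KEY() is the low 20 bits, so it is
 * zero exactly when a handle names a hash table rather than a key
 * node.
 */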
struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	int			ifindex;
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32			flags;
	unsigned int		in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct tcf_proto	*tp;
	struct rcu_work		rwork;
	/* The 'sel' field MUST be the last field in the structure to
	 * allow for the tc_u32_key array allocated at the end of the
	 * structure.
	 */
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned int		divisor;
	struct idr		handle_idr;
	struct rcu_head		rcu;
	u32			flags;
	/* The 'ht' field MUST be the last field in the structure to
	 * allow for additional bucket entries allocated at the end of
	 * the structure.
	 */
	struct tc_u_knode __rcu	*ht[1];
};

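/* One tc_u_common is shared by all u32 instances attached to the same
 * qdisc (or, for shared blocks, the same block; see tc_u_common_ptr()),
 * so hash tables created at one priority can be linked to from filters
 * at another.
 */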
struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	void			*ptr;
	int			refcnt;
	struct idr		handle_idr;
	struct hlist_node	hnode;
	struct rcu_head		rcu;
};

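/* Fold the 32-bit word at the hash offset into a bucket index: mask it
 * with sel->hmask and shift the surviving bits down by fshift (fshift
 * is precomputed as ffs(ntohl(hmask)) - 1 in u32_change()). For
 * example, hashing on the last octet of the IPv4 destination address
 * uses hmask = htonl(0x000000ff), giving fshift = 0 and h equal to
 * that octet; the caller then masks h with ht->divisor.
 */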
static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}

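/* Classification walks the hash tables iteratively: starting from the
 * root table, each matching key node either terminates the walk
 * (TC_U32_TERMINAL, returning its tcf_result) or links down to another
 * table. Links are recorded on a small stack (TC_U32_MAXDEPTH entries)
 * so that when a lower table has no match, the walk pops back and
 * resumes with the next node at the previous level.
 */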
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

		if (tc_skip_sw(n->flags)) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

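		/* Compute the offset of the next header. sel.off is the
		 * fixed part; with TC_U32_VAROFFSET a 16-bit word is read
		 * at sel.offoff, masked with sel.offmask (big endian) and
		 * shifted right by sel.offshift, and the "+ 3 ... & ~3"
		 * rounds the result to a multiple of 4. TC_U32_EAT makes
		 * the new offset permanent for linked tables. A classic
		 * tc(8) use (illustrative) is skipping a variable-length
		 * IPv4 header: "offset at 0 mask 0f00 shift 6 eat"
		 * extracts IHL * 4 from the first 16 bits of the header.
		 */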
		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}

static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}


static void *u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return NULL;

	if (TC_U32_KEY(handle) == 0)
		return ht;

	return u32_lookup_key(ht, handle);
}

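/* Hash table ids are allocated cyclically in [1, 0x7FF); OR'ing in
 * 0x800 and shifting into the top twelve handle bits yields ids
 * starting at 0x80100000, which never collide with the root table's
 * 0x80000000.
 */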
/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
	int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
	if (id < 0)
		return 0;
	return (id | 0x800U) << 20;
}

static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
	struct tcf_block *block = tp->chain->block;

	/* Block sharing is currently supported only for classless
	 * qdiscs; in that case we use the block pointer for tc_u_common
	 * identification. When the block is not shared, block->q is a
	 * valid pointer and we use that instead, which works for
	 * classful qdiscs.
	 */
	if (tcf_block_shared(block))
		return block;
	else
		return block->q;
}

static unsigned int tc_u_hash(const struct tcf_proto *tp)
{
	return hash_ptr(tc_u_common_ptr(tp), U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
{
	struct tc_u_common *tc;
	unsigned int h;

	h = tc_u_hash(tp);
	hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) {
		if (tc->ptr == tc_u_common_ptr(tp))
			return tc;
	}
	return NULL;
}

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;
	unsigned int h;

	tp_c = tc_u_common_find(tp);

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
	root_ht->prio = tp->prio;
	idr_init(&root_ht->handle_idr);

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->ptr = tc_u_common_ptr(tp);
		INIT_HLIST_NODE(&tp_c->hnode);
		idr_init(&tp_c->handle_idr);

		h = tc_u_hash(tp);
		hlist_add_head(&tp_c->hnode, &tc_u_common_hash[h]);
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);
	root_ht->tp_c = tp_c;

	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
			   bool free_pf)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);

	tcf_exts_destroy(&n->exts);
	tcf_exts_put_net(&n->exts);
	if (ht && --ht->refcnt == 0)
		kfree(ht);
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	kfree(n);
	return 0;
}

/* u32_delete_key_work() should be called when freeing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_work() variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key->tp, key, false);
	rtnl_unlock();
}

/* u32_delete_key_freepf_work() is the work-queue variant that frees
 * the entire structure including the statistics percpu variables.
 * Only use this if the key is not a copy returned by u32_init_knode().
 * See u32_delete_key_work() for the variant that should be used with
 * keys returned from u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key->tp, key, true);
	rtnl_unlock();
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);

				tcf_unbind_filter(tp, &key->res);
				idr_remove(&ht->handle_idr, key->handle);
				tcf_exts_get_net(&key->exts);
				tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
}

static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	bool offloaded = false;
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_NEW_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
	if (err < 0) {
		u32_clear_hw_hnode(tp, h, NULL);
		return err;
	} else if (err > 0) {
		offloaded = true;
	}

	if (skip_sw && !offloaded)
		return -EINVAL;

	return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
	tcf_block_offload_dec(block, &n->flags);
}

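/* tc_setup_cb_call() returns the number of callbacks that accepted the
 * rule, or a negative error. A positive return therefore means at
 * least one device offloaded the node; when the filter was added with
 * skip_sw, failing to offload anywhere is treated as a hard error.
 */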
static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
	cls_u32.knode.handle = n->handle;
	cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	cls_u32.knode.val = n->val;
	cls_u32.knode.mask = n->mask;
#else
	cls_u32.knode.val = 0;
	cls_u32.knode.mask = 0;
#endif
	cls_u32.knode.sel = &n->sel;
	cls_u32.knode.exts = &n->exts;
	if (n->ht_down)
		cls_u32.knode.link_handle = ht->handle;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
	if (err < 0) {
		u32_remove_hw_knode(tp, n, NULL);
		return err;
	} else if (err > 0) {
		n->in_hw_count = err;
		tcf_block_offload_inc(block, &n->flags);
	}

	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			    struct netlink_ext_ack *extack)
{
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n, extack);
			idr_remove(&ht->handle_idr, n->handle);
			if (tcf_exts_get_net(&n->exts))
				tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
			else
				u32_destroy_key(n->tp, n, true);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			     struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(tp, ht, extack);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht, extack);
			idr_destroy(&ht->handle_idr);
			idr_remove(&tp_c->handle_idr, ht->handle);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}

static bool ht_empty(struct tc_u_hnode *ht)
{
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++)
		if (rcu_access_pointer(ht->ht[h]))
			return false;

	return true;
}

static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht, extack);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		hlist_del(&tp_c->hnode);

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			u32_clear_hnode(tp, ht, extack);
			RCU_INIT_POINTER(tp_c->hlist, ht->next);

			/* u32_destroy_key() will later free ht for us, if it's
			 * still referenced by some knode
			 */
			if (--ht->refcnt == 0)
				kfree_rcu(ht, rcu);
		}

		idr_destroy(&tp_c->handle_idr);
		kfree(tp_c);
	}

	tp->data = NULL;
}

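/* Deletion handles both key nodes and whole hash tables; the two cases
 * are told apart by TC_U32_KEY() on the handle. *last reports back
 * whether this tcf_proto instance is now completely empty, so the
 * caller can destroy it.
 */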
static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
		      struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = arg;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
	struct tc_u_common *tp_c = tp->data;
	int ret = 0;

	if (ht == NULL)
		goto out;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
		goto out;
	}

	if (root_ht == ht) {
		NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
		return -EINVAL;
	}

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht, extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
		return -EBUSY;
	}

out:
	*last = true;
	if (root_ht) {
		if (root_ht->refcnt > 1) {
			*last = false;
			goto ret;
		}
		if (root_ht->refcnt == 1) {
			if (!ht_empty(root_ht)) {
				*last = false;
				goto ret;
			}
		}
	}

	if (tp_c->refcnt > 1) {
		*last = false;
		goto ret;
	}

	if (tp_c->refcnt == 1) {
		struct tc_u_hnode *ht;

		for (ht = rtnl_dereference(tp_c->hlist);
		     ht;
		     ht = rtnl_dereference(ht->next))
			if (!ht_empty(ht)) {
				*last = false;
				break;
			}
	}

ret:
	return ret;
}

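/* Pick a node id for a new key: prefer the 0x800..0xFFF range within
 * the table, then fall back to the low range, and as a last resort
 * hand back the maximum id.
 */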
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
	u32 index = htid | 0x800;
	u32 max = htid | 0xFFF;

	if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
		index = htid + 1;
		if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
				 GFP_KERNEL))
			index = max;
	}

	return index;
}

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr,
			 struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
			return -EINVAL;
		}

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (!ht_down) {
				NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
				return -EINVAL;
			}
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		int ret;

		ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
		if (ret < 0)
			return -EINVAL;
		n->ifindex = ret;
	}
#endif
	return 0;
}

static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is
	 * not the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	idr_replace(&ht->handle_idr, n, n->handle);
	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}

static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tc_u32_sel *s = &n->sel;
	struct tc_u_knode *new;

	new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
		      GFP_KERNEL);

	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

#ifdef CONFIG_NET_CLS_IND
	new->ifindex = n->ifindex;
#endif
	new->fshift = n->fshift;
	new->res = n->res;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, ht);

	/* bump the reference count as long as we hold a pointer to the
	 * structure
	 */
	if (ht)
		ht->refcnt++;

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly, success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	new->tp = tp;
	memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));

	if (tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE)) {
		kfree(new);
		return NULL;
	}

	return new;
}

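/* Changes are made copy-on-update: an existing key node is duplicated
 * with u32_init_knode() (sharing its percpu statistics), the copy is
 * modified and swapped in with rcu_assign_pointer(), and the old node
 * is released through the rtnl-held work queue once readers are done.
 */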
static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, void **arg, bool ovr,
		      struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, flags = 0;
	size_t sel_size;
	int err;
#ifdef CONFIG_CLS_U32_PERF
	size_t size;
#endif

	if (!opt) {
		if (handle) {
			NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS]) {
		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
		if (!tc_flags_valid(flags)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
			return -EINVAL;
		}
	}

	n = *arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0) {
			NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
			return -EINVAL;
		}

		if ((n->flags ^ flags) &
		    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
			NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
			return -EINVAL;
		}

		new = u32_init_knode(tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base,
				    rtnl_dereference(n->ht_up), new, tb,
				    tca[TCA_RATE], ovr, extack);

		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		err = u32_replace_hw_knode(tp, new, flags, extack);
		if (err) {
			u32_destroy_key(tp, new, false);
			return err;
		}

		if (!tc_in_hw(new->flags))
			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		tcf_exts_get_net(&n->exts);
		tcf_queue_work(&n->rwork, u32_delete_key_work);
		return 0;
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

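		/* The attribute carries the bucket count; it is stored
		 * decremented so that ht->ht[divisor] is the last valid
		 * slot. A user value of 0 wraps the unsigned decrement
		 * and is rejected by the comparison below.
		 */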
		if (--divisor > 0x100) {
			NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
			return -EINVAL;
		}
		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
			return -EINVAL;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		if (handle == 0) {
			handle = gen_new_htid(tp->data, ht);
			if (handle == 0) {
				kfree(ht);
				return -ENOMEM;
			}
		} else {
			err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
					    handle, GFP_KERNEL);
			if (err) {
				kfree(ht);
				return err;
			}
		}
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		idr_init(&ht->handle_idr);
		ht->flags = flags;

		err = u32_replace_hw_hnode(tp, ht, flags, extack);
		if (err) {
			idr_remove(&tp_c->handle_idr, handle);
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = ht;

		return 0;
	}

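	/* From here on we are adding or updating a key node. An
	 * illustrative tc(8) sequence (handles are hexadecimal):
	 *   tc filter add dev eth0 parent 1: prio 5 handle 10: \
	 *      protocol ip u32 divisor 256
	 *   tc filter add dev eth0 parent 1: prio 5 protocol ip \
	 *      u32 ht 10:6: match ip dst 1.2.3.4/32 flowid 1:1
	 * The first command takes the TCA_U32_DIVISOR branch above; the
	 * second arrives with TCA_U32_HASH selecting table 10, bucket 6.
	 */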
	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (!ht) {
				NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
				return -EINVAL;
			}
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
		return -EINVAL;
	}

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
			NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
			return -EINVAL;
		}
		handle = htid | TC_U32_NODE(handle);
		err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
				    GFP_KERNEL);
		if (err)
			return err;
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
		err = -EINVAL;
		goto erridr;
	}

	s = nla_data(tb[TCA_U32_SEL]);
	sel_size = struct_size(s, keys, s->nkeys);
	if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
		err = -EINVAL;
		goto erridr;
	}

	n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
	if (n == NULL) {
		err = -ENOBUFS;
		goto erridr;
	}

#ifdef CONFIG_CLS_U32_PERF
	size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
	n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		err = -ENOBUFS;
		goto errfree;
	}
#endif

	memcpy(&n->sel, s, sel_size);
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = flags;
	n->tp = tp;

	err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr,
			    extack);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags, extack);
		if (err)
			goto errhw;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		*arg = n;
		return 0;
	}

errhw:
#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
	free_percpu(n->pf);
#endif
	kfree(n);
erridr:
	idr_remove(&ht->handle_idr, handle);
	return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

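/* Reoffload replays the current filter set towards a single block
 * callback, typically when a device is bound to or unbound from an
 * already populated shared block, so its hardware state catches up
 * with the software tables.
 */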
static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			       bool add, tc_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = ht->divisor;
	cls_u32.hnode.handle = ht->handle;
	cls_u32.hnode.prio = ht->prio;

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err && add && tc_skip_sw(ht->flags))
		return err;

	return 0;
}

static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
			       bool add, tc_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = add ?
		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	if (add) {
		cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
		cls_u32.knode.val = n->val;
		cls_u32.knode.mask = n->mask;
#else
		cls_u32.knode.val = 0;
		cls_u32.knode.mask = 0;
#endif
		cls_u32.knode.sel = &n->sel;
		cls_u32.knode.exts = &n->exts;
		if (n->ht_down)
			cls_u32.knode.link_handle = ht->handle;
	}

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err) {
		if (add && tc_skip_sw(n->flags))
			return err;
		return 0;
	}

	tc_cls_offload_cnt_update(block, &n->in_hw_count, &n->flags, add);

	return 0;
}

static int u32_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			 void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;
	int err;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		/* When adding filters to a new dev, try to offload the
		 * hashtable first. When removing, do the filters before the
		 * hashtable.
		 */
		if (add && !tc_skip_hw(ht->flags)) {
			err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
						  extack);
			if (err)
				return err;
		}

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (tc_skip_hw(n->flags))
					continue;

				err = u32_reoffload_knode(tp, n, add, cb,
							  cb_priv, extack);
				if (err)
					return err;
			}
		}

		if (!add && !tc_skip_hw(ht->flags))
			u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
	}

	return 0;
}

static void u32_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct tc_u_knode *n = fh;

	if (n && n->res.classid == classid)
		n->res.class = cl;
}

static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (n->ifindex) {
			struct net_device *dev;
			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
			      n->sel.nkeys * sizeof(u64),
			      GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT,
				  sizeof(struct tc_u32_pcnt) +
				  n->sel.nkeys * sizeof(u64),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.reoffload	=	u32_reoffload,
	.dump		=	u32_dump,
	.bind_class	=	u32_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");