xref: /openbmc/linux/net/sched/cls_api.c (revision 5927145e)
1 /*
2  * net/sched/cls_api.c	Packet classifier API.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10  *
11  * Changes:
12  *
13  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
14  *
15  */
16 
17 #include <linux/module.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/string.h>
21 #include <linux/errno.h>
22 #include <linux/err.h>
23 #include <linux/skbuff.h>
24 #include <linux/init.h>
25 #include <linux/kmod.h>
26 #include <linux/slab.h>
27 #include <linux/idr.h>
28 #include <net/net_namespace.h>
29 #include <net/sock.h>
30 #include <net/netlink.h>
31 #include <net/pkt_sched.h>
32 #include <net/pkt_cls.h>
33 
34 /* The list of all installed classifier types */
35 static LIST_HEAD(tcf_proto_base);
36 
37 /* Protects the list of registered TC modules. It is a pure SMP lock. */
38 static DEFINE_RWLOCK(cls_mod_lock);
39 
40 /* Find classifier type by string name */
41 
42 static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
43 {
44 	const struct tcf_proto_ops *t, *res = NULL;
45 
46 	if (kind) {
47 		read_lock(&cls_mod_lock);
48 		list_for_each_entry(t, &tcf_proto_base, head) {
49 			if (strcmp(kind, t->kind) == 0) {
50 				if (try_module_get(t->owner))
51 					res = t;
52 				break;
53 			}
54 		}
55 		read_unlock(&cls_mod_lock);
56 	}
57 	return res;
58 }
59 
60 /* Register (unregister) a new classifier type */
61 
62 int register_tcf_proto_ops(struct tcf_proto_ops *ops)
63 {
64 	struct tcf_proto_ops *t;
65 	int rc = -EEXIST;
66 
67 	write_lock(&cls_mod_lock);
68 	list_for_each_entry(t, &tcf_proto_base, head)
69 		if (!strcmp(ops->kind, t->kind))
70 			goto out;
71 
72 	list_add_tail(&ops->head, &tcf_proto_base);
73 	rc = 0;
74 out:
75 	write_unlock(&cls_mod_lock);
76 	return rc;
77 }
78 EXPORT_SYMBOL(register_tcf_proto_ops);
79 
80 static struct workqueue_struct *tc_filter_wq;
81 
82 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
83 {
84 	struct tcf_proto_ops *t;
85 	int rc = -ENOENT;
86 
87 	/* Wait for outstanding call_rcu()s, if any, from a
88 	 * tcf_proto_ops's destroy() handler.
89 	 */
90 	rcu_barrier();
91 	flush_workqueue(tc_filter_wq);
92 
93 	write_lock(&cls_mod_lock);
94 	list_for_each_entry(t, &tcf_proto_base, head) {
95 		if (t == ops) {
96 			list_del(&t->head);
97 			rc = 0;
98 			break;
99 		}
100 	}
101 	write_unlock(&cls_mod_lock);
102 	return rc;
103 }
104 EXPORT_SYMBOL(unregister_tcf_proto_ops);
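
/* Typical usage (editor's sketch, hypothetical classifier module; not part
 * of this file): a classifier registers its ops from module_init() and
 * unregisters them from module_exit().  All "foo" names are made up:
 *
 *	static struct tcf_proto_ops cls_foo_ops = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit cls_foo_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 */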
105 
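/* tcf_queue_work() lets classifiers defer destruction work to the ordered
 * tc_filter_wq; unregister_tcf_proto_ops() flushes that workqueue (after an
 * rcu_barrier()) so no deferred work can still reference the ops being
 * removed.
 */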
106 bool tcf_queue_work(struct work_struct *work)
107 {
108 	return queue_work(tc_filter_wq, work);
109 }
110 EXPORT_SYMBOL(tcf_queue_work);
111 
112 /* Select a new prio value from the range managed by the kernel. */
113 
114 static inline u32 tcf_auto_prio(struct tcf_proto *tp)
115 {
116 	u32 first = TC_H_MAKE(0xC0000000U, 0U);
117 
118 	if (tp)
119 		first = tp->prio - 1;
120 
121 	return TC_H_MAJ(first);
122 }
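
/* Editor's note on tcf_auto_prio(): with no existing filter the
 * auto-allocated priority is TC_H_MAKE(0xC0000000U, 0U), i.e. 0xC0000000;
 * otherwise it is the major part of (tp->prio - 1), one major step below
 * the filter passed in, so kernel-chosen priorities count downwards.
 */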
123 
124 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
125 					  u32 prio, struct tcf_chain *chain,
126 					  struct netlink_ext_ack *extack)
127 {
128 	struct tcf_proto *tp;
129 	int err;
130 
131 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
132 	if (!tp)
133 		return ERR_PTR(-ENOBUFS);
134 
135 	err = -ENOENT;
136 	tp->ops = tcf_proto_lookup_ops(kind);
137 	if (!tp->ops) {
138 #ifdef CONFIG_MODULES
139 		rtnl_unlock();
140 		request_module("cls_%s", kind);
141 		rtnl_lock();
142 		tp->ops = tcf_proto_lookup_ops(kind);
143 		/* We dropped the RTNL semaphore in order to perform
144 		 * the module load. So, even if we succeeded in loading
145 		 * the module we have to replay the request. We indicate
146 		 * this using -EAGAIN.
147 		 */
148 		if (tp->ops) {
149 			module_put(tp->ops->owner);
150 			err = -EAGAIN;
151 		} else {
152 			NL_SET_ERR_MSG(extack, "TC classifier not found");
153 			err = -ENOENT;
154 		}
155 #endif
156 		goto errout;
157 	}
158 	tp->classify = tp->ops->classify;
159 	tp->protocol = protocol;
160 	tp->prio = prio;
161 	tp->chain = chain;
162 
163 	err = tp->ops->init(tp);
164 	if (err) {
165 		module_put(tp->ops->owner);
166 		goto errout;
167 	}
168 	return tp;
169 
170 errout:
171 	kfree(tp);
172 	return ERR_PTR(err);
173 }
174 
175 static void tcf_proto_destroy(struct tcf_proto *tp,
176 			      struct netlink_ext_ack *extack)
177 {
178 	tp->ops->destroy(tp, extack);
179 	module_put(tp->ops->owner);
180 	kfree_rcu(tp, rcu);
181 }
182 
183 struct tcf_filter_chain_list_item {
184 	struct list_head list;
185 	tcf_chain_head_change_t *chain_head_change;
186 	void *chain_head_change_priv;
187 };
188 
189 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
190 					  u32 chain_index)
191 {
192 	struct tcf_chain *chain;
193 
194 	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
195 	if (!chain)
196 		return NULL;
197 	INIT_LIST_HEAD(&chain->filter_chain_list);
198 	list_add_tail(&chain->list, &block->chain_list);
199 	chain->block = block;
200 	chain->index = chain_index;
201 	chain->refcnt = 1;
202 	return chain;
203 }
204 
205 static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
206 				       struct tcf_proto *tp_head)
207 {
208 	if (item->chain_head_change)
209 		item->chain_head_change(tp_head, item->chain_head_change_priv);
210 }
211 static void tcf_chain_head_change(struct tcf_chain *chain,
212 				  struct tcf_proto *tp_head)
213 {
214 	struct tcf_filter_chain_list_item *item;
215 
216 	list_for_each_entry(item, &chain->filter_chain_list, list)
217 		tcf_chain_head_change_item(item, tp_head);
218 }
219 
220 static void tcf_chain_flush(struct tcf_chain *chain)
221 {
222 	struct tcf_proto *tp = rtnl_dereference(chain->filter_chain);
223 
224 	tcf_chain_head_change(chain, NULL);
225 	while (tp) {
226 		RCU_INIT_POINTER(chain->filter_chain, tp->next);
227 		tcf_proto_destroy(tp, NULL);
228 		tp = rtnl_dereference(chain->filter_chain);
229 		tcf_chain_put(chain);
230 	}
231 }
232 
233 static void tcf_chain_destroy(struct tcf_chain *chain)
234 {
235 	struct tcf_block *block = chain->block;
236 
237 	list_del(&chain->list);
238 	kfree(chain);
239 	if (list_empty(&block->chain_list))
240 		kfree(block);
241 }
242 
243 static void tcf_chain_hold(struct tcf_chain *chain)
244 {
245 	++chain->refcnt;
246 }
247 
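/* Look up a chain by index on the given block, optionally creating it.
 * A successful lookup (or creation) takes a reference on the chain that
 * the caller must drop with tcf_chain_put().  Chain 0 is created together
 * with the block and is released when the block itself is put.
 */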
248 struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
249 				bool create)
250 {
251 	struct tcf_chain *chain;
252 
253 	list_for_each_entry(chain, &block->chain_list, list) {
254 		if (chain->index == chain_index) {
255 			tcf_chain_hold(chain);
256 			return chain;
257 		}
258 	}
259 
260 	return create ? tcf_chain_create(block, chain_index) : NULL;
261 }
262 EXPORT_SYMBOL(tcf_chain_get);
263 
264 void tcf_chain_put(struct tcf_chain *chain)
265 {
266 	if (--chain->refcnt == 0)
267 		tcf_chain_destroy(chain);
268 }
269 EXPORT_SYMBOL(tcf_chain_put);
270 
271 static bool tcf_block_offload_in_use(struct tcf_block *block)
272 {
273 	return block->offloadcnt;
274 }
275 
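/* Offload binding: a TC_BLOCK_BIND/UNBIND command is sent to the device
 * via ndo_setup_tc(TC_SETUP_BLOCK).  Devices that cannot offload are
 * counted in block->nooffloaddevcnt; binding such a device is refused if
 * the block already has offloaded filters, and tcf_block_cb_call() below
 * fails hard (err_stop) while that counter is non-zero.
 */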
276 static int tcf_block_offload_cmd(struct tcf_block *block,
277 				 struct net_device *dev,
278 				 struct tcf_block_ext_info *ei,
279 				 enum tc_block_command command)
280 {
281 	struct tc_block_offload bo = {};
282 
283 	bo.command = command;
284 	bo.binder_type = ei->binder_type;
285 	bo.block = block;
286 	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
287 }
288 
289 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
290 				  struct tcf_block_ext_info *ei)
291 {
292 	struct net_device *dev = q->dev_queue->dev;
293 	int err;
294 
295 	if (!dev->netdev_ops->ndo_setup_tc)
296 		goto no_offload_dev_inc;
297 
298 	/* If the tc offload feature is disabled and the block we try to bind
299 	 * to already has some offloaded filters, refuse to bind.
300 	 */
301 	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block))
302 		return -EOPNOTSUPP;
303 
304 	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND);
305 	if (err == -EOPNOTSUPP)
306 		goto no_offload_dev_inc;
307 	return err;
308 
309 no_offload_dev_inc:
310 	if (tcf_block_offload_in_use(block))
311 		return -EOPNOTSUPP;
312 	block->nooffloaddevcnt++;
313 	return 0;
314 }
315 
316 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
317 				     struct tcf_block_ext_info *ei)
318 {
319 	struct net_device *dev = q->dev_queue->dev;
320 	int err;
321 
322 	if (!dev->netdev_ops->ndo_setup_tc)
323 		goto no_offload_dev_dec;
324 	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND);
325 	if (err == -EOPNOTSUPP)
326 		goto no_offload_dev_dec;
327 	return;
328 
329 no_offload_dev_dec:
330 	WARN_ON(block->nooffloaddevcnt-- == 0);
331 }
332 
333 static int
334 tcf_chain_head_change_cb_add(struct tcf_chain *chain,
335 			     struct tcf_block_ext_info *ei,
336 			     struct netlink_ext_ack *extack)
337 {
338 	struct tcf_filter_chain_list_item *item;
339 
340 	item = kmalloc(sizeof(*item), GFP_KERNEL);
341 	if (!item) {
342 		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
343 		return -ENOMEM;
344 	}
345 	item->chain_head_change = ei->chain_head_change;
346 	item->chain_head_change_priv = ei->chain_head_change_priv;
347 	if (chain->filter_chain)
348 		tcf_chain_head_change_item(item, chain->filter_chain);
349 	list_add(&item->list, &chain->filter_chain_list);
350 	return 0;
351 }
352 
353 static void
354 tcf_chain_head_change_cb_del(struct tcf_chain *chain,
355 			     struct tcf_block_ext_info *ei)
356 {
357 	struct tcf_filter_chain_list_item *item;
358 
359 	list_for_each_entry(item, &chain->filter_chain_list, list) {
360 		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
361 		    (item->chain_head_change == ei->chain_head_change &&
362 		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
363 			tcf_chain_head_change_item(item, NULL);
364 			list_del(&item->list);
365 			kfree(item);
366 			return;
367 		}
368 	}
369 	WARN_ON(1);
370 }
371 
372 struct tcf_net {
373 	struct idr idr;
374 };
375 
376 static unsigned int tcf_net_id;
377 
378 static int tcf_block_insert(struct tcf_block *block, struct net *net,
379 			    u32 block_index, struct netlink_ext_ack *extack)
380 {
381 	struct tcf_net *tn = net_generic(net, tcf_net_id);
382 	int err;
383 
384 	err = idr_alloc_u32(&tn->idr, block, &block_index, block_index,
385 			    GFP_KERNEL);
386 	if (err)
387 		return err;
388 	block->index = block_index;
389 	return 0;
390 }
391 
392 static void tcf_block_remove(struct tcf_block *block, struct net *net)
393 {
394 	struct tcf_net *tn = net_generic(net, tcf_net_id);
395 
396 	idr_remove(&tn->idr, block->index);
397 }
398 
399 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
400 					  struct netlink_ext_ack *extack)
401 {
402 	struct tcf_block *block;
403 	struct tcf_chain *chain;
404 	int err;
405 
406 	block = kzalloc(sizeof(*block), GFP_KERNEL);
407 	if (!block) {
408 		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
409 		return ERR_PTR(-ENOMEM);
410 	}
411 	INIT_LIST_HEAD(&block->chain_list);
412 	INIT_LIST_HEAD(&block->cb_list);
413 	INIT_LIST_HEAD(&block->owner_list);
414 
415 	/* Create chain 0 by default; it must always be present. */
416 	chain = tcf_chain_create(block, 0);
417 	if (!chain) {
418 		NL_SET_ERR_MSG(extack, "Failed to create new tcf chain");
419 		err = -ENOMEM;
420 		goto err_chain_create;
421 	}
422 	block->refcnt = 1;
423 	block->net = net;
425 	block->q = q;
426 	return block;
427 
428 err_chain_create:
429 	kfree(block);
430 	return ERR_PTR(err);
431 }
432 
433 static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
434 {
435 	struct tcf_net *tn = net_generic(net, tcf_net_id);
436 
437 	return idr_find(&tn->idr, block_index);
438 }
439 
440 static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block)
441 {
442 	return list_first_entry(&block->chain_list, struct tcf_chain, list);
443 }
444 
445 struct tcf_block_owner_item {
446 	struct list_head list;
447 	struct Qdisc *q;
448 	enum tcf_block_binder_type binder_type;
449 };
450 
451 static void
452 tcf_block_owner_netif_keep_dst(struct tcf_block *block,
453 			       struct Qdisc *q,
454 			       enum tcf_block_binder_type binder_type)
455 {
456 	if (block->keep_dst &&
457 	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
458 	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
459 		netif_keep_dst(qdisc_dev(q));
460 }
461 
462 void tcf_block_netif_keep_dst(struct tcf_block *block)
463 {
464 	struct tcf_block_owner_item *item;
465 
466 	block->keep_dst = true;
467 	list_for_each_entry(item, &block->owner_list, list)
468 		tcf_block_owner_netif_keep_dst(block, item->q,
469 					       item->binder_type);
470 }
471 EXPORT_SYMBOL(tcf_block_netif_keep_dst);
472 
473 static int tcf_block_owner_add(struct tcf_block *block,
474 			       struct Qdisc *q,
475 			       enum tcf_block_binder_type binder_type)
476 {
477 	struct tcf_block_owner_item *item;
478 
479 	item = kmalloc(sizeof(*item), GFP_KERNEL);
480 	if (!item)
481 		return -ENOMEM;
482 	item->q = q;
483 	item->binder_type = binder_type;
484 	list_add(&item->list, &block->owner_list);
485 	return 0;
486 }
487 
488 static void tcf_block_owner_del(struct tcf_block *block,
489 				struct Qdisc *q,
490 				enum tcf_block_binder_type binder_type)
491 {
492 	struct tcf_block_owner_item *item;
493 
494 	list_for_each_entry(item, &block->owner_list, list) {
495 		if (item->q == q && item->binder_type == binder_type) {
496 			list_del(&item->list);
497 			kfree(item);
498 			return;
499 		}
500 	}
501 	WARN_ON(1);
502 }
503 
504 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
505 		      struct tcf_block_ext_info *ei,
506 		      struct netlink_ext_ack *extack)
507 {
508 	struct net *net = qdisc_net(q);
509 	struct tcf_block *block = NULL;
510 	bool created = false;
511 	int err;
512 
513 	if (ei->block_index) {
514 		/* A non-zero block_index means a shared block was requested */
515 		block = tcf_block_lookup(net, ei->block_index);
516 		if (block)
517 			block->refcnt++;
518 	}
519 
520 	if (!block) {
521 		block = tcf_block_create(net, q, extack);
522 		if (IS_ERR(block))
523 			return PTR_ERR(block);
524 		created = true;
525 		if (ei->block_index) {
526 			err = tcf_block_insert(block, net,
527 					       ei->block_index, extack);
528 			if (err)
529 				goto err_block_insert;
530 		}
531 	}
532 
533 	err = tcf_block_owner_add(block, q, ei->binder_type);
534 	if (err)
535 		goto err_block_owner_add;
536 
537 	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
538 
539 	err = tcf_chain_head_change_cb_add(tcf_block_chain_zero(block),
540 					   ei, extack);
541 	if (err)
542 		goto err_chain_head_change_cb_add;
543 
544 	err = tcf_block_offload_bind(block, q, ei);
545 	if (err)
546 		goto err_block_offload_bind;
547 
548 	*p_block = block;
549 	return 0;
550 
551 err_block_offload_bind:
552 	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
553 err_chain_head_change_cb_add:
554 	tcf_block_owner_del(block, q, ei->binder_type);
555 err_block_owner_add:
556 	if (created) {
557 		if (tcf_block_shared(block))
558 			tcf_block_remove(block, net);
559 err_block_insert:
560 		kfree(tcf_block_chain_zero(block));
561 		kfree(block);
562 	} else {
563 		block->refcnt--;
564 	}
565 	return err;
566 }
567 EXPORT_SYMBOL(tcf_block_get_ext);
568 
569 static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
570 {
571 	struct tcf_proto __rcu **p_filter_chain = priv;
572 
573 	rcu_assign_pointer(*p_filter_chain, tp_head);
574 }
575 
576 int tcf_block_get(struct tcf_block **p_block,
577 		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
578 		  struct netlink_ext_ack *extack)
579 {
580 	struct tcf_block_ext_info ei = {
581 		.chain_head_change = tcf_chain_head_change_dflt,
582 		.chain_head_change_priv = p_filter_chain,
583 	};
584 
585 	WARN_ON(!p_filter_chain);
586 	return tcf_block_get_ext(p_block, q, &ei, extack);
587 }
588 EXPORT_SYMBOL(tcf_block_get);
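
/* Usage sketch (editor's note, hypothetical qdisc code; not part of this
 * file): a classful qdisc typically obtains its block in ->init() and
 * releases it in ->destroy():
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_put(q->block);
 *
 * where q->filter_list is the qdisc's struct tcf_proto __rcu * head that
 * tcf_chain_head_change_dflt() keeps up to date.
 */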
589 
590 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
591  * actions should all have been removed after flushing.
592  */
593 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
594 		       struct tcf_block_ext_info *ei)
595 {
596 	struct tcf_chain *chain, *tmp;
597 
598 	if (!block)
599 		return;
600 	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
601 	tcf_block_owner_del(block, q, ei->binder_type);
602 
603 	if (--block->refcnt == 0) {
604 		if (tcf_block_shared(block))
605 			tcf_block_remove(block, block->net);
606 
607 		/* Hold a refcnt for all chains, so that they don't disappear
608 		 * while we are iterating.
609 		 */
610 		list_for_each_entry(chain, &block->chain_list, list)
611 			tcf_chain_hold(chain);
612 
613 		list_for_each_entry(chain, &block->chain_list, list)
614 			tcf_chain_flush(chain);
615 	}
616 
617 	tcf_block_offload_unbind(block, q, ei);
618 
619 	if (block->refcnt == 0) {
620 		/* At this point, all the chains should have refcnt >= 1. */
621 		list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
622 			tcf_chain_put(chain);
623 
624 		/* Finally, put chain 0 and allow block to be freed. */
625 		tcf_chain_put(tcf_block_chain_zero(block));
626 	}
627 }
628 EXPORT_SYMBOL(tcf_block_put_ext);
629 
630 void tcf_block_put(struct tcf_block *block)
631 {
632 	struct tcf_block_ext_info ei = {0, };
633 
634 	if (!block)
635 		return;
636 	tcf_block_put_ext(block, block->q, &ei);
637 }
639 EXPORT_SYMBOL(tcf_block_put);
640 
641 struct tcf_block_cb {
642 	struct list_head list;
643 	tc_setup_cb_t *cb;
644 	void *cb_ident;
645 	void *cb_priv;
646 	unsigned int refcnt;
647 };
648 
649 void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
650 {
651 	return block_cb->cb_priv;
652 }
653 EXPORT_SYMBOL(tcf_block_cb_priv);
654 
655 struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
656 					 tc_setup_cb_t *cb, void *cb_ident)
657 {
	struct tcf_block_cb *block_cb;
658 
659 	list_for_each_entry(block_cb, &block->cb_list, list)
660 		if (block_cb->cb == cb && block_cb->cb_ident == cb_ident)
661 			return block_cb;
662 	return NULL;
663 }
664 EXPORT_SYMBOL(tcf_block_cb_lookup);
665 
666 void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
667 {
668 	block_cb->refcnt++;
669 }
670 EXPORT_SYMBOL(tcf_block_cb_incref);
671 
672 unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
673 {
674 	return --block_cb->refcnt;
675 }
676 EXPORT_SYMBOL(tcf_block_cb_decref);
677 
678 struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
679 					     tc_setup_cb_t *cb, void *cb_ident,
680 					     void *cb_priv)
681 {
682 	struct tcf_block_cb *block_cb;
683 
684 	/* At this point, playback of previous block cb calls is not supported,
685 	 * so refuse to register a callback on a block that already has
686 	 * some offloaded filters present.
687 	 */
688 	if (tcf_block_offload_in_use(block))
689 		return ERR_PTR(-EOPNOTSUPP);
690 
691 	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
692 	if (!block_cb)
693 		return ERR_PTR(-ENOMEM);
694 	block_cb->cb = cb;
695 	block_cb->cb_ident = cb_ident;
696 	block_cb->cb_priv = cb_priv;
697 	list_add(&block_cb->list, &block->cb_list);
698 	return block_cb;
699 }
700 EXPORT_SYMBOL(__tcf_block_cb_register);
701 
702 int tcf_block_cb_register(struct tcf_block *block,
703 			  tc_setup_cb_t *cb, void *cb_ident,
704 			  void *cb_priv)
705 {
706 	struct tcf_block_cb *block_cb;
707 
708 	block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv);
709 	return IS_ERR(block_cb) ? PTR_ERR(block_cb) : 0;
710 }
711 EXPORT_SYMBOL(tcf_block_cb_register);
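
/* Usage sketch (editor's note, hypothetical driver code; not part of this
 * file): a driver binding to a block registers a per-block callback that
 * then receives classifier offload requests:
 *
 *	static int foo_setup_tc_block_cb(enum tc_setup_type type,
 *					 void *type_data, void *cb_priv)
 *	{
 *		struct foo_priv *priv = cb_priv;
 *
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER:
 *			return foo_setup_flower(priv, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 *	err = tcf_block_cb_register(block, foo_setup_tc_block_cb,
 *				    priv, priv);
 *	...
 *	tcf_block_cb_unregister(block, foo_setup_tc_block_cb, priv);
 */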
712 
713 void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
714 {
715 	list_del(&block_cb->list);
716 	kfree(block_cb);
717 }
718 EXPORT_SYMBOL(__tcf_block_cb_unregister);
719 
720 void tcf_block_cb_unregister(struct tcf_block *block,
721 			     tc_setup_cb_t *cb, void *cb_ident)
722 {
723 	struct tcf_block_cb *block_cb;
724 
725 	block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
726 	if (!block_cb)
727 		return;
728 	__tcf_block_cb_unregister(block_cb);
729 }
730 EXPORT_SYMBOL(tcf_block_cb_unregister);
731 
732 static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
733 			     void *type_data, bool err_stop)
734 {
735 	struct tcf_block_cb *block_cb;
736 	int ok_count = 0;
737 	int err;
738 
739 	/* Make sure all netdevs sharing this block are offload-capable. */
740 	if (block->nooffloaddevcnt && err_stop)
741 		return -EOPNOTSUPP;
742 
743 	list_for_each_entry(block_cb, &block->cb_list, list) {
744 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
745 		if (err) {
746 			if (err_stop)
747 				return err;
748 		} else {
749 			ok_count++;
750 		}
751 	}
752 	return ok_count;
753 }
754 
755 /* Main classifier routine: scans classifier chain attached
756  * to this qdisc, (optionally) checks the protocol and then asks the
757  * specific classifiers to classify the packet.
758  */
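/* Return value: a non-negative classification result from one of the
 * classifiers, or TC_ACT_UNSPEC if nothing matched and the caller should
 * continue its own lookup.  With CONFIG_NET_CLS_ACT, TC_ACT_RECLASSIFY
 * and "goto chain" verdicts restart the walk, bounded by
 * max_reclassify_loop; exceeding the bound drops the packet (TC_ACT_SHOT).
 */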
759 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
760 		 struct tcf_result *res, bool compat_mode)
761 {
762 	__be16 protocol = tc_skb_protocol(skb);
763 #ifdef CONFIG_NET_CLS_ACT
764 	const int max_reclassify_loop = 4;
765 	const struct tcf_proto *orig_tp = tp;
766 	const struct tcf_proto *first_tp;
767 	int limit = 0;
768 
769 reclassify:
770 #endif
771 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
772 		int err;
773 
774 		if (tp->protocol != protocol &&
775 		    tp->protocol != htons(ETH_P_ALL))
776 			continue;
777 
778 		err = tp->classify(skb, tp, res);
779 #ifdef CONFIG_NET_CLS_ACT
780 		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
781 			first_tp = orig_tp;
782 			goto reset;
783 		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
784 			first_tp = res->goto_tp;
785 			goto reset;
786 		}
787 #endif
788 		if (err >= 0)
789 			return err;
790 	}
791 
792 	return TC_ACT_UNSPEC; /* signal: continue lookup */
793 #ifdef CONFIG_NET_CLS_ACT
794 reset:
795 	if (unlikely(limit++ >= max_reclassify_loop)) {
796 		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
797 				       tp->chain->block->index,
798 				       tp->prio & 0xffff,
799 				       ntohs(tp->protocol));
800 		return TC_ACT_SHOT;
801 	}
802 
803 	tp = first_tp;
804 	protocol = tc_skb_protocol(skb);
805 	goto reclassify;
806 #endif
807 }
808 EXPORT_SYMBOL(tcf_classify);
809 
810 struct tcf_chain_info {
811 	struct tcf_proto __rcu **pprev;
812 	struct tcf_proto __rcu *next;
813 };
814 
815 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
816 {
817 	return rtnl_dereference(*chain_info->pprev);
818 }
819 
820 static void tcf_chain_tp_insert(struct tcf_chain *chain,
821 				struct tcf_chain_info *chain_info,
822 				struct tcf_proto *tp)
823 {
824 	if (*chain_info->pprev == chain->filter_chain)
825 		tcf_chain_head_change(chain, tp);
826 	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
827 	rcu_assign_pointer(*chain_info->pprev, tp);
828 	tcf_chain_hold(chain);
829 }
830 
831 static void tcf_chain_tp_remove(struct tcf_chain *chain,
832 				struct tcf_chain_info *chain_info,
833 				struct tcf_proto *tp)
834 {
835 	struct tcf_proto *next = rtnl_dereference(chain_info->next);
836 
837 	if (tp == chain->filter_chain)
838 		tcf_chain_head_change(chain, next);
839 	RCU_INIT_POINTER(*chain_info->pprev, next);
840 	tcf_chain_put(chain);
841 }
842 
843 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
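/* Filters on a chain are kept sorted by ascending priority.  This finds
 * the insertion point for (protocol, prio) and records it in @chain_info,
 * returning the matching tcf_proto if one exists, NULL if not, or an
 * ERR_PTR if the priority is already taken by another protocol (or was
 * meant to be auto-allocated).
 */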
844 					   struct tcf_chain_info *chain_info,
845 					   u32 protocol, u32 prio,
846 					   bool prio_allocate)
847 {
848 	struct tcf_proto **pprev;
849 	struct tcf_proto *tp;
850 
851 	/* Check the chain for an existing proto-tcf with this priority */
852 	for (pprev = &chain->filter_chain;
853 	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
854 		if (tp->prio >= prio) {
855 			if (tp->prio == prio) {
856 				if (prio_allocate ||
857 				    (tp->protocol != protocol && protocol))
858 					return ERR_PTR(-EINVAL);
859 			} else {
860 				tp = NULL;
861 			}
862 			break;
863 		}
864 	}
865 	chain_info->pprev = pprev;
866 	chain_info->next = tp ? tp->next : NULL;
867 	return tp;
868 }
869 
870 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
871 			 struct tcf_proto *tp, struct tcf_block *block,
872 			 struct Qdisc *q, u32 parent, void *fh,
873 			 u32 portid, u32 seq, u16 flags, int event)
874 {
875 	struct tcmsg *tcm;
876 	struct nlmsghdr  *nlh;
877 	unsigned char *b = skb_tail_pointer(skb);
878 
879 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
880 	if (!nlh)
881 		goto out_nlmsg_trim;
882 	tcm = nlmsg_data(nlh);
883 	tcm->tcm_family = AF_UNSPEC;
884 	tcm->tcm__pad1 = 0;
885 	tcm->tcm__pad2 = 0;
886 	if (q) {
887 		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
888 		tcm->tcm_parent = parent;
889 	} else {
890 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
891 		tcm->tcm_block_index = block->index;
892 	}
893 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
894 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
895 		goto nla_put_failure;
896 	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
897 		goto nla_put_failure;
898 	if (!fh) {
899 		tcm->tcm_handle = 0;
900 	} else {
901 		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
902 			goto nla_put_failure;
903 	}
904 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
905 	return skb->len;
906 
907 out_nlmsg_trim:
908 nla_put_failure:
909 	nlmsg_trim(skb, b);
910 	return -1;
911 }
912 
913 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
914 			  struct nlmsghdr *n, struct tcf_proto *tp,
915 			  struct tcf_block *block, struct Qdisc *q,
916 			  u32 parent, void *fh, int event, bool unicast)
917 {
918 	struct sk_buff *skb;
919 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
920 
921 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
922 	if (!skb)
923 		return -ENOBUFS;
924 
925 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
926 			  n->nlmsg_seq, n->nlmsg_flags, event) <= 0) {
927 		kfree_skb(skb);
928 		return -EINVAL;
929 	}
930 
931 	if (unicast)
932 		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
933 
934 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
935 			      n->nlmsg_flags & NLM_F_ECHO);
936 }
937 
938 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
939 			      struct nlmsghdr *n, struct tcf_proto *tp,
940 			      struct tcf_block *block, struct Qdisc *q,
941 			      u32 parent, void *fh, bool unicast, bool *last,
942 			      struct netlink_ext_ack *extack)
943 {
944 	struct sk_buff *skb;
945 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
946 	int err;
947 
948 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
949 	if (!skb)
950 		return -ENOBUFS;
951 
952 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
953 			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
954 		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
955 		kfree_skb(skb);
956 		return -EINVAL;
957 	}
958 
959 	err = tp->ops->delete(tp, fh, last, extack);
960 	if (err) {
961 		kfree_skb(skb);
962 		return err;
963 	}
964 
965 	if (unicast)
966 		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
967 
968 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
969 			     n->nlmsg_flags & NLM_F_ECHO);
970 	if (err < 0)
971 		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
972 	return err;
973 }
974 
975 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
976 				 struct tcf_block *block, struct Qdisc *q,
977 				 u32 parent, struct nlmsghdr *n,
978 				 struct tcf_chain *chain, int event)
979 {
980 	struct tcf_proto *tp;
981 
982 	for (tp = rtnl_dereference(chain->filter_chain);
983 	     tp; tp = rtnl_dereference(tp->next))
984 		tfilter_notify(net, oskb, n, tp, block,
985 			       q, parent, 0, event, false);
986 }
987 
988 /* Add/change/delete/get a filter node */
989 
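/* The overall flow is: parse the message, locate the owning block (either
 * directly by block index or via the device/qdisc/class), take the
 * requested chain, look up or create the tcf_proto for (protocol, prio),
 * and finally dispatch on the message type (new/del/get).  If a classifier
 * module had to be loaded (RTNL was dropped meanwhile), tcf_proto_create()
 * returns -EAGAIN and the whole request is replayed.
 */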
990 static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
991 			  struct netlink_ext_ack *extack)
992 {
993 	struct net *net = sock_net(skb->sk);
994 	struct nlattr *tca[TCA_MAX + 1];
995 	struct tcmsg *t;
996 	u32 protocol;
997 	u32 prio;
998 	bool prio_allocate;
999 	u32 parent;
1000 	u32 chain_index;
1001 	struct Qdisc *q = NULL;
1002 	struct tcf_chain_info chain_info;
1003 	struct tcf_chain *chain = NULL;
1004 	struct tcf_block *block;
1005 	struct tcf_proto *tp;
1006 	unsigned long cl;
1007 	void *fh;
1008 	int err;
1009 	int tp_created;
1010 
1011 	if ((n->nlmsg_type != RTM_GETTFILTER) &&
1012 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1013 		return -EPERM;
1014 
1015 replay:
1016 	tp_created = 0;
1017 
1018 	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
1019 	if (err < 0)
1020 		return err;
1021 
1022 	t = nlmsg_data(n);
1023 	protocol = TC_H_MIN(t->tcm_info);
1024 	prio = TC_H_MAJ(t->tcm_info);
1025 	prio_allocate = false;
1026 	parent = t->tcm_parent;
1027 	cl = 0;
1028 
1029 	if (prio == 0) {
1030 		switch (n->nlmsg_type) {
1031 		case RTM_DELTFILTER:
1032 			if (protocol || t->tcm_handle || tca[TCA_KIND]) {
1033 				NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
1034 				return -ENOENT;
1035 			}
1036 			break;
1037 		case RTM_NEWTFILTER:
1038 			/* If no priority is provided by the user,
1039 			 * we allocate one.
1040 			 */
1041 			if (n->nlmsg_flags & NLM_F_CREATE) {
1042 				prio = TC_H_MAKE(0x80000000U, 0U);
1043 				prio_allocate = true;
1044 				break;
1045 			}
1046 			/* fall-through */
1047 		default:
1048 			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
1049 			return -ENOENT;
1050 		}
1051 	}
1052 
1053 	/* Find head of filter chain. */
1054 
1055 	if (t->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
1056 		block = tcf_block_lookup(net, t->tcm_block_index);
1057 		if (!block) {
1058 			NL_SET_ERR_MSG(extack, "Block of given index was not found");
1059 			err = -EINVAL;
1060 			goto errout;
1061 		}
1062 	} else {
1063 		const struct Qdisc_class_ops *cops;
1064 		struct net_device *dev;
1065 
1066 		/* Find link */
1067 		dev = __dev_get_by_index(net, t->tcm_ifindex);
1068 		if (!dev)
1069 			return -ENODEV;
1070 
1071 		/* Find qdisc */
1072 		if (!parent) {
1073 			q = dev->qdisc;
1074 			parent = q->handle;
1075 		} else {
1076 			q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
1077 			if (!q) {
1078 				NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1079 				return -EINVAL;
1080 			}
1081 		}
1082 
1083 		/* Is it classful? */
1084 		cops = q->ops->cl_ops;
1085 		if (!cops) {
1086 			NL_SET_ERR_MSG(extack, "Qdisc not classful");
1087 			return -EINVAL;
1088 		}
1089 
1090 		if (!cops->tcf_block) {
1091 			NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
1092 			return -EOPNOTSUPP;
1093 		}
1094 
1095 		/* Are we searching for a filter attached to a class? */
1096 		if (TC_H_MIN(parent)) {
1097 			cl = cops->find(q, parent);
1098 			if (cl == 0) {
1099 				NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
1100 				return -ENOENT;
1101 			}
1102 		}
1103 
1104 		/* And the last stroke */
1105 		block = cops->tcf_block(q, cl, extack);
1106 		if (!block) {
1107 			err = -EINVAL;
1108 			goto errout;
1109 		}
1110 		if (tcf_block_shared(block)) {
1111 			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
1112 			err = -EOPNOTSUPP;
1113 			goto errout;
1114 		}
1115 	}
1116 
1117 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
1118 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
1119 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
1120 		err = -EINVAL;
1121 		goto errout;
1122 	}
1123 	chain = tcf_chain_get(block, chain_index,
1124 			      n->nlmsg_type == RTM_NEWTFILTER);
1125 	if (!chain) {
1126 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
1127 		err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
1128 		goto errout;
1129 	}
1130 
1131 	if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
1132 		tfilter_notify_chain(net, skb, block, q, parent, n,
1133 				     chain, RTM_DELTFILTER);
1134 		tcf_chain_flush(chain);
1135 		err = 0;
1136 		goto errout;
1137 	}
1138 
1139 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
1140 			       prio, prio_allocate);
1141 	if (IS_ERR(tp)) {
1142 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
1143 		err = PTR_ERR(tp);
1144 		goto errout;
1145 	}
1146 
1147 	if (tp == NULL) {
1148 		/* Proto-tcf does not exist, create new one */
1149 
1150 		if (tca[TCA_KIND] == NULL || !protocol) {
1151 			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
1152 			err = -EINVAL;
1153 			goto errout;
1154 		}
1155 
1156 		if (n->nlmsg_type != RTM_NEWTFILTER ||
1157 		    !(n->nlmsg_flags & NLM_F_CREATE)) {
1158 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
1159 			err = -ENOENT;
1160 			goto errout;
1161 		}
1162 
1163 		if (prio_allocate)
1164 			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));
1165 
1166 		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
1167 				      protocol, prio, chain, extack);
1168 		if (IS_ERR(tp)) {
1169 			err = PTR_ERR(tp);
1170 			goto errout;
1171 		}
1172 		tp_created = 1;
1173 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
1174 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
1175 		err = -EINVAL;
1176 		goto errout;
1177 	}
1178 
1179 	fh = tp->ops->get(tp, t->tcm_handle);
1180 
1181 	if (!fh) {
1182 		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
1183 			tcf_chain_tp_remove(chain, &chain_info, tp);
1184 			tfilter_notify(net, skb, n, tp, block, q, parent, fh,
1185 				       RTM_DELTFILTER, false);
1186 			tcf_proto_destroy(tp, extack);
1187 			err = 0;
1188 			goto errout;
1189 		}
1190 
1191 		if (n->nlmsg_type != RTM_NEWTFILTER ||
1192 		    !(n->nlmsg_flags & NLM_F_CREATE)) {
1193 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
1194 			err = -ENOENT;
1195 			goto errout;
1196 		}
1197 	} else {
1198 		bool last;
1199 
1200 		switch (n->nlmsg_type) {
1201 		case RTM_NEWTFILTER:
1202 			if (n->nlmsg_flags & NLM_F_EXCL) {
1203 				if (tp_created)
1204 					tcf_proto_destroy(tp, NULL);
1205 				NL_SET_ERR_MSG(extack, "Filter already exists");
1206 				err = -EEXIST;
1207 				goto errout;
1208 			}
1209 			break;
1210 		case RTM_DELTFILTER:
1211 			err = tfilter_del_notify(net, skb, n, tp, block,
1212 						 q, parent, fh, false, &last,
1213 						 extack);
1214 			if (err)
1215 				goto errout;
1216 			if (last) {
1217 				tcf_chain_tp_remove(chain, &chain_info, tp);
1218 				tcf_proto_destroy(tp, extack);
1219 			}
1220 			goto errout;
1221 		case RTM_GETTFILTER:
1222 			err = tfilter_notify(net, skb, n, tp, block, q, parent,
1223 					     fh, RTM_NEWTFILTER, true);
1224 			if (err < 0)
1225 				NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
1226 			goto errout;
1227 		default:
1228 			NL_SET_ERR_MSG(extack, "Invalid netlink message type");
1229 			err = -EINVAL;
1230 			goto errout;
1231 		}
1232 	}
1233 
1234 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
1235 			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
1236 			      extack);
1237 	if (err == 0) {
1238 		if (tp_created)
1239 			tcf_chain_tp_insert(chain, &chain_info, tp);
1240 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
1241 			       RTM_NEWTFILTER, false);
1242 	} else {
1243 		if (tp_created)
1244 			tcf_proto_destroy(tp, NULL);
1245 	}
1246 
1247 errout:
1248 	if (chain)
1249 		tcf_chain_put(chain);
1250 	if (err == -EAGAIN)
1251 		/* Replay the request. */
1252 		goto replay;
1253 	return err;
1254 }
1255 
1256 struct tcf_dump_args {
1257 	struct tcf_walker w;
1258 	struct sk_buff *skb;
1259 	struct netlink_callback *cb;
1260 	struct tcf_block *block;
1261 	struct Qdisc *q;
1262 	u32 parent;
1263 };
1264 
1265 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
1266 {
1267 	struct tcf_dump_args *a = (void *)arg;
1268 	struct net *net = sock_net(a->skb->sk);
1269 
1270 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
1271 			     n, NETLINK_CB(a->cb->skb).portid,
1272 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
1273 			     RTM_NEWTFILTER);
1274 }
1275 
1276 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
1277 			   struct sk_buff *skb, struct netlink_callback *cb,
1278 			   long index_start, long *p_index)
1279 {
1280 	struct net *net = sock_net(skb->sk);
1281 	struct tcf_block *block = chain->block;
1282 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
1283 	struct tcf_dump_args arg;
1284 	struct tcf_proto *tp;
1285 
1286 	for (tp = rtnl_dereference(chain->filter_chain);
1287 	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
1288 		if (*p_index < index_start)
1289 			continue;
1290 		if (TC_H_MAJ(tcm->tcm_info) &&
1291 		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
1292 			continue;
1293 		if (TC_H_MIN(tcm->tcm_info) &&
1294 		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
1295 			continue;
1296 		if (*p_index > index_start)
1297 			memset(&cb->args[1], 0,
1298 			       sizeof(cb->args) - sizeof(cb->args[0]));
1299 		if (cb->args[1] == 0) {
1300 			if (tcf_fill_node(net, skb, tp, block, q, parent, 0,
1301 					  NETLINK_CB(cb->skb).portid,
1302 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
1303 					  RTM_NEWTFILTER) <= 0)
1304 				return false;
1305 
1306 			cb->args[1] = 1;
1307 		}
1308 		if (!tp->ops->walk)
1309 			continue;
1310 		arg.w.fn = tcf_node_dump;
1311 		arg.skb = skb;
1312 		arg.cb = cb;
1313 		arg.block = block;
1314 		arg.q = q;
1315 		arg.parent = parent;
1316 		arg.w.stop = 0;
1317 		arg.w.skip = cb->args[1] - 1;
1318 		arg.w.count = 0;
1319 		tp->ops->walk(tp, &arg.w);
1320 		cb->args[1] = arg.w.count + 1;
1321 		if (arg.w.stop)
1322 			return false;
1323 	}
1324 	return true;
1325 }
1326 
1327 /* called with RTNL */
1328 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
1329 {
1330 	struct net *net = sock_net(skb->sk);
1331 	struct nlattr *tca[TCA_MAX + 1];
1332 	struct Qdisc *q = NULL;
1333 	struct tcf_block *block;
1334 	struct tcf_chain *chain;
1335 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
1336 	long index_start;
1337 	long index;
1338 	u32 parent;
1339 	int err;
1340 
1341 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
1342 		return skb->len;
1343 
1344 	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
1345 	if (err)
1346 		return err;
1347 
1348 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
1349 		block = tcf_block_lookup(net, tcm->tcm_block_index);
1350 		if (!block)
1351 			goto out;
1352 		/* If we work with block index, q is NULL and parent value
1353 		 * will never be used in the following code. The check
1354 		 * in tcf_fill_node prevents it. However, the compiler does not
1355 		 * see that far, so set parent to zero to silence the warning
1356 		 * about parent being uninitialized.
1357 		 */
1358 		parent = 0;
1359 	} else {
1360 		const struct Qdisc_class_ops *cops;
1361 		struct net_device *dev;
1362 		unsigned long cl = 0;
1363 
1364 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1365 		if (!dev)
1366 			return skb->len;
1367 
1368 		parent = tcm->tcm_parent;
1369 		if (!parent) {
1370 			q = dev->qdisc;
1371 			parent = q->handle;
1372 		} else {
1373 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
1374 		}
1375 		if (!q)
1376 			goto out;
1377 		cops = q->ops->cl_ops;
1378 		if (!cops)
1379 			goto out;
1380 		if (!cops->tcf_block)
1381 			goto out;
1382 		if (TC_H_MIN(tcm->tcm_parent)) {
1383 			cl = cops->find(q, tcm->tcm_parent);
1384 			if (cl == 0)
1385 				goto out;
1386 		}
1387 		block = cops->tcf_block(q, cl, NULL);
1388 		if (!block)
1389 			goto out;
1390 		if (tcf_block_shared(block))
1391 			q = NULL;
1392 	}
1393 
1394 	index_start = cb->args[0];
1395 	index = 0;
1396 
1397 	list_for_each_entry(chain, &block->chain_list, list) {
1398 		if (tca[TCA_CHAIN] &&
1399 		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
1400 			continue;
1401 		if (!tcf_chain_dump(chain, q, parent, skb, cb,
1402 				    index_start, &index))
1403 			break;
1404 	}
1405 
1406 	cb->args[0] = index;
1407 
1408 out:
1409 	return skb->len;
1410 }
1411 
1412 void tcf_exts_destroy(struct tcf_exts *exts)
1413 {
1414 #ifdef CONFIG_NET_CLS_ACT
1415 	LIST_HEAD(actions);
1416 
1417 	ASSERT_RTNL();
1418 	tcf_exts_to_list(exts, &actions);
1419 	tcf_action_destroy(&actions, TCA_ACT_UNBIND);
1420 	kfree(exts->actions);
1421 	exts->nr_actions = 0;
1422 #endif
1423 }
1424 EXPORT_SYMBOL(tcf_exts_destroy);
1425 
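/* Parse the action attributes of a filter into @exts.  The old-style
 * single "police" action is kept as TCA_OLD_COMPAT in slot 0; otherwise
 * the full action list from tb[exts->action] is initialized with
 * TCA_ACT_BIND.  Without CONFIG_NET_CLS_ACT any action attribute is
 * rejected with -EOPNOTSUPP.
 */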
1426 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
1427 		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
1428 		      struct netlink_ext_ack *extack)
1429 {
1430 #ifdef CONFIG_NET_CLS_ACT
1431 	{
1432 		struct tc_action *act;
1433 
1434 		if (exts->police && tb[exts->police]) {
1435 			act = tcf_action_init_1(net, tp, tb[exts->police],
1436 						rate_tlv, "police", ovr,
1437 						TCA_ACT_BIND);
1438 			if (IS_ERR(act))
1439 				return PTR_ERR(act);
1440 
1441 			act->type = exts->type = TCA_OLD_COMPAT;
1442 			exts->actions[0] = act;
1443 			exts->nr_actions = 1;
1444 		} else if (exts->action && tb[exts->action]) {
1445 			LIST_HEAD(actions);
1446 			int err, i = 0;
1447 
1448 			err = tcf_action_init(net, tp, tb[exts->action],
1449 					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
1450 					      &actions);
1451 			if (err)
1452 				return err;
1453 			list_for_each_entry(act, &actions, list)
1454 				exts->actions[i++] = act;
1455 			exts->nr_actions = i;
1456 		}
1457 		exts->net = net;
1458 	}
1459 #else
1460 	if ((exts->action && tb[exts->action]) ||
1461 	    (exts->police && tb[exts->police])) {
1462 		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
1463 		return -EOPNOTSUPP;
1464 	}
1465 #endif
1466 
1467 	return 0;
1468 }
1469 EXPORT_SYMBOL(tcf_exts_validate);
1470 
1471 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
1472 {
1473 #ifdef CONFIG_NET_CLS_ACT
1474 	struct tcf_exts old = *dst;
1475 
1476 	*dst = *src;
1477 	tcf_exts_destroy(&old);
1478 #endif
1479 }
1480 EXPORT_SYMBOL(tcf_exts_change);
1481 
1482 #ifdef CONFIG_NET_CLS_ACT
1483 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
1484 {
1485 	if (exts->nr_actions == 0)
1486 		return NULL;
1487 	else
1488 		return exts->actions[0];
1489 }
1490 #endif
1491 
1492 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
1493 {
1494 #ifdef CONFIG_NET_CLS_ACT
1495 	struct nlattr *nest;
1496 
1497 	if (exts->action && tcf_exts_has_actions(exts)) {
1498 		/*
1499 		 * Again, for backward-compatible mode: we want
1500 		 * to work with both old and new modes of entering
1501 		 * tc data even if iproute2 was newer - jhs
1502 		 */
1503 		if (exts->type != TCA_OLD_COMPAT) {
1504 			LIST_HEAD(actions);
1505 
1506 			nest = nla_nest_start(skb, exts->action);
1507 			if (nest == NULL)
1508 				goto nla_put_failure;
1509 
1510 			tcf_exts_to_list(exts, &actions);
1511 			if (tcf_action_dump(skb, &actions, 0, 0) < 0)
1512 				goto nla_put_failure;
1513 			nla_nest_end(skb, nest);
1514 		} else if (exts->police) {
1515 			struct tc_action *act = tcf_exts_first_act(exts);
1516 			nest = nla_nest_start(skb, exts->police);
1517 			if (nest == NULL || !act)
1518 				goto nla_put_failure;
1519 			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
1520 				goto nla_put_failure;
1521 			nla_nest_end(skb, nest);
1522 		}
1523 	}
1524 	return 0;
1525 
1526 nla_put_failure:
1527 	nla_nest_cancel(skb, nest);
1528 	return -1;
1529 #else
1530 	return 0;
1531 #endif
1532 }
1533 EXPORT_SYMBOL(tcf_exts_dump);
1534 
1536 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
1537 {
1538 #ifdef CONFIG_NET_CLS_ACT
1539 	struct tc_action *a = tcf_exts_first_act(exts);
1540 	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
1541 		return -1;
1542 #endif
1543 	return 0;
1544 }
1545 EXPORT_SYMBOL(tcf_exts_dump_stats);
1546 
1547 static int tc_exts_setup_cb_egdev_call(struct tcf_exts *exts,
1548 				       enum tc_setup_type type,
1549 				       void *type_data, bool err_stop)
1550 {
1551 	int ok_count = 0;
1552 #ifdef CONFIG_NET_CLS_ACT
1553 	const struct tc_action *a;
1554 	struct net_device *dev;
1555 	int i, ret;
1556 
1557 	if (!tcf_exts_has_actions(exts))
1558 		return 0;
1559 
1560 	for (i = 0; i < exts->nr_actions; i++) {
1561 		a = exts->actions[i];
1562 		if (!a->ops->get_dev)
1563 			continue;
1564 		dev = a->ops->get_dev(a);
1565 		if (!dev)
1566 			continue;
1567 		ret = tc_setup_cb_egdev_call(dev, type, type_data, err_stop);
1568 		if (ret < 0)
1569 			return ret;
1570 		ok_count += ret;
1571 	}
1572 #endif
1573 	return ok_count;
1574 }
1575 
1576 int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
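/* Drive a hardware offload request: first through all callbacks registered
 * on the block, then (when @exts is given) through the egress-device
 * callbacks of any actions that expose a device via ->get_dev().  The
 * result is the number of callbacks that accepted the request, or a
 * negative error when @err_stop is set and a callback failed.
 */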
1577 		     enum tc_setup_type type, void *type_data, bool err_stop)
1578 {
1579 	int ok_count;
1580 	int ret;
1581 
1582 	ret = tcf_block_cb_call(block, type, type_data, err_stop);
1583 	if (ret < 0)
1584 		return ret;
1585 	ok_count = ret;
1586 
1587 	if (!exts)
1588 		return ok_count;
1589 	ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
1590 	if (ret < 0)
1591 		return ret;
1592 	ok_count += ret;
1593 
1594 	return ok_count;
1595 }
1596 EXPORT_SYMBOL(tc_setup_cb_call);
1597 
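/* Per-netns state: shareable blocks are kept in an IDR keyed by block
 * index, so tcf_block_lookup() can resolve TCM_IFINDEX_MAGIC_BLOCK
 * requests that address a block directly rather than via a device.
 */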
1598 static __net_init int tcf_net_init(struct net *net)
1599 {
1600 	struct tcf_net *tn = net_generic(net, tcf_net_id);
1601 
1602 	idr_init(&tn->idr);
1603 	return 0;
1604 }
1605 
1606 static void __net_exit tcf_net_exit(struct net *net)
1607 {
1608 	struct tcf_net *tn = net_generic(net, tcf_net_id);
1609 
1610 	idr_destroy(&tn->idr);
1611 }
1612 
1613 static struct pernet_operations tcf_net_ops = {
1614 	.init = tcf_net_init,
1615 	.exit = tcf_net_exit,
1616 	.id   = &tcf_net_id,
1617 	.size = sizeof(struct tcf_net),
1618 };
1619 
1620 static int __init tc_filter_init(void)
1621 {
1622 	int err;
1623 
1624 	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
1625 	if (!tc_filter_wq)
1626 		return -ENOMEM;
1627 
1628 	err = register_pernet_subsys(&tcf_net_ops);
1629 	if (err)
1630 		goto err_register_pernet_subsys;
1631 
1632 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
1633 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
1634 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
1635 		      tc_dump_tfilter, 0);
1636 
1637 	return 0;
1638 
1639 err_register_pernet_subsys:
1640 	destroy_workqueue(tc_filter_wq);
1641 	return err;
1642 }
1643 
1644 subsys_initcall(tc_filter_init);
1645