xref: /openbmc/linux/net/sched/cls_api.c (revision 77ab8d5d)
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find a classifier type by its string name */

static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

/* Register (or unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
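
/* A minimal sketch of how a classifier module typically uses this
 * registration API (hypothetical "cls_example"; the handler names are
 * assumptions, but .classify, .init and .destroy are dereferenced
 * unconditionally by the code below, so a real classifier must provide
 * them):
 *
 *	static struct tcf_proto_ops cls_example_ops = {
 *		.kind		= "example",
 *		.classify	= example_classify,
 *		.init		= example_init,
 *		.destroy	= example_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_example_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_example_ops);
 *	}
 *
 *	static void __exit cls_example_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_example_ops);
 *	}
 *
 * The "cls_%s" naming convention used by tcf_proto_create() below is
 * what lets request_module() autoload such a module by kind.
 */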

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
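
/* tcf_queue_work() runs @func on tc_filter_wq only after an RCU grace
 * period has elapsed (queue_rcu_work() goes through call_rcu()
 * internally). unregister_tcf_proto_ops() above pairs with this by
 * draining both pending call_rcu() callbacks (rcu_barrier()) and the
 * workqueue itself before letting the classifier module disappear.
 */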

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
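
/* Worked example: filter prios live in the upper 16 bits of the 32-bit
 * value (see TC_H_MAJ()). With the current chain head at prio
 * 0x00080000 (user-visible prio 8), tp->prio - 1 gives 0x0007ffff and
 * TC_H_MAJ() masks that down to 0x00070000, so the auto-allocated prio
 * slots in just ahead of the existing head. With no head at all, the
 * kernel-reserved default 0xC0000000 is used.
 */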

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	err = -ENOENT;
	tp->ops = tcf_proto_lookup_ops(kind);
	if (!tp->ops) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("cls_%s", kind);
		rtnl_lock();
		tp->ops = tcf_proto_lookup_ops(kind);
		/* We dropped the RTNL semaphore in order to perform
		 * the module load. So, even if we succeeded in loading
		 * the module, we have to replay the request. We indicate
		 * this using -EAGAIN.
		 */
		if (tp->ops) {
			module_put(tp->ops->owner);
			err = -EAGAIN;
		} else {
			NL_SET_ERR_MSG(extack, "TC classifier not found");
			err = -ENOENT;
		}
#endif
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_destroy(struct tcf_proto *tp,
			      struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, extack);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	INIT_LIST_HEAD(&chain->filter_chain_list);
	list_add_tail(&chain->list, &block->chain_list);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain_head_change(struct tcf_chain *chain,
				  struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;

	list_for_each_entry(item, &chain->filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
}

static void tcf_chain_flush(struct tcf_chain *chain)
{
	struct tcf_proto *tp = rtnl_dereference(chain->filter_chain);

	tcf_chain_head_change(chain, NULL);
	while (tp) {
		RCU_INIT_POINTER(chain->filter_chain, tp->next);
		tcf_proto_destroy(tp, NULL);
		tp = rtnl_dereference(chain->filter_chain);
		tcf_chain_put(chain);
	}
}

static void tcf_chain_destroy(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	list_del(&chain->list);
	kfree(chain);
	if (list_empty(&block->chain_list))
		kfree(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	++chain->refcnt;
}

struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create)
{
	struct tcf_chain *chain;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index) {
			tcf_chain_hold(chain);
			return chain;
		}
	}

	return create ? tcf_chain_create(block, chain_index) : NULL;
}
EXPORT_SYMBOL(tcf_chain_get);

void tcf_chain_put(struct tcf_chain *chain)
{
	if (--chain->refcnt == 0)
		tcf_chain_destroy(chain);
}
EXPORT_SYMBOL(tcf_chain_put);
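
/* Typical caller pattern for the chain refcount (a sketch; this is how
 * tc_ctl_tfilter() below uses it, error unwinding elided):
 *
 *	chain = tcf_chain_get(block, chain_index, true);
 *	if (!chain)
 *		return -ENOMEM;
 *	... manipulate filters on the chain under RTNL ...
 *	tcf_chain_put(chain);
 *
 * Each tcf_proto inserted on the chain also holds a reference (see
 * tcf_chain_tp_insert() below), so a chain stays alive as long as it
 * still carries filters.
 */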

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return block->offloadcnt;
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum tc_block_command command)
{
	struct tc_block_offload bo = {};

	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = block;
	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If the tc offload feature is disabled and the block we are
	 * trying to bind to already has some offloaded filters, refuse
	 * to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block))
		return -EOPNOTSUPP;

	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	return err;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		return -EOPNOTSUPP;
	block->nooffloaddevcnt++;
	return 0;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
}
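
/* nooffloaddevcnt counts the devices bound to this block that cannot
 * offload: either the driver has no ndo_setup_tc at all, or it
 * answered the TC_BLOCK_BIND command with -EOPNOTSUPP. While the count
 * is non-zero, tcf_block_cb_call() below refuses commands whose caller
 * demands that every device succeed (err_stop == true).
 */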

static int
tcf_chain_head_change_cb_add(struct tcf_chain *chain,
			     struct tcf_block_ext_info *ei,
			     struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;
	if (chain->filter_chain)
		tcf_chain_head_change_item(item, chain->filter_chain);
	list_add(&item->list, &chain->filter_chain_list);
	return 0;
}

static void
tcf_chain_head_change_cb_del(struct tcf_chain *chain,
			     struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	list_for_each_entry(item, &chain->filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

struct tcf_net {
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			     GFP_KERNEL);
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_remove(&tn->idr, block->index);
}
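
/* Shared blocks are indexed per network namespace in tcf_net->idr,
 * keyed by the user-chosen block->index. Passing block->index as both
 * the starting id and the maximum to idr_alloc_u32() either claims
 * exactly that index or fails (-ENOSPC) if it is already taken.
 */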

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	struct tcf_chain *chain;
	int err;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->cb_list);
	INIT_LIST_HEAD(&block->owner_list);

	/* Create chain 0 by default; it always has to be present. */
	chain = tcf_chain_create(block, 0);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Failed to create new tcf chain");
		err = -ENOMEM;
		goto err_chain_create;
	}
	block->refcnt = 1;
	block->net = net;
	block->index = block_index;

	/* Don't store the q pointer for blocks that are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;

err_chain_create:
	kfree(block);
	return ERR_PTR(err);
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block)
{
	return list_first_entry(&block->chain_list, struct tcf_chain, list);
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum tcf_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum tcf_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum tcf_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum tcf_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	bool created = false;
	int err;

	if (ei->block_index) {
		/* A nonzero block_index means a shared block is requested */
		block = tcf_block_lookup(net, ei->block_index);
		if (block)
			block->refcnt++;
	}

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		created = true;
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain_head_change_cb_add(tcf_block_chain_zero(block),
					   ei, extack);
	if (err)
		goto err_chain_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
err_chain_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
	if (created) {
		if (tcf_block_shared(block))
			tcf_block_remove(block, net);
err_block_insert:
		kfree(tcf_block_chain_zero(block));
		kfree(block);
	} else {
		block->refcnt--;
	}
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
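
/* Sketch of how a classful qdisc typically obtains and releases its
 * block (hypothetical private struct fields; the shape mirrors the
 * qdiscs that call tcf_block_get()):
 *
 *	In ->init():
 *		err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *		if (err)
 *			return err;
 *
 *	In ->destroy():
 *		tcf_block_put(q->block);
 *
 * The default chain_head_change callback installed here simply mirrors
 * the head of chain 0 into q->filter_list, which the datapath then
 * feeds to tcf_classify().
 */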

/* XXX: Standalone actions are not allowed to jump to any chain, and
 * bound actions should all be removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	struct tcf_chain *chain, *tmp;

	if (!block)
		return;
	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	if (--block->refcnt == 0) {
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		/* Hold a refcnt for all chains, so that they don't disappear
		 * while we are iterating.
		 */
		list_for_each_entry(chain, &block->chain_list, list)
			tcf_chain_hold(chain);

		list_for_each_entry(chain, &block->chain_list, list)
			tcf_chain_flush(chain);
	}

	tcf_block_offload_unbind(block, q, ei);

	if (block->refcnt == 0) {
		/* At this point, all the chains should have refcnt >= 1. */
		list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
			tcf_chain_put(chain);

		/* Finally, put chain 0 and allow block to be freed. */
		tcf_chain_put(tcf_block_chain_zero(block));
	}
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

struct tcf_block_cb {
	struct list_head list;
	tc_setup_cb_t *cb;
	void *cb_ident;
	void *cb_priv;
	unsigned int refcnt;
};

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(tcf_block_cb_priv);

struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	struct tcf_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list)
		if (block_cb->cb == cb && block_cb->cb_ident == cb_ident)
			return block_cb;
	return NULL;
}
EXPORT_SYMBOL(tcf_block_cb_lookup);

void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(tcf_block_cb_incref);

unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(tcf_block_cb_decref);

struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv)
{
	struct tcf_block_cb *block_cb;

	/* At this point, playback of previous block cb calls is not
	 * supported, so refuse to register a callback on a block that
	 * already has some offloaded filters present.
	 */
	if (tcf_block_offload_in_use(block))
		return ERR_PTR(-EOPNOTSUPP);

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);
	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	list_add(&block_cb->list, &block->cb_list);
	return block_cb;
}
EXPORT_SYMBOL(__tcf_block_cb_register);

int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv)
{
	struct tcf_block_cb *block_cb;

	block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv);
	return IS_ERR(block_cb) ? PTR_ERR(block_cb) : 0;
}
EXPORT_SYMBOL(tcf_block_cb_register);

void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
{
	list_del(&block_cb->list);
	kfree(block_cb);
}
EXPORT_SYMBOL(__tcf_block_cb_unregister);

void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
	struct tcf_block_cb *block_cb;

	block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
	if (!block_cb)
		return;
	__tcf_block_cb_unregister(block_cb);
}
EXPORT_SYMBOL(tcf_block_cb_unregister);
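
/* Sketch of driver-side usage of the block callback API (hypothetical
 * handler name; real drivers dispatch on @type, e.g. TC_SETUP_CLSFLOWER,
 * and return -EOPNOTSUPP for commands they do not handle):
 *
 *	static int example_block_cb(enum tc_setup_type type,
 *				    void *type_data, void *cb_priv)
 *	{
 *		struct example_priv *priv = cb_priv;
 *		...
 *	}
 *
 *	err = tcf_block_cb_register(block, example_block_cb, priv, priv);
 *	...
 *	tcf_block_cb_unregister(block, example_block_cb, priv);
 *
 * cb_ident must be the same value at register and unregister time;
 * tcf_block_cb_lookup() matches on the (cb, cb_ident) pair.
 */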

static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
			     void *type_data, bool err_stop)
{
	struct tcf_block_cb *block_cb;
	int ok_count = 0;
	int err;

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop)
		return -EOPNOTSUPP;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

/* Main classifier routine: scans the classifier chain attached
 * to this qdisc, (optionally) tests for the protocol and asks
 * the specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	protocol = tc_skb_protocol(skb);
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);
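
/* Note on the reset path above: TC_ACT_RECLASSIFY restarts the scan
 * from the head the classification originally started with (orig_tp),
 * while TC_ACT_GOTO_CHAIN restarts from the head of the target chain
 * (res->goto_tp). Both are bounded by max_reclassify_loop so that a
 * misconfigured ruleset cannot loop forever; on overflow the packet is
 * dropped via TC_ACT_SHOT.
 */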

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
{
	return rtnl_dereference(*chain_info->pprev);
}

static void tcf_chain_tp_insert(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain_head_change(chain, tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);
	tcf_chain_hold(chain);
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = rtnl_dereference(chain_info->next);

	if (tp == chain->filter_chain)
		tcf_chain_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
	tcf_chain_put(chain);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for the existence of a proto-tcf with this
	 * priority.
	 */
	for (pprev = &chain->filter_chain;
	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	chain_info->next = tp ? tp->next : NULL;
	return tp;
}
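
/* On return, chain_info->pprev points at the link where a tp of the
 * requested prio either sits or would have to be inserted (the list is
 * kept sorted by ascending prio), so the caller can hand chain_info
 * straight to tcf_chain_tp_insert() or tcf_chain_tp_remove().
 */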

static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, 0, event, false);
}

/* Add/change/delete/get a filter node */

static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;

	if ((n->nlmsg_type != RTM_GETTFILTER) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		switch (n->nlmsg_type) {
		case RTM_DELTFILTER:
			if (protocol || t->tcm_handle || tca[TCA_KIND]) {
				NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
				return -ENOENT;
			}
			break;
		case RTM_NEWTFILTER:
			/* If no priority is provided by the user,
			 * we allocate one.
			 */
			if (n->nlmsg_flags & NLM_F_CREATE) {
				prio = TC_H_MAKE(0x80000000U, 0U);
				prio_allocate = true;
				break;
			}
			/* fall-through */
		default:
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find the head of the filter chain. */

	if (t->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_lookup(net, t->tcm_block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			err = -EINVAL;
			goto errout;
		}
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;

		/* Find link */
		dev = __dev_get_by_index(net, t->tcm_ifindex);
		if (!dev)
			return -ENODEV;

		/* Find qdisc */
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
			if (!q) {
				NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
				return -EINVAL;
			}
		}

		/* Is it classful? */
		cops = q->ops->cl_ops;
		if (!cops) {
			NL_SET_ERR_MSG(extack, "Qdisc not classful");
			return -EINVAL;
		}

		if (!cops->tcf_block) {
			NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
			return -EOPNOTSUPP;
		}

		/* Are we searching for a filter attached to a class? */
		if (TC_H_MIN(parent)) {
			cl = cops->find(q, parent);
			if (cl == 0) {
				NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
				return -ENOENT;
			}
		}

		/* And finally, get the block itself. */
		block = cops->tcf_block(q, cl, extack);
		if (!block) {
			err = -EINVAL;
			goto errout;
		}
		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			err = -EOPNOTSUPP;
			goto errout;
		}
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index,
			      n->nlmsg_type == RTM_NEWTFILTER);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
		goto errout;
	}

	if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER);
		tcf_chain_flush(chain);
		err = 0;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout;
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist; create a new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));

		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
				      protocol, prio, chain, extack);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout;
		}
		tp_created = 1;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			tcf_chain_tp_remove(chain, &chain_info, tp);
			tfilter_notify(net, skb, n, tp, block, q, parent, fh,
				       RTM_DELTFILTER, false);
			tcf_proto_destroy(tp, extack);
			err = 0;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else {
		bool last;

		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			if (n->nlmsg_flags & NLM_F_EXCL) {
				if (tp_created)
					tcf_proto_destroy(tp, NULL);
				NL_SET_ERR_MSG(extack, "Filter already exists");
				err = -EEXIST;
				goto errout;
			}
			break;
		case RTM_DELTFILTER:
			err = tfilter_del_notify(net, skb, n, tp, block,
						 q, parent, fh, false, &last,
						 extack);
			if (err)
				goto errout;
			if (last) {
				tcf_chain_tp_remove(chain, &chain_info, tp);
				tcf_proto_destroy(tp, extack);
			}
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(net, skb, n, tp, block, q, parent,
					     fh, RTM_NEWTFILTER, true);
			if (err < 0)
				NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
			goto errout;
		default:
			NL_SET_ERR_MSG(extack, "Invalid netlink message type");
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
			      extack);
	if (err == 0) {
		if (tp_created)
			tcf_chain_tp_insert(chain, &chain_info, tp);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false);
	} else {
		if (tp_created)
			tcf_proto_destroy(tp, NULL);
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;
	u32 parent;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_dump_args arg;
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, 0,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				return false;

			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			return false;
	}
	return true;
}
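
/* Dump resume state is kept in cb->args: args[0] is the flat index of
 * the tcf_proto to resume from across all chains of the block, and
 * args[1] is the position inside the current tp's walker, offset by
 * one so that zero can mean "the tp header itself not yet dumped".
 */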

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_lookup(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with a block index, q is NULL and the parent
		 * value will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, the compiler does
		 * not see that far, so set parent to zero to silence the
		 * warning about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index)) {
			err = -EMSGSIZE;
			break;
		}
	}

	cb->args[0] = index;

out:
	/* If we made no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	LIST_HEAD(actions);

	ASSERT_RTNL();
	tcf_exts_to_list(exts, &actions);
	tcf_action_destroy(&actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND, extack);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			LIST_HEAD(actions);
			int err, i = 0;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      &actions, &attr_size, extack);
			if (err)
				return err;
			list_for_each_entry(act, &actions, list)
				exts->actions[i++] = act;
			exts->nr_actions = i;
		}
		exts->net = net;
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);
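
/* exts->action and exts->police hold netlink attribute indices rather
 * than flags; a classifier seeds them via tcf_exts_init() before
 * calling into here. A sketch using cls_u32's attribute numbers
 * (variable names are illustrative):
 *
 *	err = tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
 *	if (err < 0)
 *		goto errout;
 *	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, extack);
 */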

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/* Again for backward-compatible mode: we want to work
		 * with both old and new modes of entering tc data even
		 * if iproute2 is newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			LIST_HEAD(actions);

			nest = nla_nest_start(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			tcf_exts_to_list(exts, &actions);
			if (tcf_action_dump(skb, &actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

static int tc_exts_setup_cb_egdev_call(struct tcf_exts *exts,
				       enum tc_setup_type type,
				       void *type_data, bool err_stop)
{
	int ok_count = 0;
#ifdef CONFIG_NET_CLS_ACT
	const struct tc_action *a;
	struct net_device *dev;
	int i, ret;

	if (!tcf_exts_has_actions(exts))
		return 0;

	for (i = 0; i < exts->nr_actions; i++) {
		a = exts->actions[i];
		if (!a->ops->get_dev)
			continue;
		dev = a->ops->get_dev(a);
		if (!dev)
			continue;
		ret = tc_setup_cb_egdev_call(dev, type, type_data, err_stop);
		if (ret < 0)
			return ret;
		ok_count += ret;
	}
#endif
	return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop)
{
	int ok_count;
	int ret;

	ret = tcf_block_cb_call(block, type, type_data, err_stop);
	if (ret < 0)
		return ret;
	ok_count = ret;

	if (!exts || ok_count)
		return ok_count;
	ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
	if (ret < 0)
		return ret;
	ok_count += ret;

	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);
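
/* Dispatch order in tc_setup_cb_call(): the block callbacks run first;
 * only when none of them handled the command (ok_count == 0) does the
 * call fall back to the per-action egress-device callbacks above, so a
 * rule is not offloaded through both paths at once.
 */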

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id   = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);