xref: /openbmc/linux/net/sched/cls_api.c (revision 08990494e59d1ee43f02a687042b7b30ca260bad)
1 /*
2  * net/sched/cls_api.c	Packet classifier API.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10  *
11  * Changes:
12  *
13  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
14  *
15  */
16 
17 #include <linux/module.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/string.h>
21 #include <linux/errno.h>
22 #include <linux/err.h>
23 #include <linux/skbuff.h>
24 #include <linux/init.h>
25 #include <linux/kmod.h>
26 #include <linux/slab.h>
27 #include <linux/idr.h>
28 #include <linux/rhashtable.h>
29 #include <net/net_namespace.h>
30 #include <net/sock.h>
31 #include <net/netlink.h>
32 #include <net/pkt_sched.h>
33 #include <net/pkt_cls.h>
34 
35 extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
36 
37 /* The list of all installed classifier types */
38 static LIST_HEAD(tcf_proto_base);
39 
40 /* Protects the list of registered TC modules. It is a pure SMP lock. */
41 static DEFINE_RWLOCK(cls_mod_lock);
42 
43 /* Find classifier type by string name */
44 
45 static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
46 {
47 	const struct tcf_proto_ops *t, *res = NULL;
48 
49 	if (kind) {
50 		read_lock(&cls_mod_lock);
51 		list_for_each_entry(t, &tcf_proto_base, head) {
52 			if (strcmp(kind, t->kind) == 0) {
53 				if (try_module_get(t->owner))
54 					res = t;
55 				break;
56 			}
57 		}
58 		read_unlock(&cls_mod_lock);
59 	}
60 	return res;
61 }
62 
63 static const struct tcf_proto_ops *
64 tcf_proto_lookup_ops(const char *kind, struct netlink_ext_ack *extack)
65 {
66 	const struct tcf_proto_ops *ops;
67 
68 	ops = __tcf_proto_lookup_ops(kind);
69 	if (ops)
70 		return ops;
71 #ifdef CONFIG_MODULES
72 	rtnl_unlock();
73 	request_module("cls_%s", kind);
74 	rtnl_lock();
75 	ops = __tcf_proto_lookup_ops(kind);
76 	/* We dropped the RTNL semaphore in order to perform
77 	 * the module load. So, even if we succeeded in loading
78 	 * the module we have to replay the request. We indicate
79 	 * this using -EAGAIN.
80 	 */
81 	if (ops) {
82 		module_put(ops->owner);
83 		return ERR_PTR(-EAGAIN);
84 	}
85 #endif
86 	NL_SET_ERR_MSG(extack, "TC classifier not found");
87 	return ERR_PTR(-ENOENT);
88 }
89 
90 /* Register/unregister a new classifier type */
91 
92 int register_tcf_proto_ops(struct tcf_proto_ops *ops)
93 {
94 	struct tcf_proto_ops *t;
95 	int rc = -EEXIST;
96 
97 	write_lock(&cls_mod_lock);
98 	list_for_each_entry(t, &tcf_proto_base, head)
99 		if (!strcmp(ops->kind, t->kind))
100 			goto out;
101 
102 	list_add_tail(&ops->head, &tcf_proto_base);
103 	rc = 0;
104 out:
105 	write_unlock(&cls_mod_lock);
106 	return rc;
107 }
108 EXPORT_SYMBOL(register_tcf_proto_ops);
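/* Usage sketch (illustrative, not part of this file): a classifier module
 * typically registers its ops from module_init() and unregisters them from
 * module_exit(), roughly:
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.get		= foo_get,
 *		.change		= foo_change,
 *		.delete		= foo_delete,
 *		.walk		= foo_walk,
 *		.dump		= foo_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 * "foo" and the foo_* callbacks are placeholders; see net/sched/cls_basic.c
 * for a real in-tree example.
 */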
109 
110 static struct workqueue_struct *tc_filter_wq;
111 
112 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
113 {
114 	struct tcf_proto_ops *t;
115 	int rc = -ENOENT;
116 
117 	/* Wait for outstanding call_rcu()s, if any, from a
118 	 * tcf_proto_ops's destroy() handler.
119 	 */
120 	rcu_barrier();
121 	flush_workqueue(tc_filter_wq);
122 
123 	write_lock(&cls_mod_lock);
124 	list_for_each_entry(t, &tcf_proto_base, head) {
125 		if (t == ops) {
126 			list_del(&t->head);
127 			rc = 0;
128 			break;
129 		}
130 	}
131 	write_unlock(&cls_mod_lock);
132 	return rc;
133 }
134 EXPORT_SYMBOL(unregister_tcf_proto_ops);
135 
136 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
137 {
138 	INIT_RCU_WORK(rwork, func);
139 	return queue_rcu_work(tc_filter_wq, rwork);
140 }
141 EXPORT_SYMBOL(tcf_queue_work);
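/* Usage sketch (illustrative): classifiers embed a struct rcu_work in their
 * per-filter state and use tcf_queue_work() so the filter is only freed after
 * an RCU grace period has elapsed and the work item runs in process context,
 * roughly:
 *
 *	struct foo_filter {
 *		...
 *		struct rcu_work rwork;
 *	};
 *
 *	static void foo_delete_filter_work(struct work_struct *work)
 *	{
 *		struct foo_filter *f = container_of(to_rcu_work(work),
 *						    struct foo_filter, rwork);
 *		rtnl_lock();
 *		__foo_delete_filter(f);
 *		rtnl_unlock();
 *	}
 *
 *	tcf_queue_work(&f->rwork, foo_delete_filter_work);
 *
 * The foo_* names are placeholders; cls_flower's filter destroy work follows
 * this pattern.
 */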
142 
143 /* Select a new prio value from the range managed by the kernel. */
144 
145 static inline u32 tcf_auto_prio(struct tcf_proto *tp)
146 {
147 	u32 first = TC_H_MAKE(0xC0000000U, 0U);
148 
149 	if (tp)
150 		first = tp->prio - 1;
151 
152 	return TC_H_MAJ(first);
153 }
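/* Worked example (illustrative): prio values live in the upper 16 bits of the
 * handle (TC_H_MAJ).  With no filter to precede, the auto-allocated prio is
 * TC_H_MAJ(0xC0000000) == 0xC000 << 16.  If the filter the new one must go in
 * front of has tp->prio == 0xC000 << 16, the result is
 * TC_H_MAJ(0xC0000000 - 1) == 0xBFFF << 16, i.e. one major step lower, which
 * sorts ahead of it in the prio-ordered chain.
 */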
154 
155 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
156 					  u32 prio, struct tcf_chain *chain,
157 					  struct netlink_ext_ack *extack)
158 {
159 	struct tcf_proto *tp;
160 	int err;
161 
162 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
163 	if (!tp)
164 		return ERR_PTR(-ENOBUFS);
165 
166 	tp->ops = tcf_proto_lookup_ops(kind, extack);
167 	if (IS_ERR(tp->ops)) {
168 		err = PTR_ERR(tp->ops);
169 		goto errout;
170 	}
171 	tp->classify = tp->ops->classify;
172 	tp->protocol = protocol;
173 	tp->prio = prio;
174 	tp->chain = chain;
175 
176 	err = tp->ops->init(tp);
177 	if (err) {
178 		module_put(tp->ops->owner);
179 		goto errout;
180 	}
181 	return tp;
182 
183 errout:
184 	kfree(tp);
185 	return ERR_PTR(err);
186 }
187 
188 static void tcf_proto_destroy(struct tcf_proto *tp,
189 			      struct netlink_ext_ack *extack)
190 {
191 	tp->ops->destroy(tp, extack);
192 	module_put(tp->ops->owner);
193 	kfree_rcu(tp, rcu);
194 }
195 
196 struct tcf_filter_chain_list_item {
197 	struct list_head list;
198 	tcf_chain_head_change_t *chain_head_change;
199 	void *chain_head_change_priv;
200 };
201 
202 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
203 					  u32 chain_index)
204 {
205 	struct tcf_chain *chain;
206 
207 	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
208 	if (!chain)
209 		return NULL;
210 	list_add_tail(&chain->list, &block->chain_list);
211 	chain->block = block;
212 	chain->index = chain_index;
213 	chain->refcnt = 1;
214 	if (!chain->index)
215 		block->chain0.chain = chain;
216 	return chain;
217 }
218 
219 static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
220 				       struct tcf_proto *tp_head)
221 {
222 	if (item->chain_head_change)
223 		item->chain_head_change(tp_head, item->chain_head_change_priv);
224 }
225 
226 static void tcf_chain0_head_change(struct tcf_chain *chain,
227 				   struct tcf_proto *tp_head)
228 {
229 	struct tcf_filter_chain_list_item *item;
230 	struct tcf_block *block = chain->block;
231 
232 	if (chain->index)
233 		return;
234 	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
235 		tcf_chain_head_change_item(item, tp_head);
236 }
237 
238 static void tcf_chain_destroy(struct tcf_chain *chain)
239 {
240 	struct tcf_block *block = chain->block;
241 
242 	list_del(&chain->list);
243 	if (!chain->index)
244 		block->chain0.chain = NULL;
245 	kfree(chain);
246 	if (list_empty(&block->chain_list) && !refcount_read(&block->refcnt))
247 		kfree_rcu(block, rcu);
248 }
249 
250 static void tcf_chain_hold(struct tcf_chain *chain)
251 {
252 	++chain->refcnt;
253 }
254 
255 static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
256 {
257 	/* In case all the references are action references, this
258 	 * chain should not be shown to the user.
259 	 */
260 	return chain->refcnt == chain->action_refcnt;
261 }
262 
263 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
264 					  u32 chain_index)
265 {
266 	struct tcf_chain *chain;
267 
268 	list_for_each_entry(chain, &block->chain_list, list) {
269 		if (chain->index == chain_index)
270 			return chain;
271 	}
272 	return NULL;
273 }
274 
275 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
276 			   u32 seq, u16 flags, int event, bool unicast);
277 
278 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
279 					 u32 chain_index, bool create,
280 					 bool by_act)
281 {
282 	struct tcf_chain *chain = tcf_chain_lookup(block, chain_index);
283 
284 	if (chain) {
285 		tcf_chain_hold(chain);
286 	} else {
287 		if (!create)
288 			return NULL;
289 		chain = tcf_chain_create(block, chain_index);
290 		if (!chain)
291 			return NULL;
292 	}
293 
294 	if (by_act)
295 		++chain->action_refcnt;
296 
297 	/* Send notification only in case we got the first
298 	 * non-action reference. Until then, the chain acts only as
299 	 * a placeholder for actions pointing to it, and the user ought
300 	 * not to know about them.
301 	 */
302 	if (chain->refcnt - chain->action_refcnt == 1 && !by_act)
303 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
304 				RTM_NEWCHAIN, false);
305 
306 	return chain;
307 }
308 
309 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
310 				       bool create)
311 {
312 	return __tcf_chain_get(block, chain_index, create, false);
313 }
314 
315 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
316 {
317 	return __tcf_chain_get(block, chain_index, true, true);
318 }
319 EXPORT_SYMBOL(tcf_chain_get_by_act);
320 
321 static void tc_chain_tmplt_del(struct tcf_chain *chain);
322 
323 static void __tcf_chain_put(struct tcf_chain *chain, bool by_act)
324 {
325 	if (by_act)
326 		chain->action_refcnt--;
327 	chain->refcnt--;
328 
329 	/* The last dropped non-action reference will trigger notification. */
330 	if (chain->refcnt - chain->action_refcnt == 0 && !by_act)
331 		tc_chain_notify(chain, NULL, 0, 0, RTM_DELCHAIN, false);
332 
333 	if (chain->refcnt == 0) {
334 		tc_chain_tmplt_del(chain);
335 		tcf_chain_destroy(chain);
336 	}
337 }
338 
339 static void tcf_chain_put(struct tcf_chain *chain)
340 {
341 	__tcf_chain_put(chain, false);
342 }
343 
344 void tcf_chain_put_by_act(struct tcf_chain *chain)
345 {
346 	__tcf_chain_put(chain, true);
347 }
348 EXPORT_SYMBOL(tcf_chain_put_by_act);
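/* Usage sketch (illustrative): the action layer takes these "by_act"
 * references when an action carries a "goto chain" destination, so the target
 * chain stays alive even with no filters and no user-visible reference,
 * roughly:
 *
 *	chain = tcf_chain_get_by_act(block, chain_index);
 *	if (!chain)
 *		return -ENOMEM;
 *	...
 *	tcf_chain_put_by_act(chain);	(when the action is released)
 *
 * See the goto-chain handling in net/sched/act_api.c for the real callers.
 */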
349 
350 static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
351 {
352 	if (chain->explicitly_created)
353 		tcf_chain_put(chain);
354 }
355 
356 static void tcf_chain_flush(struct tcf_chain *chain)
357 {
358 	struct tcf_proto *tp = rtnl_dereference(chain->filter_chain);
359 
360 	tcf_chain0_head_change(chain, NULL);
361 	while (tp) {
362 		RCU_INIT_POINTER(chain->filter_chain, tp->next);
363 		tcf_proto_destroy(tp, NULL);
364 		tp = rtnl_dereference(chain->filter_chain);
365 		tcf_chain_put(chain);
366 	}
367 }
368 
369 static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
370 {
371 	const struct Qdisc_class_ops *cops;
372 	struct Qdisc *qdisc;
373 
374 	if (!dev_ingress_queue(dev))
375 		return NULL;
376 
377 	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
378 	if (!qdisc)
379 		return NULL;
380 
381 	cops = qdisc->ops->cl_ops;
382 	if (!cops)
383 		return NULL;
384 
385 	if (!cops->tcf_block)
386 		return NULL;
387 
388 	return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
389 }
390 
391 static struct rhashtable indr_setup_block_ht;
392 
393 struct tc_indr_block_dev {
394 	struct rhash_head ht_node;
395 	struct net_device *dev;
396 	unsigned int refcnt;
397 	struct list_head cb_list;
398 	struct tcf_block *block;
399 };
400 
401 struct tc_indr_block_cb {
402 	struct list_head list;
403 	void *cb_priv;
404 	tc_indr_block_bind_cb_t *cb;
405 	void *cb_ident;
406 };
407 
408 static const struct rhashtable_params tc_indr_setup_block_ht_params = {
409 	.key_offset	= offsetof(struct tc_indr_block_dev, dev),
410 	.head_offset	= offsetof(struct tc_indr_block_dev, ht_node),
411 	.key_len	= sizeof(struct net_device *),
412 };
413 
414 static struct tc_indr_block_dev *
415 tc_indr_block_dev_lookup(struct net_device *dev)
416 {
417 	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
418 				      tc_indr_setup_block_ht_params);
419 }
420 
421 static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
422 {
423 	struct tc_indr_block_dev *indr_dev;
424 
425 	indr_dev = tc_indr_block_dev_lookup(dev);
426 	if (indr_dev)
427 		goto inc_ref;
428 
429 	indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
430 	if (!indr_dev)
431 		return NULL;
432 
433 	INIT_LIST_HEAD(&indr_dev->cb_list);
434 	indr_dev->dev = dev;
435 	indr_dev->block = tc_dev_ingress_block(dev);
436 	if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
437 				   tc_indr_setup_block_ht_params)) {
438 		kfree(indr_dev);
439 		return NULL;
440 	}
441 
442 inc_ref:
443 	indr_dev->refcnt++;
444 	return indr_dev;
445 }
446 
447 static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
448 {
449 	if (--indr_dev->refcnt)
450 		return;
451 
452 	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
453 			       tc_indr_setup_block_ht_params);
454 	kfree(indr_dev);
455 }
456 
457 static struct tc_indr_block_cb *
458 tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
459 			tc_indr_block_bind_cb_t *cb, void *cb_ident)
460 {
461 	struct tc_indr_block_cb *indr_block_cb;
462 
463 	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
464 		if (indr_block_cb->cb == cb &&
465 		    indr_block_cb->cb_ident == cb_ident)
466 			return indr_block_cb;
467 	return NULL;
468 }
469 
470 static struct tc_indr_block_cb *
471 tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
472 		     tc_indr_block_bind_cb_t *cb, void *cb_ident)
473 {
474 	struct tc_indr_block_cb *indr_block_cb;
475 
476 	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
477 	if (indr_block_cb)
478 		return ERR_PTR(-EEXIST);
479 
480 	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
481 	if (!indr_block_cb)
482 		return ERR_PTR(-ENOMEM);
483 
484 	indr_block_cb->cb_priv = cb_priv;
485 	indr_block_cb->cb = cb;
486 	indr_block_cb->cb_ident = cb_ident;
487 	list_add(&indr_block_cb->list, &indr_dev->cb_list);
488 
489 	return indr_block_cb;
490 }
491 
492 static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
493 {
494 	list_del(&indr_block_cb->list);
495 	kfree(indr_block_cb);
496 }
497 
498 static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
499 				  struct tc_indr_block_cb *indr_block_cb,
500 				  enum tc_block_command command)
501 {
502 	struct tc_block_offload bo = {
503 		.command	= command,
504 		.binder_type	= TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
505 		.block		= indr_dev->block,
506 	};
507 
508 	if (!indr_dev->block)
509 		return;
510 
511 	indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
512 			  &bo);
513 }
514 
515 int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
516 				tc_indr_block_bind_cb_t *cb, void *cb_ident)
517 {
518 	struct tc_indr_block_cb *indr_block_cb;
519 	struct tc_indr_block_dev *indr_dev;
520 	int err;
521 
522 	indr_dev = tc_indr_block_dev_get(dev);
523 	if (!indr_dev)
524 		return -ENOMEM;
525 
526 	indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
527 	err = PTR_ERR_OR_ZERO(indr_block_cb);
528 	if (err)
529 		goto err_dev_put;
530 
531 	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_BIND);
532 	return 0;
533 
534 err_dev_put:
535 	tc_indr_block_dev_put(indr_dev);
536 	return err;
537 }
538 EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);
539 
540 int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
541 			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
542 {
543 	int err;
544 
545 	rtnl_lock();
546 	err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
547 	rtnl_unlock();
548 
549 	return err;
550 }
551 EXPORT_SYMBOL_GPL(tc_indr_block_cb_register);
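/* Usage sketch (illustrative): a driver that wants to offload filters bound
 * to a device it does not control (e.g. a vxlan tunnel netdev) registers an
 * indirect callback keyed on that device; TC_SETUP_BLOCK bind/unbind commands
 * for the device's ingress block are then replayed to it, roughly:
 *
 *	err = tc_indr_block_cb_register(tunnel_dev, app_priv,
 *					drv_indr_setup_tc_cb, app_priv);
 *
 * app_priv and drv_indr_setup_tc_cb are placeholders for the driver's own
 * state and tc_indr_block_bind_cb_t handler; the nfp driver is an in-tree
 * user of this interface.
 */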
552 
553 void __tc_indr_block_cb_unregister(struct net_device *dev,
554 				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
555 {
556 	struct tc_indr_block_cb *indr_block_cb;
557 	struct tc_indr_block_dev *indr_dev;
558 
559 	indr_dev = tc_indr_block_dev_lookup(dev);
560 	if (!indr_dev)
561 		return;
562 
563 	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
564 	if (!indr_block_cb)
565 		return;
566 
567 	/* Send unbind message if required to free any block cbs. */
568 	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_UNBIND);
569 	tc_indr_block_cb_del(indr_block_cb);
570 	tc_indr_block_dev_put(indr_dev);
571 }
572 EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister);
573 
574 void tc_indr_block_cb_unregister(struct net_device *dev,
575 				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
576 {
577 	rtnl_lock();
578 	__tc_indr_block_cb_unregister(dev, cb, cb_ident);
579 	rtnl_unlock();
580 }
581 EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);
582 
583 static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
584 			       struct tcf_block_ext_info *ei,
585 			       enum tc_block_command command,
586 			       struct netlink_ext_ack *extack)
587 {
588 	struct tc_indr_block_cb *indr_block_cb;
589 	struct tc_indr_block_dev *indr_dev;
590 	struct tc_block_offload bo = {
591 		.command	= command,
592 		.binder_type	= ei->binder_type,
593 		.block		= block,
594 		.extack		= extack,
595 	};
596 
597 	indr_dev = tc_indr_block_dev_lookup(dev);
598 	if (!indr_dev)
599 		return;
600 
601 	indr_dev->block = command == TC_BLOCK_BIND ? block : NULL;
602 
603 	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
604 		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
605 				  &bo);
606 }
607 
608 static bool tcf_block_offload_in_use(struct tcf_block *block)
609 {
610 	return block->offloadcnt;
611 }
612 
613 static int tcf_block_offload_cmd(struct tcf_block *block,
614 				 struct net_device *dev,
615 				 struct tcf_block_ext_info *ei,
616 				 enum tc_block_command command,
617 				 struct netlink_ext_ack *extack)
618 {
619 	struct tc_block_offload bo = {};
620 
621 	bo.command = command;
622 	bo.binder_type = ei->binder_type;
623 	bo.block = block;
624 	bo.extack = extack;
625 	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
626 }
627 
628 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
629 				  struct tcf_block_ext_info *ei,
630 				  struct netlink_ext_ack *extack)
631 {
632 	struct net_device *dev = q->dev_queue->dev;
633 	int err;
634 
635 	if (!dev->netdev_ops->ndo_setup_tc)
636 		goto no_offload_dev_inc;
637 
638 	/* If the tc offload feature is disabled and the block we try to bind
639 	 * to already has some offloaded filters, refuse to bind.
640 	 */
641 	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
642 		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
643 		return -EOPNOTSUPP;
644 	}
645 
646 	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
647 	if (err == -EOPNOTSUPP)
648 		goto no_offload_dev_inc;
649 	if (err)
650 		return err;
651 
652 	tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
653 	return 0;
654 
655 no_offload_dev_inc:
656 	if (tcf_block_offload_in_use(block))
657 		return -EOPNOTSUPP;
658 	block->nooffloaddevcnt++;
659 	tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
660 	return 0;
661 }
662 
663 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
664 				     struct tcf_block_ext_info *ei)
665 {
666 	struct net_device *dev = q->dev_queue->dev;
667 	int err;
668 
669 	tc_indr_block_call(block, dev, ei, TC_BLOCK_UNBIND, NULL);
670 
671 	if (!dev->netdev_ops->ndo_setup_tc)
672 		goto no_offload_dev_dec;
673 	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
674 	if (err == -EOPNOTSUPP)
675 		goto no_offload_dev_dec;
676 	return;
677 
678 no_offload_dev_dec:
679 	WARN_ON(block->nooffloaddevcnt-- == 0);
680 }
681 
682 static int
683 tcf_chain0_head_change_cb_add(struct tcf_block *block,
684 			      struct tcf_block_ext_info *ei,
685 			      struct netlink_ext_ack *extack)
686 {
687 	struct tcf_chain *chain0 = block->chain0.chain;
688 	struct tcf_filter_chain_list_item *item;
689 
690 	item = kmalloc(sizeof(*item), GFP_KERNEL);
691 	if (!item) {
692 		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
693 		return -ENOMEM;
694 	}
695 	item->chain_head_change = ei->chain_head_change;
696 	item->chain_head_change_priv = ei->chain_head_change_priv;
697 	if (chain0 && chain0->filter_chain)
698 		tcf_chain_head_change_item(item, chain0->filter_chain);
699 	list_add(&item->list, &block->chain0.filter_chain_list);
700 	return 0;
701 }
702 
703 static void
704 tcf_chain0_head_change_cb_del(struct tcf_block *block,
705 			      struct tcf_block_ext_info *ei)
706 {
707 	struct tcf_chain *chain0 = block->chain0.chain;
708 	struct tcf_filter_chain_list_item *item;
709 
710 	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
711 		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
712 		    (item->chain_head_change == ei->chain_head_change &&
713 		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
714 			if (chain0)
715 				tcf_chain_head_change_item(item, NULL);
716 			list_del(&item->list);
717 			kfree(item);
718 			return;
719 		}
720 	}
721 	WARN_ON(1);
722 }
723 
724 struct tcf_net {
725 	spinlock_t idr_lock; /* Protects idr */
726 	struct idr idr;
727 };
728 
729 static unsigned int tcf_net_id;
730 
731 static int tcf_block_insert(struct tcf_block *block, struct net *net,
732 			    struct netlink_ext_ack *extack)
733 {
734 	struct tcf_net *tn = net_generic(net, tcf_net_id);
735 	int err;
736 
737 	idr_preload(GFP_KERNEL);
738 	spin_lock(&tn->idr_lock);
739 	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
740 			    GFP_NOWAIT);
741 	spin_unlock(&tn->idr_lock);
742 	idr_preload_end();
743 
744 	return err;
745 }
746 
747 static void tcf_block_remove(struct tcf_block *block, struct net *net)
748 {
749 	struct tcf_net *tn = net_generic(net, tcf_net_id);
750 
751 	spin_lock(&tn->idr_lock);
752 	idr_remove(&tn->idr, block->index);
753 	spin_unlock(&tn->idr_lock);
754 }
755 
756 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
757 					  u32 block_index,
758 					  struct netlink_ext_ack *extack)
759 {
760 	struct tcf_block *block;
761 
762 	block = kzalloc(sizeof(*block), GFP_KERNEL);
763 	if (!block) {
764 		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
765 		return ERR_PTR(-ENOMEM);
766 	}
767 	INIT_LIST_HEAD(&block->chain_list);
768 	INIT_LIST_HEAD(&block->cb_list);
769 	INIT_LIST_HEAD(&block->owner_list);
770 	INIT_LIST_HEAD(&block->chain0.filter_chain_list);
771 
772 	refcount_set(&block->refcnt, 1);
773 	block->net = net;
774 	block->index = block_index;
775 
776 	/* Don't store q pointer for blocks which are shared */
777 	if (!tcf_block_shared(block))
778 		block->q = q;
779 	return block;
780 }
781 
782 static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
783 {
784 	struct tcf_net *tn = net_generic(net, tcf_net_id);
785 
786 	return idr_find(&tn->idr, block_index);
787 }
788 
789 static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
790 {
791 	struct tcf_block *block;
792 
793 	rcu_read_lock();
794 	block = tcf_block_lookup(net, block_index);
795 	if (block && !refcount_inc_not_zero(&block->refcnt))
796 		block = NULL;
797 	rcu_read_unlock();
798 
799 	return block;
800 }
801 
802 static void tcf_block_flush_all_chains(struct tcf_block *block)
803 {
804 	struct tcf_chain *chain;
805 
806 	/* Hold a refcnt for all chains, so that they don't disappear
807 	 * while we are iterating.
808 	 */
809 	list_for_each_entry(chain, &block->chain_list, list)
810 		tcf_chain_hold(chain);
811 
812 	list_for_each_entry(chain, &block->chain_list, list)
813 		tcf_chain_flush(chain);
814 }
815 
816 static void tcf_block_put_all_chains(struct tcf_block *block)
817 {
818 	struct tcf_chain *chain, *tmp;
819 
820 	/* At this point, all the chains should have refcnt >= 1. */
821 	list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
822 		tcf_chain_put_explicitly_created(chain);
823 		tcf_chain_put(chain);
824 	}
825 }
826 
827 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
828 			    struct tcf_block_ext_info *ei)
829 {
830 	if (refcount_dec_and_test(&block->refcnt)) {
831 		/* Flushing/putting all chains will cause the block to be
832 		 * deallocated when the last chain is freed. However, if chain_list
833 		 * is empty, the block has to be deallocated manually. Once the
834 		 * block reference counter has reached 0, it is no longer possible
835 		 * to increment it or add new chains to the block.
836 		 */
837 		bool free_block = list_empty(&block->chain_list);
838 
839 		if (tcf_block_shared(block))
840 			tcf_block_remove(block, block->net);
841 		if (!free_block)
842 			tcf_block_flush_all_chains(block);
843 
844 		if (q)
845 			tcf_block_offload_unbind(block, q, ei);
846 
847 		if (free_block)
848 			kfree_rcu(block, rcu);
849 		else
850 			tcf_block_put_all_chains(block);
851 	} else if (q) {
852 		tcf_block_offload_unbind(block, q, ei);
853 	}
854 }
855 
856 static void tcf_block_refcnt_put(struct tcf_block *block)
857 {
858 	__tcf_block_put(block, NULL, NULL);
859 }
860 
861 /* Find tcf block.
862  * Set q, parent, cl when appropriate.
863  */
864 
865 static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
866 					u32 *parent, unsigned long *cl,
867 					int ifindex, u32 block_index,
868 					struct netlink_ext_ack *extack)
869 {
870 	struct tcf_block *block;
871 	int err = 0;
872 
873 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
874 		block = tcf_block_refcnt_get(net, block_index);
875 		if (!block) {
876 			NL_SET_ERR_MSG(extack, "Block of given index was not found");
877 			return ERR_PTR(-EINVAL);
878 		}
879 	} else {
880 		const struct Qdisc_class_ops *cops;
881 		struct net_device *dev;
882 
883 		rcu_read_lock();
884 
885 		/* Find link */
886 		dev = dev_get_by_index_rcu(net, ifindex);
887 		if (!dev) {
888 			rcu_read_unlock();
889 			return ERR_PTR(-ENODEV);
890 		}
891 
892 		/* Find qdisc */
893 		if (!*parent) {
894 			*q = dev->qdisc;
895 			*parent = (*q)->handle;
896 		} else {
897 			*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
898 			if (!*q) {
899 				NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
900 				err = -EINVAL;
901 				goto errout_rcu;
902 			}
903 		}
904 
905 		*q = qdisc_refcount_inc_nz(*q);
906 		if (!*q) {
907 			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
908 			err = -EINVAL;
909 			goto errout_rcu;
910 		}
911 
912 		/* Is it classful? */
913 		cops = (*q)->ops->cl_ops;
914 		if (!cops) {
915 			NL_SET_ERR_MSG(extack, "Qdisc not classful");
916 			err = -EINVAL;
917 			goto errout_rcu;
918 		}
919 
920 		if (!cops->tcf_block) {
921 			NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
922 			err = -EOPNOTSUPP;
923 			goto errout_rcu;
924 		}
925 
926 		/* At this point we know that qdisc is not noop_qdisc,
927 		 * which means that qdisc holds a reference to net_device
928 		 * and we hold a reference to qdisc, so it is safe to release
929 		 * rcu read lock.
930 		 */
931 		rcu_read_unlock();
932 
933 		/* Are we searching for a filter attached to a class? */
934 		if (TC_H_MIN(*parent)) {
935 			*cl = cops->find(*q, *parent);
936 			if (*cl == 0) {
937 				NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
938 				err = -ENOENT;
939 				goto errout_qdisc;
940 			}
941 		}
942 
943 		/* And the last stroke */
944 		block = cops->tcf_block(*q, *cl, extack);
945 		if (!block) {
946 			err = -EINVAL;
947 			goto errout_qdisc;
948 		}
949 		if (tcf_block_shared(block)) {
950 			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
951 			err = -EOPNOTSUPP;
952 			goto errout_qdisc;
953 		}
954 
955 		/* Always take a reference to the block in order to support
956 		 * execution of the cls API rules update path without the rtnl
957 		 * lock. The caller must release the block when finished using
958 		 * it. The 'if' branch of this conditional obtains its reference
959 		 * to the block by calling tcf_block_refcnt_get().
960 		 */
961 		refcount_inc(&block->refcnt);
962 	}
963 
964 	return block;
965 
966 errout_rcu:
967 	rcu_read_unlock();
968 errout_qdisc:
969 	if (*q) {
970 		qdisc_put(*q);
971 		*q = NULL;
972 	}
973 	return ERR_PTR(err);
974 }
975 
976 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block)
977 {
978 	if (!IS_ERR_OR_NULL(block))
979 		tcf_block_refcnt_put(block);
980 
981 	if (q)
982 		qdisc_put(q);
983 }
984 
985 struct tcf_block_owner_item {
986 	struct list_head list;
987 	struct Qdisc *q;
988 	enum tcf_block_binder_type binder_type;
989 };
990 
991 static void
992 tcf_block_owner_netif_keep_dst(struct tcf_block *block,
993 			       struct Qdisc *q,
994 			       enum tcf_block_binder_type binder_type)
995 {
996 	if (block->keep_dst &&
997 	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
998 	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
999 		netif_keep_dst(qdisc_dev(q));
1000 }
1001 
1002 void tcf_block_netif_keep_dst(struct tcf_block *block)
1003 {
1004 	struct tcf_block_owner_item *item;
1005 
1006 	block->keep_dst = true;
1007 	list_for_each_entry(item, &block->owner_list, list)
1008 		tcf_block_owner_netif_keep_dst(block, item->q,
1009 					       item->binder_type);
1010 }
1011 EXPORT_SYMBOL(tcf_block_netif_keep_dst);
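/* Usage sketch (illustrative): classifiers whose keys depend on skb dst
 * information call this once on their block so the bound devices keep dst
 * entries on transmit, e.g. from a classifier's init or change path:
 *
 *	tcf_block_netif_keep_dst(tp->chain->block);
 *
 * Classifiers such as cls_flow and cls_route use this.
 */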
1012 
1013 static int tcf_block_owner_add(struct tcf_block *block,
1014 			       struct Qdisc *q,
1015 			       enum tcf_block_binder_type binder_type)
1016 {
1017 	struct tcf_block_owner_item *item;
1018 
1019 	item = kmalloc(sizeof(*item), GFP_KERNEL);
1020 	if (!item)
1021 		return -ENOMEM;
1022 	item->q = q;
1023 	item->binder_type = binder_type;
1024 	list_add(&item->list, &block->owner_list);
1025 	return 0;
1026 }
1027 
1028 static void tcf_block_owner_del(struct tcf_block *block,
1029 				struct Qdisc *q,
1030 				enum tcf_block_binder_type binder_type)
1031 {
1032 	struct tcf_block_owner_item *item;
1033 
1034 	list_for_each_entry(item, &block->owner_list, list) {
1035 		if (item->q == q && item->binder_type == binder_type) {
1036 			list_del(&item->list);
1037 			kfree(item);
1038 			return;
1039 		}
1040 	}
1041 	WARN_ON(1);
1042 }
1043 
1044 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1045 		      struct tcf_block_ext_info *ei,
1046 		      struct netlink_ext_ack *extack)
1047 {
1048 	struct net *net = qdisc_net(q);
1049 	struct tcf_block *block = NULL;
1050 	int err;
1051 
1052 	if (ei->block_index)
1053 		/* block_index not 0 means the shared block is requested */
1054 		block = tcf_block_refcnt_get(net, ei->block_index);
1055 
1056 	if (!block) {
1057 		block = tcf_block_create(net, q, ei->block_index, extack);
1058 		if (IS_ERR(block))
1059 			return PTR_ERR(block);
1060 		if (tcf_block_shared(block)) {
1061 			err = tcf_block_insert(block, net, extack);
1062 			if (err)
1063 				goto err_block_insert;
1064 		}
1065 	}
1066 
1067 	err = tcf_block_owner_add(block, q, ei->binder_type);
1068 	if (err)
1069 		goto err_block_owner_add;
1070 
1071 	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1072 
1073 	err = tcf_chain0_head_change_cb_add(block, ei, extack);
1074 	if (err)
1075 		goto err_chain0_head_change_cb_add;
1076 
1077 	err = tcf_block_offload_bind(block, q, ei, extack);
1078 	if (err)
1079 		goto err_block_offload_bind;
1080 
1081 	*p_block = block;
1082 	return 0;
1083 
1084 err_block_offload_bind:
1085 	tcf_chain0_head_change_cb_del(block, ei);
1086 err_chain0_head_change_cb_add:
1087 	tcf_block_owner_del(block, q, ei->binder_type);
1088 err_block_owner_add:
1089 err_block_insert:
1090 	tcf_block_refcnt_put(block);
1091 	return err;
1092 }
1093 EXPORT_SYMBOL(tcf_block_get_ext);
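/* Usage sketch (illustrative): qdiscs that manage the chain 0 head pointer
 * themselves (clsact/ingress style) fill in a tcf_block_ext_info and call
 * tcf_block_get_ext(), roughly:
 *
 *	q->block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
 *	q->block_info.chain_head_change = clsact_chain_head_change;
 *	q->block_info.chain_head_change_priv = &q->miniqp;
 *
 *	err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
 *
 * The names mirror what net/sched/sch_ingress.c does; a non-zero
 * ei->block_index requests a shared block.
 */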
1094 
1095 static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1096 {
1097 	struct tcf_proto __rcu **p_filter_chain = priv;
1098 
1099 	rcu_assign_pointer(*p_filter_chain, tp_head);
1100 }
1101 
1102 int tcf_block_get(struct tcf_block **p_block,
1103 		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1104 		  struct netlink_ext_ack *extack)
1105 {
1106 	struct tcf_block_ext_info ei = {
1107 		.chain_head_change = tcf_chain_head_change_dflt,
1108 		.chain_head_change_priv = p_filter_chain,
1109 	};
1110 
1111 	WARN_ON(!p_filter_chain);
1112 	return tcf_block_get_ext(p_block, q, &ei, extack);
1113 }
1114 EXPORT_SYMBOL(tcf_block_get);
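/* Usage sketch (illustrative): classful qdiscs that only need a filter list
 * per class typically call tcf_block_get() from their init/change path and
 * tcf_block_put() on destroy, roughly:
 *
 *	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
 *	if (err)
 *		goto errout;
 *	...
 *	tcf_block_put(cl->block);
 *
 * cl->block and cl->filter_list stand in for the qdisc's own per-class state;
 * see e.g. net/sched/sch_htb.c for a real caller.
 */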
1115 
1116 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
1117  * actions should all be removed after flushing.
1118  */
1119 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1120 		       struct tcf_block_ext_info *ei)
1121 {
1122 	if (!block)
1123 		return;
1124 	tcf_chain0_head_change_cb_del(block, ei);
1125 	tcf_block_owner_del(block, q, ei->binder_type);
1126 
1127 	__tcf_block_put(block, q, ei);
1128 }
1129 EXPORT_SYMBOL(tcf_block_put_ext);
1130 
1131 void tcf_block_put(struct tcf_block *block)
1132 {
1133 	struct tcf_block_ext_info ei = {0, };
1134 
1135 	if (!block)
1136 		return;
1137 	tcf_block_put_ext(block, block->q, &ei);
1138 }
1139 
1140 EXPORT_SYMBOL(tcf_block_put);
1141 
1142 struct tcf_block_cb {
1143 	struct list_head list;
1144 	tc_setup_cb_t *cb;
1145 	void *cb_ident;
1146 	void *cb_priv;
1147 	unsigned int refcnt;
1148 };
1149 
1150 void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
1151 {
1152 	return block_cb->cb_priv;
1153 }
1154 EXPORT_SYMBOL(tcf_block_cb_priv);
1155 
1156 struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
1157 					 tc_setup_cb_t *cb, void *cb_ident)
1158 {
	struct tcf_block_cb *block_cb;
1159 
1160 	list_for_each_entry(block_cb, &block->cb_list, list)
1161 		if (block_cb->cb == cb && block_cb->cb_ident == cb_ident)
1162 			return block_cb;
1163 	return NULL;
1164 }
1165 EXPORT_SYMBOL(tcf_block_cb_lookup);
1166 
1167 void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
1168 {
1169 	block_cb->refcnt++;
1170 }
1171 EXPORT_SYMBOL(tcf_block_cb_incref);
1172 
1173 unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
1174 {
1175 	return --block_cb->refcnt;
1176 }
1177 EXPORT_SYMBOL(tcf_block_cb_decref);
1178 
1179 static int
1180 tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
1181 			    void *cb_priv, bool add, bool offload_in_use,
1182 			    struct netlink_ext_ack *extack)
1183 {
1184 	struct tcf_chain *chain;
1185 	struct tcf_proto *tp;
1186 	int err;
1187 
1188 	list_for_each_entry(chain, &block->chain_list, list) {
1189 		for (tp = rtnl_dereference(chain->filter_chain); tp;
1190 		     tp = rtnl_dereference(tp->next)) {
1191 			if (tp->ops->reoffload) {
1192 				err = tp->ops->reoffload(tp, add, cb, cb_priv,
1193 							 extack);
1194 				if (err && add)
1195 					goto err_playback_remove;
1196 			} else if (add && offload_in_use) {
1197 				err = -EOPNOTSUPP;
1198 				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1199 				goto err_playback_remove;
1200 			}
1201 		}
1202 	}
1203 
1204 	return 0;
1205 
1206 err_playback_remove:
1207 	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1208 				    extack);
1209 	return err;
1210 }
1211 
1212 struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
1213 					     tc_setup_cb_t *cb, void *cb_ident,
1214 					     void *cb_priv,
1215 					     struct netlink_ext_ack *extack)
1216 {
1217 	struct tcf_block_cb *block_cb;
1218 	int err;
1219 
1220 	/* Replay any already present rules */
1221 	err = tcf_block_playback_offloads(block, cb, cb_priv, true,
1222 					  tcf_block_offload_in_use(block),
1223 					  extack);
1224 	if (err)
1225 		return ERR_PTR(err);
1226 
1227 	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
1228 	if (!block_cb)
1229 		return ERR_PTR(-ENOMEM);
1230 	block_cb->cb = cb;
1231 	block_cb->cb_ident = cb_ident;
1232 	block_cb->cb_priv = cb_priv;
1233 	list_add(&block_cb->list, &block->cb_list);
1234 	return block_cb;
1235 }
1236 EXPORT_SYMBOL(__tcf_block_cb_register);
1237 
1238 int tcf_block_cb_register(struct tcf_block *block,
1239 			  tc_setup_cb_t *cb, void *cb_ident,
1240 			  void *cb_priv, struct netlink_ext_ack *extack)
1241 {
1242 	struct tcf_block_cb *block_cb;
1243 
1244 	block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv,
1245 					   extack);
1246 	return PTR_ERR_OR_ZERO(block_cb);
1247 }
1248 EXPORT_SYMBOL(tcf_block_cb_register);
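/* Usage sketch (illustrative): drivers register a per-block callback from
 * their ndo_setup_tc(TC_SETUP_BLOCK) handler on bind and remove it on unbind,
 * roughly:
 *
 *	struct tc_block_offload *f = type_data;
 *
 *	switch (f->command) {
 *	case TC_BLOCK_BIND:
 *		return tcf_block_cb_register(f->block, drv_setup_tc_block_cb,
 *					     priv, priv, f->extack);
 *	case TC_BLOCK_UNBIND:
 *		tcf_block_cb_unregister(f->block, drv_setup_tc_block_cb, priv);
 *		return 0;
 *	default:
 *		return -EOPNOTSUPP;
 *	}
 *
 * drv_setup_tc_block_cb and priv are placeholders for the driver's
 * tc_setup_cb_t handler and private state.
 */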
1249 
1250 void __tcf_block_cb_unregister(struct tcf_block *block,
1251 			       struct tcf_block_cb *block_cb)
1252 {
1253 	tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv,
1254 				    false, tcf_block_offload_in_use(block),
1255 				    NULL);
1256 	list_del(&block_cb->list);
1257 	kfree(block_cb);
1258 }
1259 EXPORT_SYMBOL(__tcf_block_cb_unregister);
1260 
1261 void tcf_block_cb_unregister(struct tcf_block *block,
1262 			     tc_setup_cb_t *cb, void *cb_ident)
1263 {
1264 	struct tcf_block_cb *block_cb;
1265 
1266 	block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
1267 	if (!block_cb)
1268 		return;
1269 	__tcf_block_cb_unregister(block, block_cb);
1270 }
1271 EXPORT_SYMBOL(tcf_block_cb_unregister);
1272 
1273 static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
1274 			     void *type_data, bool err_stop)
1275 {
1276 	struct tcf_block_cb *block_cb;
1277 	int ok_count = 0;
1278 	int err;
1279 
1280 	/* Make sure all netdevs sharing this block are offload-capable. */
1281 	if (block->nooffloaddevcnt && err_stop)
1282 		return -EOPNOTSUPP;
1283 
1284 	list_for_each_entry(block_cb, &block->cb_list, list) {
1285 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
1286 		if (err) {
1287 			if (err_stop)
1288 				return err;
1289 		} else {
1290 			ok_count++;
1291 		}
1292 	}
1293 	return ok_count;
1294 }
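/* Return-value sketch (illustrative): a positive result is the number of
 * callbacks that accepted the offload, 0 means no callback took it, and a
 * negative errno is only propagated when err_stop is true (the "skip_sw"
 * case).  Classifier offload paths, which reach this helper through a
 * wrapper, therefore roughly do:
 *
 *	if (err < 0)
 *		abort the change: hardware was mandatory and refused;
 *	else if (err > 0)
 *		mark the filter TCA_CLS_FLAGS_IN_HW;
 *	else
 *		fall back to the software datapath only;
 *
 * cls_flower and cls_u32 follow this pattern in their hardware offload
 * helpers.
 */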
1295 
1296 /* Main classifier routine: scans the classifier chain attached
1297  * to this qdisc, (optionally) tests for protocol and asks the
1298  * specific classifiers.
1299  */
1300 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1301 		 struct tcf_result *res, bool compat_mode)
1302 {
1303 	__be16 protocol = tc_skb_protocol(skb);
1304 #ifdef CONFIG_NET_CLS_ACT
1305 	const int max_reclassify_loop = 4;
1306 	const struct tcf_proto *orig_tp = tp;
1307 	const struct tcf_proto *first_tp;
1308 	int limit = 0;
1309 
1310 reclassify:
1311 #endif
1312 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
1313 		int err;
1314 
1315 		if (tp->protocol != protocol &&
1316 		    tp->protocol != htons(ETH_P_ALL))
1317 			continue;
1318 
1319 		err = tp->classify(skb, tp, res);
1320 #ifdef CONFIG_NET_CLS_ACT
1321 		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1322 			first_tp = orig_tp;
1323 			goto reset;
1324 		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1325 			first_tp = res->goto_tp;
1326 			goto reset;
1327 		}
1328 #endif
1329 		if (err >= 0)
1330 			return err;
1331 	}
1332 
1333 	return TC_ACT_UNSPEC; /* signal: continue lookup */
1334 #ifdef CONFIG_NET_CLS_ACT
1335 reset:
1336 	if (unlikely(limit++ >= max_reclassify_loop)) {
1337 		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1338 				       tp->chain->block->index,
1339 				       tp->prio & 0xffff,
1340 				       ntohs(tp->protocol));
1341 		return TC_ACT_SHOT;
1342 	}
1343 
1344 	tp = first_tp;
1345 	protocol = tc_skb_protocol(skb);
1346 	goto reclassify;
1347 #endif
1348 }
1349 EXPORT_SYMBOL(tcf_classify);
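/* Usage sketch (illustrative): qdiscs call this from their enqueue/classify
 * path to map an skb to a class, roughly:
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	int result = tcf_classify(skb, fl, &res, false);
 *
 *	switch (result) {
 *	case TC_ACT_SHOT:
 *		drop the packet;
 *	case TC_ACT_STOLEN:
 *	case TC_ACT_TRAP:
 *		the packet was consumed by an action;
 *	default:
 *		use res.classid (or res.class) to pick the class;
 *	}
 *
 * q->filter_list stands in for the qdisc's own chain 0 head; see e.g.
 * htb_classify() or prio_classify() for real callers.
 */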
1350 
1351 struct tcf_chain_info {
1352 	struct tcf_proto __rcu **pprev;
1353 	struct tcf_proto __rcu *next;
1354 };
1355 
1356 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
1357 {
1358 	return rtnl_dereference(*chain_info->pprev);
1359 }
1360 
1361 static void tcf_chain_tp_insert(struct tcf_chain *chain,
1362 				struct tcf_chain_info *chain_info,
1363 				struct tcf_proto *tp)
1364 {
1365 	if (*chain_info->pprev == chain->filter_chain)
1366 		tcf_chain0_head_change(chain, tp);
1367 	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
1368 	rcu_assign_pointer(*chain_info->pprev, tp);
1369 	tcf_chain_hold(chain);
1370 }
1371 
1372 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1373 				struct tcf_chain_info *chain_info,
1374 				struct tcf_proto *tp)
1375 {
1376 	struct tcf_proto *next = rtnl_dereference(chain_info->next);
1377 
1378 	if (tp == chain->filter_chain)
1379 		tcf_chain0_head_change(chain, next);
1380 	RCU_INIT_POINTER(*chain_info->pprev, next);
1381 	tcf_chain_put(chain);
1382 }
1383 
1384 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1385 					   struct tcf_chain_info *chain_info,
1386 					   u32 protocol, u32 prio,
1387 					   bool prio_allocate)
1388 {
1389 	struct tcf_proto **pprev;
1390 	struct tcf_proto *tp;
1391 
1392 	/* Check the chain for existence of proto-tcf with this priority */
1393 	for (pprev = &chain->filter_chain;
1394 	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
1395 		if (tp->prio >= prio) {
1396 			if (tp->prio == prio) {
1397 				if (prio_allocate ||
1398 				    (tp->protocol != protocol && protocol))
1399 					return ERR_PTR(-EINVAL);
1400 			} else {
1401 				tp = NULL;
1402 			}
1403 			break;
1404 		}
1405 	}
1406 	chain_info->pprev = pprev;
1407 	chain_info->next = tp ? tp->next : NULL;
1408 	return tp;
1409 }
1410 
1411 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1412 			 struct tcf_proto *tp, struct tcf_block *block,
1413 			 struct Qdisc *q, u32 parent, void *fh,
1414 			 u32 portid, u32 seq, u16 flags, int event)
1415 {
1416 	struct tcmsg *tcm;
1417 	struct nlmsghdr  *nlh;
1418 	unsigned char *b = skb_tail_pointer(skb);
1419 
1420 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1421 	if (!nlh)
1422 		goto out_nlmsg_trim;
1423 	tcm = nlmsg_data(nlh);
1424 	tcm->tcm_family = AF_UNSPEC;
1425 	tcm->tcm__pad1 = 0;
1426 	tcm->tcm__pad2 = 0;
1427 	if (q) {
1428 		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1429 		tcm->tcm_parent = parent;
1430 	} else {
1431 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1432 		tcm->tcm_block_index = block->index;
1433 	}
1434 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1435 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1436 		goto nla_put_failure;
1437 	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1438 		goto nla_put_failure;
1439 	if (!fh) {
1440 		tcm->tcm_handle = 0;
1441 	} else {
1442 		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
1443 			goto nla_put_failure;
1444 	}
1445 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1446 	return skb->len;
1447 
1448 out_nlmsg_trim:
1449 nla_put_failure:
1450 	nlmsg_trim(skb, b);
1451 	return -1;
1452 }
1453 
1454 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1455 			  struct nlmsghdr *n, struct tcf_proto *tp,
1456 			  struct tcf_block *block, struct Qdisc *q,
1457 			  u32 parent, void *fh, int event, bool unicast)
1458 {
1459 	struct sk_buff *skb;
1460 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1461 
1462 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1463 	if (!skb)
1464 		return -ENOBUFS;
1465 
1466 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1467 			  n->nlmsg_seq, n->nlmsg_flags, event) <= 0) {
1468 		kfree_skb(skb);
1469 		return -EINVAL;
1470 	}
1471 
1472 	if (unicast)
1473 		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1474 
1475 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1476 			      n->nlmsg_flags & NLM_F_ECHO);
1477 }
1478 
1479 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1480 			      struct nlmsghdr *n, struct tcf_proto *tp,
1481 			      struct tcf_block *block, struct Qdisc *q,
1482 			      u32 parent, void *fh, bool unicast, bool *last,
1483 			      struct netlink_ext_ack *extack)
1484 {
1485 	struct sk_buff *skb;
1486 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1487 	int err;
1488 
1489 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1490 	if (!skb)
1491 		return -ENOBUFS;
1492 
1493 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1494 			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
1495 		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1496 		kfree_skb(skb);
1497 		return -EINVAL;
1498 	}
1499 
1500 	err = tp->ops->delete(tp, fh, last, extack);
1501 	if (err) {
1502 		kfree_skb(skb);
1503 		return err;
1504 	}
1505 
1506 	if (unicast)
1507 		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1508 
1509 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1510 			     n->nlmsg_flags & NLM_F_ECHO);
1511 	if (err < 0)
1512 		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1513 	return err;
1514 }
1515 
1516 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1517 				 struct tcf_block *block, struct Qdisc *q,
1518 				 u32 parent, struct nlmsghdr *n,
1519 				 struct tcf_chain *chain, int event)
1520 {
1521 	struct tcf_proto *tp;
1522 
1523 	for (tp = rtnl_dereference(chain->filter_chain);
1524 	     tp; tp = rtnl_dereference(tp->next))
1525 		tfilter_notify(net, oskb, n, tp, block,
1526 			       q, parent, NULL, event, false);
1527 }
1528 
1529 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1530 			  struct netlink_ext_ack *extack)
1531 {
1532 	struct net *net = sock_net(skb->sk);
1533 	struct nlattr *tca[TCA_MAX + 1];
1534 	struct tcmsg *t;
1535 	u32 protocol;
1536 	u32 prio;
1537 	bool prio_allocate;
1538 	u32 parent;
1539 	u32 chain_index;
1540 	struct Qdisc *q = NULL;
1541 	struct tcf_chain_info chain_info;
1542 	struct tcf_chain *chain = NULL;
1543 	struct tcf_block *block;
1544 	struct tcf_proto *tp;
1545 	unsigned long cl;
1546 	void *fh;
1547 	int err;
1548 	int tp_created;
1549 
1550 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1551 		return -EPERM;
1552 
1553 replay:
1554 	tp_created = 0;
1555 
1556 	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
1557 	if (err < 0)
1558 		return err;
1559 
1560 	t = nlmsg_data(n);
1561 	protocol = TC_H_MIN(t->tcm_info);
1562 	prio = TC_H_MAJ(t->tcm_info);
1563 	prio_allocate = false;
1564 	parent = t->tcm_parent;
1565 	cl = 0;
1566 
1567 	if (prio == 0) {
1568 		/* If no priority is provided by the user,
1569 		 * we allocate one.
1570 		 */
1571 		if (n->nlmsg_flags & NLM_F_CREATE) {
1572 			prio = TC_H_MAKE(0x80000000U, 0U);
1573 			prio_allocate = true;
1574 		} else {
1575 			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
1576 			return -ENOENT;
1577 		}
1578 	}
1579 
1580 	/* Find head of filter chain. */
1581 
1582 	block = tcf_block_find(net, &q, &parent, &cl,
1583 			       t->tcm_ifindex, t->tcm_block_index, extack);
1584 	if (IS_ERR(block)) {
1585 		err = PTR_ERR(block);
1586 		goto errout;
1587 	}
1588 
1589 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
1590 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
1591 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
1592 		err = -EINVAL;
1593 		goto errout;
1594 	}
1595 	chain = tcf_chain_get(block, chain_index, true);
1596 	if (!chain) {
1597 		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
1598 		err = -ENOMEM;
1599 		goto errout;
1600 	}
1601 
1602 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
1603 			       prio, prio_allocate);
1604 	if (IS_ERR(tp)) {
1605 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
1606 		err = PTR_ERR(tp);
1607 		goto errout;
1608 	}
1609 
1610 	if (tp == NULL) {
1611 		/* Proto-tcf does not exist, create a new one */
1612 
1613 		if (tca[TCA_KIND] == NULL || !protocol) {
1614 			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
1615 			err = -EINVAL;
1616 			goto errout;
1617 		}
1618 
1619 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1620 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
1621 			err = -ENOENT;
1622 			goto errout;
1623 		}
1624 
1625 		if (prio_allocate)
1626 			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));
1627 
1628 		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
1629 				      protocol, prio, chain, extack);
1630 		if (IS_ERR(tp)) {
1631 			err = PTR_ERR(tp);
1632 			goto errout;
1633 		}
1634 		tp_created = 1;
1635 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
1636 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
1637 		err = -EINVAL;
1638 		goto errout;
1639 	}
1640 
1641 	fh = tp->ops->get(tp, t->tcm_handle);
1642 
1643 	if (!fh) {
1644 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1645 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
1646 			err = -ENOENT;
1647 			goto errout;
1648 		}
1649 	} else if (n->nlmsg_flags & NLM_F_EXCL) {
1650 		NL_SET_ERR_MSG(extack, "Filter already exists");
1651 		err = -EEXIST;
1652 		goto errout;
1653 	}
1654 
1655 	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
1656 		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
1657 		err = -EINVAL;
1658 		goto errout;
1659 	}
1660 
1661 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
1662 			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
1663 			      extack);
1664 	if (err == 0) {
1665 		if (tp_created)
1666 			tcf_chain_tp_insert(chain, &chain_info, tp);
1667 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
1668 			       RTM_NEWTFILTER, false);
1669 	} else {
1670 		if (tp_created)
1671 			tcf_proto_destroy(tp, NULL);
1672 	}
1673 
1674 errout:
1675 	if (chain)
1676 		tcf_chain_put(chain);
1677 	tcf_block_release(q, block);
1678 	if (err == -EAGAIN)
1679 		/* Replay the request. */
1680 		goto replay;
1681 	return err;
1682 }
1683 
1684 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1685 			  struct netlink_ext_ack *extack)
1686 {
1687 	struct net *net = sock_net(skb->sk);
1688 	struct nlattr *tca[TCA_MAX + 1];
1689 	struct tcmsg *t;
1690 	u32 protocol;
1691 	u32 prio;
1692 	u32 parent;
1693 	u32 chain_index;
1694 	struct Qdisc *q = NULL;
1695 	struct tcf_chain_info chain_info;
1696 	struct tcf_chain *chain = NULL;
1697 	struct tcf_block *block;
1698 	struct tcf_proto *tp = NULL;
1699 	unsigned long cl = 0;
1700 	void *fh = NULL;
1701 	int err;
1702 
1703 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1704 		return -EPERM;
1705 
1706 	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
1707 	if (err < 0)
1708 		return err;
1709 
1710 	t = nlmsg_data(n);
1711 	protocol = TC_H_MIN(t->tcm_info);
1712 	prio = TC_H_MAJ(t->tcm_info);
1713 	parent = t->tcm_parent;
1714 
1715 	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
1716 		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
1717 		return -ENOENT;
1718 	}
1719 
1720 	/* Find head of filter chain. */
1721 
1722 	block = tcf_block_find(net, &q, &parent, &cl,
1723 			       t->tcm_ifindex, t->tcm_block_index, extack);
1724 	if (IS_ERR(block)) {
1725 		err = PTR_ERR(block);
1726 		goto errout;
1727 	}
1728 
1729 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
1730 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
1731 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
1732 		err = -EINVAL;
1733 		goto errout;
1734 	}
1735 	chain = tcf_chain_get(block, chain_index, false);
1736 	if (!chain) {
1737 		/* User requested flush on non-existent chain. Nothing to do,
1738 		 * so just return success.
1739 		 */
1740 		if (prio == 0) {
1741 			err = 0;
1742 			goto errout;
1743 		}
1744 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
1745 		err = -ENOENT;
1746 		goto errout;
1747 	}
1748 
1749 	if (prio == 0) {
1750 		tfilter_notify_chain(net, skb, block, q, parent, n,
1751 				     chain, RTM_DELTFILTER);
1752 		tcf_chain_flush(chain);
1753 		err = 0;
1754 		goto errout;
1755 	}
1756 
1757 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
1758 			       prio, false);
1759 	if (!tp || IS_ERR(tp)) {
1760 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
1761 		err = tp ? PTR_ERR(tp) : -ENOENT;
1762 		goto errout;
1763 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
1764 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
1765 		err = -EINVAL;
1766 		goto errout;
1767 	}
1768 
1769 	fh = tp->ops->get(tp, t->tcm_handle);
1770 
1771 	if (!fh) {
1772 		if (t->tcm_handle == 0) {
1773 			tcf_chain_tp_remove(chain, &chain_info, tp);
1774 			tfilter_notify(net, skb, n, tp, block, q, parent, fh,
1775 				       RTM_DELTFILTER, false);
1776 			tcf_proto_destroy(tp, extack);
1777 			err = 0;
1778 		} else {
1779 			NL_SET_ERR_MSG(extack, "Specified filter handle not found");
1780 			err = -ENOENT;
1781 		}
1782 	} else {
1783 		bool last;
1784 
1785 		err = tfilter_del_notify(net, skb, n, tp, block,
1786 					 q, parent, fh, false, &last,
1787 					 extack);
1788 		if (err)
1789 			goto errout;
1790 		if (last) {
1791 			tcf_chain_tp_remove(chain, &chain_info, tp);
1792 			tcf_proto_destroy(tp, extack);
1793 		}
1794 	}
1795 
1796 errout:
1797 	if (chain)
1798 		tcf_chain_put(chain);
1799 	tcf_block_release(q, block);
1800 	return err;
1801 }
1802 
1803 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1804 			  struct netlink_ext_ack *extack)
1805 {
1806 	struct net *net = sock_net(skb->sk);
1807 	struct nlattr *tca[TCA_MAX + 1];
1808 	struct tcmsg *t;
1809 	u32 protocol;
1810 	u32 prio;
1811 	u32 parent;
1812 	u32 chain_index;
1813 	struct Qdisc *q = NULL;
1814 	struct tcf_chain_info chain_info;
1815 	struct tcf_chain *chain = NULL;
1816 	struct tcf_block *block;
1817 	struct tcf_proto *tp = NULL;
1818 	unsigned long cl = 0;
1819 	void *fh = NULL;
1820 	int err;
1821 
1822 	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
1823 	if (err < 0)
1824 		return err;
1825 
1826 	t = nlmsg_data(n);
1827 	protocol = TC_H_MIN(t->tcm_info);
1828 	prio = TC_H_MAJ(t->tcm_info);
1829 	parent = t->tcm_parent;
1830 
1831 	if (prio == 0) {
1832 		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
1833 		return -ENOENT;
1834 	}
1835 
1836 	/* Find head of filter chain. */
1837 
1838 	block = tcf_block_find(net, &q, &parent, &cl,
1839 			       t->tcm_ifindex, t->tcm_block_index, extack);
1840 	if (IS_ERR(block)) {
1841 		err = PTR_ERR(block);
1842 		goto errout;
1843 	}
1844 
1845 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
1846 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
1847 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
1848 		err = -EINVAL;
1849 		goto errout;
1850 	}
1851 	chain = tcf_chain_get(block, chain_index, false);
1852 	if (!chain) {
1853 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
1854 		err = -EINVAL;
1855 		goto errout;
1856 	}
1857 
1858 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
1859 			       prio, false);
1860 	if (!tp || IS_ERR(tp)) {
1861 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
1862 		err = tp ? PTR_ERR(tp) : -ENOENT;
1863 		goto errout;
1864 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
1865 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
1866 		err = -EINVAL;
1867 		goto errout;
1868 	}
1869 
1870 	fh = tp->ops->get(tp, t->tcm_handle);
1871 
1872 	if (!fh) {
1873 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
1874 		err = -ENOENT;
1875 	} else {
1876 		err = tfilter_notify(net, skb, n, tp, block, q, parent,
1877 				     fh, RTM_NEWTFILTER, true);
1878 		if (err < 0)
1879 			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
1880 	}
1881 
1882 errout:
1883 	if (chain)
1884 		tcf_chain_put(chain);
1885 	tcf_block_release(q, block);
1886 	return err;
1887 }
1888 
1889 struct tcf_dump_args {
1890 	struct tcf_walker w;
1891 	struct sk_buff *skb;
1892 	struct netlink_callback *cb;
1893 	struct tcf_block *block;
1894 	struct Qdisc *q;
1895 	u32 parent;
1896 };
1897 
1898 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
1899 {
1900 	struct tcf_dump_args *a = (void *)arg;
1901 	struct net *net = sock_net(a->skb->sk);
1902 
1903 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
1904 			     n, NETLINK_CB(a->cb->skb).portid,
1905 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
1906 			     RTM_NEWTFILTER);
1907 }
1908 
1909 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
1910 			   struct sk_buff *skb, struct netlink_callback *cb,
1911 			   long index_start, long *p_index)
1912 {
1913 	struct net *net = sock_net(skb->sk);
1914 	struct tcf_block *block = chain->block;
1915 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
1916 	struct tcf_dump_args arg;
1917 	struct tcf_proto *tp;
1918 
1919 	for (tp = rtnl_dereference(chain->filter_chain);
1920 	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
1921 		if (*p_index < index_start)
1922 			continue;
1923 		if (TC_H_MAJ(tcm->tcm_info) &&
1924 		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
1925 			continue;
1926 		if (TC_H_MIN(tcm->tcm_info) &&
1927 		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
1928 			continue;
1929 		if (*p_index > index_start)
1930 			memset(&cb->args[1], 0,
1931 			       sizeof(cb->args) - sizeof(cb->args[0]));
1932 		if (cb->args[1] == 0) {
1933 			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
1934 					  NETLINK_CB(cb->skb).portid,
1935 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
1936 					  RTM_NEWTFILTER) <= 0)
1937 				return false;
1938 
1939 			cb->args[1] = 1;
1940 		}
1941 		if (!tp->ops->walk)
1942 			continue;
1943 		arg.w.fn = tcf_node_dump;
1944 		arg.skb = skb;
1945 		arg.cb = cb;
1946 		arg.block = block;
1947 		arg.q = q;
1948 		arg.parent = parent;
1949 		arg.w.stop = 0;
1950 		arg.w.skip = cb->args[1] - 1;
1951 		arg.w.count = 0;
1952 		arg.w.cookie = cb->args[2];
1953 		tp->ops->walk(tp, &arg.w);
1954 		cb->args[2] = arg.w.cookie;
1955 		cb->args[1] = arg.w.count + 1;
1956 		if (arg.w.stop)
1957 			return false;
1958 	}
1959 	return true;
1960 }
1961 
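/* RTM_GETTFILTER dump handler: walk every chain on the selected block or
 * qdisc class, using cb->args[] to resume across dump calls.
 */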
1962 /* called with RTNL */
1963 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
1964 {
1965 	struct net *net = sock_net(skb->sk);
1966 	struct nlattr *tca[TCA_MAX + 1];
1967 	struct Qdisc *q = NULL;
1968 	struct tcf_block *block;
1969 	struct tcf_chain *chain;
1970 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
1971 	long index_start;
1972 	long index;
1973 	u32 parent;
1974 	int err;
1975 
1976 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
1977 		return skb->len;
1978 
1979 	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL,
1980 			  cb->extack);
1981 	if (err)
1982 		return err;
1983 
1984 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
1985 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
1986 		if (!block)
1987 			goto out;
1988 		/* If we work with block index, q is NULL and parent value
1989 		 * will never be used in the following code. The check
1990 		 * in tcf_fill_node prevents it. However, the compiler does not
1991 		 * see that far, so set parent to zero to silence the warning
1992 		 * about parent being uninitialized.
1993 		 */
1994 		parent = 0;
1995 	} else {
1996 		const struct Qdisc_class_ops *cops;
1997 		struct net_device *dev;
1998 		unsigned long cl = 0;
1999 
2000 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2001 		if (!dev)
2002 			return skb->len;
2003 
2004 		parent = tcm->tcm_parent;
2005 		if (!parent) {
2006 			q = dev->qdisc;
2007 			parent = q->handle;
2008 		} else {
2009 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2010 		}
2011 		if (!q)
2012 			goto out;
2013 		cops = q->ops->cl_ops;
2014 		if (!cops)
2015 			goto out;
2016 		if (!cops->tcf_block)
2017 			goto out;
2018 		if (TC_H_MIN(tcm->tcm_parent)) {
2019 			cl = cops->find(q, tcm->tcm_parent);
2020 			if (cl == 0)
2021 				goto out;
2022 		}
2023 		block = cops->tcf_block(q, cl, NULL);
2024 		if (!block)
2025 			goto out;
2026 		if (tcf_block_shared(block))
2027 			q = NULL;
2028 	}
2029 
2030 	index_start = cb->args[0];
2031 	index = 0;
2032 
2033 	list_for_each_entry(chain, &block->chain_list, list) {
2034 		if (tca[TCA_CHAIN] &&
2035 		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2036 			continue;
2037 		if (!tcf_chain_dump(chain, q, parent, skb, cb,
2038 				    index_start, &index)) {
2039 			err = -EMSGSIZE;
2040 			break;
2041 		}
2042 	}
2043 
2044 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2045 		tcf_block_refcnt_put(block);
2046 	cb->args[0] = index;
2047 
2048 out:
2049 	/* If we made no progress, the error (EMSGSIZE) is real */
2050 	if (skb->len == 0 && err)
2051 		return err;
2052 	return skb->len;
2053 }
2054 
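/* Fill a single RTM_*CHAIN message for @chain, including the template kind
 * and the template's own dump when a template is attached.
 */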
2055 static int tc_chain_fill_node(struct tcf_chain *chain, struct net *net,
2056 			      struct sk_buff *skb, struct tcf_block *block,
2057 			      u32 portid, u32 seq, u16 flags, int event)
2058 {
2059 	unsigned char *b = skb_tail_pointer(skb);
2060 	const struct tcf_proto_ops *ops;
2061 	struct nlmsghdr *nlh;
2062 	struct tcmsg *tcm;
2063 	void *priv;
2064 
2065 	ops = chain->tmplt_ops;
2066 	priv = chain->tmplt_priv;
2067 
2068 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2069 	if (!nlh)
2070 		goto out_nlmsg_trim;
2071 	tcm = nlmsg_data(nlh);
2072 	tcm->tcm_family = AF_UNSPEC;
2073 	tcm->tcm__pad1 = 0;
2074 	tcm->tcm__pad2 = 0;
2075 	tcm->tcm_handle = 0;
2076 	if (block->q) {
2077 		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2078 		tcm->tcm_parent = block->q->handle;
2079 	} else {
2080 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2081 		tcm->tcm_block_index = block->index;
2082 	}
2083 
2084 	if (nla_put_u32(skb, TCA_CHAIN, chain->index))
2085 		goto nla_put_failure;
2086 
2087 	if (ops) {
2088 		if (nla_put_string(skb, TCA_KIND, ops->kind))
2089 			goto nla_put_failure;
2090 		if (ops->tmplt_dump(skb, net, priv) < 0)
2091 			goto nla_put_failure;
2092 	}
2093 
2094 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2095 	return skb->len;
2096 
2097 out_nlmsg_trim:
2098 nla_put_failure:
2099 	nlmsg_trim(skb, b);
2100 	return -EMSGSIZE;
2101 }
2102 
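/* Build a chain notification and send it either as a unicast reply to the
 * requester or as a broadcast to RTNLGRP_TC.
 */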
2103 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2104 			   u32 seq, u16 flags, int event, bool unicast)
2105 {
2106 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2107 	struct tcf_block *block = chain->block;
2108 	struct net *net = block->net;
2109 	struct sk_buff *skb;
2110 
2111 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2112 	if (!skb)
2113 		return -ENOBUFS;
2114 
2115 	if (tc_chain_fill_node(chain, net, skb, block, portid,
2116 			       seq, flags, event) <= 0) {
2117 		kfree_skb(skb);
2118 		return -EINVAL;
2119 	}
2120 
2121 	if (unicast)
2122 		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2123 
2124 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2125 }
2126 
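/* Attach a classifier template to @chain if TCA_KIND is present; the
 * classifier must provide the tmplt_create/tmplt_destroy/tmplt_dump ops.
 */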
2127 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2128 			      struct nlattr **tca,
2129 			      struct netlink_ext_ack *extack)
2130 {
2131 	const struct tcf_proto_ops *ops;
2132 	void *tmplt_priv;
2133 
2134 	/* If kind is not set, the user did not specify a template. */
2135 	if (!tca[TCA_KIND])
2136 		return 0;
2137 
2138 	ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), extack);
2139 	if (IS_ERR(ops))
2140 		return PTR_ERR(ops);
2141 	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2142 		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2143 		return -EOPNOTSUPP;
2144 	}
2145 
2146 	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2147 	if (IS_ERR(tmplt_priv)) {
2148 		module_put(ops->owner);
2149 		return PTR_ERR(tmplt_priv);
2150 	}
2151 	chain->tmplt_ops = ops;
2152 	chain->tmplt_priv = tmplt_priv;
2153 	return 0;
2154 }
2155 
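/* Destroy the chain's template, if one is attached, and release the
 * classifier module reference.
 */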
2156 static void tc_chain_tmplt_del(struct tcf_chain *chain)
2157 {
2158 	const struct tcf_proto_ops *ops = chain->tmplt_ops;
2159 
2160 	/* If template ops are not set, there is nothing for us to do. */
2161 	if (!ops)
2162 		return;
2163 
2164 	ops->tmplt_destroy(chain->tmplt_priv);
2165 	module_put(ops->owner);
2166 }
2167 
2168 /* Add/delete/get a chain */
2169 
2170 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2171 			struct netlink_ext_ack *extack)
2172 {
2173 	struct net *net = sock_net(skb->sk);
2174 	struct nlattr *tca[TCA_MAX + 1];
2175 	struct tcmsg *t;
2176 	u32 parent;
2177 	u32 chain_index;
2178 	struct Qdisc *q = NULL;
2179 	struct tcf_chain *chain = NULL;
2180 	struct tcf_block *block;
2181 	unsigned long cl;
2182 	int err;
2183 
2184 	if (n->nlmsg_type != RTM_GETCHAIN &&
2185 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2186 		return -EPERM;
2187 
2188 replay:
2189 	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
2190 	if (err < 0)
2191 		return err;
2192 
2193 	t = nlmsg_data(n);
2194 	parent = t->tcm_parent;
2195 	cl = 0;
2196 
2197 	block = tcf_block_find(net, &q, &parent, &cl,
2198 			       t->tcm_ifindex, t->tcm_block_index, extack);
2199 	if (IS_ERR(block))
2200 		return PTR_ERR(block);
2201 
2202 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2203 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2204 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2205 		err = -EINVAL;
2206 		goto errout_block;
2207 	}
2208 	chain = tcf_chain_lookup(block, chain_index);
2209 	if (n->nlmsg_type == RTM_NEWCHAIN) {
2210 		if (chain) {
2211 			if (tcf_chain_held_by_acts_only(chain)) {
2212 				/* The chain exists only because there is
2213 				 * some action referencing it.
2214 				 */
2215 				tcf_chain_hold(chain);
2216 			} else {
2217 				NL_SET_ERR_MSG(extack, "Filter chain already exists");
2218 				err = -EEXIST;
2219 				goto errout_block;
2220 			}
2221 		} else {
2222 			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2223 				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2224 				err = -ENOENT;
2225 				goto errout_block;
2226 			}
2227 			chain = tcf_chain_create(block, chain_index);
2228 			if (!chain) {
2229 				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2230 				err = -ENOMEM;
2231 				goto errout_block;
2232 			}
2233 		}
2234 	} else {
2235 		if (!chain || tcf_chain_held_by_acts_only(chain)) {
2236 			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2237 			err = -EINVAL;
2238 			goto errout_block;
2239 		}
2240 		tcf_chain_hold(chain);
2241 	}
2242 
2243 	switch (n->nlmsg_type) {
2244 	case RTM_NEWCHAIN:
2245 		err = tc_chain_tmplt_add(chain, net, tca, extack);
2246 		if (err)
2247 			goto errout;
2248 		/* The chain was added successfully; take an extra reference so
2249 		 * that an empty chain does not disappear at the end of this
2250 		 * function.
2251 		 */
2252 		tcf_chain_hold(chain);
2253 		chain->explicitly_created = true;
2254 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2255 				RTM_NEWCHAIN, false);
2256 		break;
2257 	case RTM_DELCHAIN:
2258 		tfilter_notify_chain(net, skb, block, q, parent, n,
2259 				     chain, RTM_DELTFILTER);
2260 		/* Flush the chain first as the user requested chain removal. */
2261 		tcf_chain_flush(chain);
2262 		/* Put the reference to the chain that was taken when the chain
2263 		 * was explicitly created.
2264 		 */
2265 		tcf_chain_put_explicitly_created(chain);
2266 		chain->explicitly_created = false;
2267 		break;
2268 	case RTM_GETCHAIN:
2269 		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2270 				      n->nlmsg_seq, n->nlmsg_type, true);
2271 		if (err < 0)
2272 			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2273 		break;
2274 	default:
2275 		err = -EOPNOTSUPP;
2276 		NL_SET_ERR_MSG(extack, "Unsupported message type");
2277 		goto errout;
2278 	}
2279 
2280 errout:
2281 	tcf_chain_put(chain);
2282 errout_block:
2283 	tcf_block_release(q, block);
2284 	if (err == -EAGAIN)
2285 		/* Replay the request. */
2286 		goto replay;
2287 	return err;
2288 }
2289 
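/* RTM_GETCHAIN dump handler: report the chains on the selected block or
 * qdisc class, skipping chains that exist only because an action
 * references them.
 */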
2290 /* called with RTNL */
2291 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2292 {
2293 	struct net *net = sock_net(skb->sk);
2294 	struct nlattr *tca[TCA_MAX + 1];
2295 	struct Qdisc *q = NULL;
2296 	struct tcf_block *block;
2297 	struct tcf_chain *chain;
2298 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2299 	long index_start;
2300 	long index;
2301 	u32 parent;
2302 	int err;
2303 
2304 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2305 		return skb->len;
2306 
2307 	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
2308 			  cb->extack);
2309 	if (err)
2310 		return err;
2311 
2312 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2313 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2314 		if (!block)
2315 			goto out;
2316 		/* If we work with block index, q is NULL and the parent value
2317 		 * is never used in the following code, but the compiler cannot
2318 		 * see that far, so set parent to zero to silence the warning
2319 		 * about parent being uninitialized.
2321 		 */
2322 		parent = 0;
2323 	} else {
2324 		const struct Qdisc_class_ops *cops;
2325 		struct net_device *dev;
2326 		unsigned long cl = 0;
2327 
2328 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2329 		if (!dev)
2330 			return skb->len;
2331 
2332 		parent = tcm->tcm_parent;
2333 		if (!parent) {
2334 			q = dev->qdisc;
2335 			parent = q->handle;
2336 		} else {
2337 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2338 		}
2339 		if (!q)
2340 			goto out;
2341 		cops = q->ops->cl_ops;
2342 		if (!cops)
2343 			goto out;
2344 		if (!cops->tcf_block)
2345 			goto out;
2346 		if (TC_H_MIN(tcm->tcm_parent)) {
2347 			cl = cops->find(q, tcm->tcm_parent);
2348 			if (cl == 0)
2349 				goto out;
2350 		}
2351 		block = cops->tcf_block(q, cl, NULL);
2352 		if (!block)
2353 			goto out;
2354 		if (tcf_block_shared(block))
2355 			q = NULL;
2356 	}
2357 
2358 	index_start = cb->args[0];
2359 	index = 0;
2360 
2361 	list_for_each_entry(chain, &block->chain_list, list) {
2362 		if ((tca[TCA_CHAIN] &&
2363 		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2364 			continue;
2365 		if (index < index_start) {
2366 			index++;
2367 			continue;
2368 		}
2369 		if (tcf_chain_held_by_acts_only(chain))
2370 			continue;
2371 		err = tc_chain_fill_node(chain, net, skb, block,
2372 					 NETLINK_CB(cb->skb).portid,
2373 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2374 					 RTM_NEWCHAIN);
2375 		if (err <= 0)
2376 			break;
2377 		index++;
2378 	}
2379 
2380 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2381 		tcf_block_refcnt_put(block);
2382 	cb->args[0] = index;
2383 
2384 out:
2385 	/* If we made no progress, the error (EMSGSIZE) is real */
2386 	if (skb->len == 0 && err)
2387 		return err;
2388 	return skb->len;
2389 }
2390 
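/* Unbind and free all actions attached to a filter's extensions. */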
2391 void tcf_exts_destroy(struct tcf_exts *exts)
2392 {
2393 #ifdef CONFIG_NET_CLS_ACT
2394 	tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
2395 	kfree(exts->actions);
2396 	exts->nr_actions = 0;
2397 #endif
2398 }
2399 EXPORT_SYMBOL(tcf_exts_destroy);
2400 
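/* Parse the action attributes attached to a filter and bind the resulting
 * actions into @exts; handles both the old single-"police" attribute and
 * the generic action list.
 */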
2401 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
2402 		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
2403 		      struct netlink_ext_ack *extack)
2404 {
2405 #ifdef CONFIG_NET_CLS_ACT
2406 	{
2407 		struct tc_action *act;
2408 		size_t attr_size = 0;
2409 
2410 		if (exts->police && tb[exts->police]) {
2411 			act = tcf_action_init_1(net, tp, tb[exts->police],
2412 						rate_tlv, "police", ovr,
2413 						TCA_ACT_BIND, true, extack);
2414 			if (IS_ERR(act))
2415 				return PTR_ERR(act);
2416 
2417 			act->type = exts->type = TCA_OLD_COMPAT;
2418 			exts->actions[0] = act;
2419 			exts->nr_actions = 1;
2420 		} else if (exts->action && tb[exts->action]) {
2421 			int err;
2422 
2423 			err = tcf_action_init(net, tp, tb[exts->action],
2424 					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
2425 					      exts->actions, &attr_size, true,
2426 					      extack);
2427 			if (err < 0)
2428 				return err;
2429 			exts->nr_actions = err;
2430 		}
2431 		exts->net = net;
2432 	}
2433 #else
2434 	if ((exts->action && tb[exts->action]) ||
2435 	    (exts->police && tb[exts->police])) {
2436 		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
2437 		return -EOPNOTSUPP;
2438 	}
2439 #endif
2440 
2441 	return 0;
2442 }
2443 EXPORT_SYMBOL(tcf_exts_validate);
2444 
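/* Replace the extensions in @dst with those in @src and free the old ones. */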
2445 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
2446 {
2447 #ifdef CONFIG_NET_CLS_ACT
2448 	struct tcf_exts old = *dst;
2449 
2450 	*dst = *src;
2451 	tcf_exts_destroy(&old);
2452 #endif
2453 }
2454 EXPORT_SYMBOL(tcf_exts_change);
2455 
2456 #ifdef CONFIG_NET_CLS_ACT
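/* Return the first action bound to @exts, or NULL if there is none. */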
2457 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
2458 {
2459 	if (exts->nr_actions == 0)
2460 		return NULL;
2461 	else
2462 		return exts->actions[0];
2463 }
2464 #endif
2465 
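/* Dump a filter's actions, using either the generic nested action list or
 * the old single-police format, depending on how they were configured.
 */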
2466 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
2467 {
2468 #ifdef CONFIG_NET_CLS_ACT
2469 	struct nlattr *nest;
2470 
2471 	if (exts->action && tcf_exts_has_actions(exts)) {
2472 		/*
2473 		 * Again for backward-compatible mode - we want to work with
2474 		 * both old and new modes of entering tc data even if iproute2
2475 		 * is newer - jhs
2476 		 */
2477 		if (exts->type != TCA_OLD_COMPAT) {
2478 			nest = nla_nest_start(skb, exts->action);
2479 			if (nest == NULL)
2480 				goto nla_put_failure;
2481 
2482 			if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
2483 				goto nla_put_failure;
2484 			nla_nest_end(skb, nest);
2485 		} else if (exts->police) {
2486 			struct tc_action *act = tcf_exts_first_act(exts);
2487 			nest = nla_nest_start(skb, exts->police);
2488 			if (nest == NULL || !act)
2489 				goto nla_put_failure;
2490 			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
2491 				goto nla_put_failure;
2492 			nla_nest_end(skb, nest);
2493 		}
2494 	}
2495 	return 0;
2496 
2497 nla_put_failure:
2498 	nla_nest_cancel(skb, nest);
2499 	return -1;
2500 #else
2501 	return 0;
2502 #endif
2503 }
2504 EXPORT_SYMBOL(tcf_exts_dump);
2505 
2506 
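/* Dump the statistics of the first bound action. */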
2507 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
2508 {
2509 #ifdef CONFIG_NET_CLS_ACT
2510 	struct tc_action *a = tcf_exts_first_act(exts);
2511 	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
2512 		return -1;
2513 #endif
2514 	return 0;
2515 }
2516 EXPORT_SYMBOL(tcf_exts_dump_stats);
2517 
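/* Offer the offload request to the egress device of every bound action that
 * exposes one (e.g. mirred); returns the number of successful callbacks or
 * a negative error.
 */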
2518 static int tc_exts_setup_cb_egdev_call(struct tcf_exts *exts,
2519 				       enum tc_setup_type type,
2520 				       void *type_data, bool err_stop)
2521 {
2522 	int ok_count = 0;
2523 #ifdef CONFIG_NET_CLS_ACT
2524 	const struct tc_action *a;
2525 	struct net_device *dev;
2526 	int i, ret;
2527 
2528 	if (!tcf_exts_has_actions(exts))
2529 		return 0;
2530 
2531 	for (i = 0; i < exts->nr_actions; i++) {
2532 		a = exts->actions[i];
2533 		if (!a->ops->get_dev)
2534 			continue;
2535 		dev = a->ops->get_dev(a);
2536 		if (!dev)
2537 			continue;
2538 		ret = tc_setup_cb_egdev_call(dev, type, type_data, err_stop);
2539 		a->ops->put_dev(dev);
2540 		if (ret < 0)
2541 			return ret;
2542 		ok_count += ret;
2543 	}
2544 #endif
2545 	return ok_count;
2546 }
2547 
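/* Run an offload request through the block's registered callbacks and, if
 * none of them handled it, fall back to the action egress devices.
 */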
2548 int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
2549 		     enum tc_setup_type type, void *type_data, bool err_stop)
2550 {
2551 	int ok_count;
2552 	int ret;
2553 
2554 	ret = tcf_block_cb_call(block, type, type_data, err_stop);
2555 	if (ret < 0)
2556 		return ret;
2557 	ok_count = ret;
2558 
2559 	if (!exts || ok_count)
2560 		return ok_count;
2561 	ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
2562 	if (ret < 0)
2563 		return ret;
2564 	ok_count += ret;
2565 
2566 	return ok_count;
2567 }
2568 EXPORT_SYMBOL(tc_setup_cb_call);
2569 
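/* Per-netns init/exit for the IDR that maps block indexes to blocks. */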
2570 static __net_init int tcf_net_init(struct net *net)
2571 {
2572 	struct tcf_net *tn = net_generic(net, tcf_net_id);
2573 
2574 	spin_lock_init(&tn->idr_lock);
2575 	idr_init(&tn->idr);
2576 	return 0;
2577 }
2578 
2579 static void __net_exit tcf_net_exit(struct net *net)
2580 {
2581 	struct tcf_net *tn = net_generic(net, tcf_net_id);
2582 
2583 	idr_destroy(&tn->idr);
2584 }
2585 
2586 static struct pernet_operations tcf_net_ops = {
2587 	.init = tcf_net_init,
2588 	.exit = tcf_net_exit,
2589 	.id   = &tcf_net_id,
2590 	.size = sizeof(struct tcf_net),
2591 };
2592 
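/* Subsystem init: allocate the filter workqueue, register pernet state and
 * the indirect block hashtable, then hook up the rtnetlink handlers for
 * filters and chains.
 */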
2593 static int __init tc_filter_init(void)
2594 {
2595 	int err;
2596 
2597 	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
2598 	if (!tc_filter_wq)
2599 		return -ENOMEM;
2600 
2601 	err = register_pernet_subsys(&tcf_net_ops);
2602 	if (err)
2603 		goto err_register_pernet_subsys;
2604 
2605 	err = rhashtable_init(&indr_setup_block_ht,
2606 			      &tc_indr_setup_block_ht_params);
2607 	if (err)
2608 		goto err_rhash_setup_block_ht;
2609 
2610 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 0);
2611 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 0);
2612 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
2613 		      tc_dump_tfilter, 0);
2614 	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
2615 	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
2616 	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
2617 		      tc_dump_chain, 0);
2618 
2619 	return 0;
2620 
2621 err_rhash_setup_block_ht:
2622 	unregister_pernet_subsys(&tcf_net_ops);
2623 err_register_pernet_subsys:
2624 	destroy_workqueue(tc_filter_wq);
2625 	return err;
2626 }
2627 
2628 subsys_initcall(tc_filter_init);
2629