1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_api.c	Packet classifier API.
4  *
5  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  *
7  * Changes:
8  *
9  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
10  */
11 
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/skbuff.h>
19 #include <linux/init.h>
20 #include <linux/kmod.h>
21 #include <linux/slab.h>
22 #include <linux/idr.h>
23 #include <linux/jhash.h>
24 #include <linux/rculist.h>
25 #include <net/net_namespace.h>
26 #include <net/sock.h>
27 #include <net/netlink.h>
28 #include <net/pkt_sched.h>
29 #include <net/pkt_cls.h>
30 #include <net/tc_act/tc_pedit.h>
31 #include <net/tc_act/tc_mirred.h>
32 #include <net/tc_act/tc_vlan.h>
33 #include <net/tc_act/tc_tunnel_key.h>
34 #include <net/tc_act/tc_csum.h>
35 #include <net/tc_act/tc_gact.h>
36 #include <net/tc_act/tc_police.h>
37 #include <net/tc_act/tc_sample.h>
38 #include <net/tc_act/tc_skbedit.h>
39 #include <net/tc_act/tc_ct.h>
40 #include <net/tc_act/tc_mpls.h>
41 #include <net/tc_act/tc_gate.h>
42 #include <net/flow_offload.h>
43 
44 extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
45 
46 /* The list of all installed classifier types */
47 static LIST_HEAD(tcf_proto_base);
48 
49 /* Protects the list of registered TC modules. It is a pure SMP lock. */
50 static DEFINE_RWLOCK(cls_mod_lock);
51 
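/* A tcf_proto undergoing destruction is tracked in the block's
 * proto_destroy_ht, keyed by (chain index, prio, protocol). This allows a
 * concurrent attempt to insert an identical proto to detect the destruction
 * in progress and ask the caller to retry (see tcf_chain_tp_insert_unique()
 * returning -EAGAIN below).
 */
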
52 static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
53 {
54 	return jhash_3words(tp->chain->index, tp->prio,
55 			    (__force __u32)tp->protocol, 0);
56 }
57 
58 static void tcf_proto_signal_destroying(struct tcf_chain *chain,
59 					struct tcf_proto *tp)
60 {
61 	struct tcf_block *block = chain->block;
62 
63 	mutex_lock(&block->proto_destroy_lock);
64 	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
65 		     destroy_obj_hashfn(tp));
66 	mutex_unlock(&block->proto_destroy_lock);
67 }
68 
69 static bool tcf_proto_cmp(const struct tcf_proto *tp1,
70 			  const struct tcf_proto *tp2)
71 {
72 	return tp1->chain->index == tp2->chain->index &&
73 	       tp1->prio == tp2->prio &&
74 	       tp1->protocol == tp2->protocol;
75 }
76 
77 static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
78 					struct tcf_proto *tp)
79 {
80 	u32 hash = destroy_obj_hashfn(tp);
81 	struct tcf_proto *iter;
82 	bool found = false;
83 
84 	rcu_read_lock();
85 	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
86 				   destroy_ht_node, hash) {
87 		if (tcf_proto_cmp(tp, iter)) {
88 			found = true;
89 			break;
90 		}
91 	}
92 	rcu_read_unlock();
93 
94 	return found;
95 }
96 
97 static void
98 tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
99 {
100 	struct tcf_block *block = chain->block;
101 
102 	mutex_lock(&block->proto_destroy_lock);
103 	if (hash_hashed(&tp->destroy_ht_node))
104 		hash_del_rcu(&tp->destroy_ht_node);
105 	mutex_unlock(&block->proto_destroy_lock);
106 }
107 
108 /* Find classifier type by string name */
109 
110 static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
111 {
112 	const struct tcf_proto_ops *t, *res = NULL;
113 
114 	if (kind) {
115 		read_lock(&cls_mod_lock);
116 		list_for_each_entry(t, &tcf_proto_base, head) {
117 			if (strcmp(kind, t->kind) == 0) {
118 				if (try_module_get(t->owner))
119 					res = t;
120 				break;
121 			}
122 		}
123 		read_unlock(&cls_mod_lock);
124 	}
125 	return res;
126 }
127 
128 static const struct tcf_proto_ops *
129 tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
130 		     struct netlink_ext_ack *extack)
131 {
132 	const struct tcf_proto_ops *ops;
133 
134 	ops = __tcf_proto_lookup_ops(kind);
135 	if (ops)
136 		return ops;
137 #ifdef CONFIG_MODULES
138 	if (rtnl_held)
139 		rtnl_unlock();
140 	request_module("cls_%s", kind);
141 	if (rtnl_held)
142 		rtnl_lock();
143 	ops = __tcf_proto_lookup_ops(kind);
144 	/* We dropped the RTNL semaphore in order to perform
145 	 * the module load. So, even if we succeeded in loading
146 	 * the module we have to replay the request. We indicate
147 	 * this using -EAGAIN.
148 	 */
149 	if (ops) {
150 		module_put(ops->owner);
151 		return ERR_PTR(-EAGAIN);
152 	}
153 #endif
154 	NL_SET_ERR_MSG(extack, "TC classifier not found");
155 	return ERR_PTR(-ENOENT);
156 }
157 
158 /* Register (unregister) a new classifier type */
159 
160 int register_tcf_proto_ops(struct tcf_proto_ops *ops)
161 {
162 	struct tcf_proto_ops *t;
163 	int rc = -EEXIST;
164 
165 	write_lock(&cls_mod_lock);
166 	list_for_each_entry(t, &tcf_proto_base, head)
167 		if (!strcmp(ops->kind, t->kind))
168 			goto out;
169 
170 	list_add_tail(&ops->head, &tcf_proto_base);
171 	rc = 0;
172 out:
173 	write_unlock(&cls_mod_lock);
174 	return rc;
175 }
176 EXPORT_SYMBOL(register_tcf_proto_ops);
177 
178 static struct workqueue_struct *tc_filter_wq;
179 
180 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
181 {
182 	struct tcf_proto_ops *t;
183 	int rc = -ENOENT;
184 
185 	/* Wait for outstanding call_rcu()s, if any, from a
186 	 * tcf_proto_ops's destroy() handler.
187 	 */
188 	rcu_barrier();
189 	flush_workqueue(tc_filter_wq);
190 
191 	write_lock(&cls_mod_lock);
192 	list_for_each_entry(t, &tcf_proto_base, head) {
193 		if (t == ops) {
194 			list_del(&t->head);
195 			rc = 0;
196 			break;
197 		}
198 	}
199 	write_unlock(&cls_mod_lock);
200 	return rc;
201 }
202 EXPORT_SYMBOL(unregister_tcf_proto_ops);
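
/* Hypothetical usage sketch (not part of this file): a classifier module
 * registers its ops on load and unregisters them on unload. The "example"
 * kind and the example_* callbacks are illustrative assumptions only.
 *
 *	static struct tcf_proto_ops example_ops __read_mostly = {
 *		.kind		= "example",
 *		.owner		= THIS_MODULE,
 *		.classify	= example_classify,
 *		.init		= example_init,
 *		.destroy	= example_destroy,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return register_tcf_proto_ops(&example_ops);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&example_ops);
 *	}
 */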
203 
204 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
205 {
206 	INIT_RCU_WORK(rwork, func);
207 	return queue_rcu_work(tc_filter_wq, rwork);
208 }
209 EXPORT_SYMBOL(tcf_queue_work);
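
/* Sketch of typical classifier usage (names hypothetical): defer freeing a
 * filter until after an RCU grace period, on the shared tc workqueue.
 *
 *	static void example_delete_filter_work(struct work_struct *work)
 *	{
 *		struct example_filter *f = container_of(to_rcu_work(work),
 *							struct example_filter,
 *							rwork);
 *
 *		kfree(f);
 *	}
 *
 *	...
 *	tcf_queue_work(&f->rwork, example_delete_filter_work);
 */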
210 
211 /* Select a new prio value from the range managed by the kernel. */
212 
213 static inline u32 tcf_auto_prio(struct tcf_proto *tp)
214 {
215 	u32 first = TC_H_MAKE(0xC0000000U, 0U);
216 
217 	if (tp)
218 		first = tp->prio - 1;
219 
220 	return TC_H_MAJ(first);
221 }
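
/* For example: on an empty chain the first auto-allocated prio is
 * TC_H_MAJ(0xC0000000U), which user space displays as 49152; if the current
 * head already has that prio, the next one allocated is 49151, and so on
 * downwards.
 */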
222 
223 static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
224 {
225 	if (kind)
226 		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
227 	memset(name, 0, IFNAMSIZ);
228 	return false;
229 }
230 
231 static bool tcf_proto_is_unlocked(const char *kind)
232 {
233 	const struct tcf_proto_ops *ops;
234 	bool ret;
235 
236 	if (strlen(kind) == 0)
237 		return false;
238 
239 	ops = tcf_proto_lookup_ops(kind, false, NULL);
240 	/* On error return false to take rtnl lock. Proto lookup/create
241 	 * functions will perform lookup again and properly handle errors.
242 	 */
243 	if (IS_ERR(ops))
244 		return false;
245 
246 	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
247 	module_put(ops->owner);
248 	return ret;
249 }
250 
251 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
252 					  u32 prio, struct tcf_chain *chain,
253 					  bool rtnl_held,
254 					  struct netlink_ext_ack *extack)
255 {
256 	struct tcf_proto *tp;
257 	int err;
258 
259 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
260 	if (!tp)
261 		return ERR_PTR(-ENOBUFS);
262 
263 	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
264 	if (IS_ERR(tp->ops)) {
265 		err = PTR_ERR(tp->ops);
266 		goto errout;
267 	}
268 	tp->classify = tp->ops->classify;
269 	tp->protocol = protocol;
270 	tp->prio = prio;
271 	tp->chain = chain;
272 	spin_lock_init(&tp->lock);
273 	refcount_set(&tp->refcnt, 1);
274 
275 	err = tp->ops->init(tp);
276 	if (err) {
277 		module_put(tp->ops->owner);
278 		goto errout;
279 	}
280 	return tp;
281 
282 errout:
283 	kfree(tp);
284 	return ERR_PTR(err);
285 }
286 
287 static void tcf_proto_get(struct tcf_proto *tp)
288 {
289 	refcount_inc(&tp->refcnt);
290 }
291 
292 static void tcf_chain_put(struct tcf_chain *chain);
293 
294 static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
295 			      bool sig_destroy, struct netlink_ext_ack *extack)
296 {
297 	tp->ops->destroy(tp, rtnl_held, extack);
298 	if (sig_destroy)
299 		tcf_proto_signal_destroyed(tp->chain, tp);
300 	tcf_chain_put(tp->chain);
301 	module_put(tp->ops->owner);
302 	kfree_rcu(tp, rcu);
303 }
304 
305 static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
306 			  struct netlink_ext_ack *extack)
307 {
308 	if (refcount_dec_and_test(&tp->refcnt))
309 		tcf_proto_destroy(tp, rtnl_held, true, extack);
310 }
311 
312 static bool tcf_proto_check_delete(struct tcf_proto *tp)
313 {
314 	if (tp->ops->delete_empty)
315 		return tp->ops->delete_empty(tp);
316 
317 	tp->deleting = true;
318 	return tp->deleting;
319 }
320 
321 static void tcf_proto_mark_delete(struct tcf_proto *tp)
322 {
323 	spin_lock(&tp->lock);
324 	tp->deleting = true;
325 	spin_unlock(&tp->lock);
326 }
327 
328 static bool tcf_proto_is_deleting(struct tcf_proto *tp)
329 {
330 	bool deleting;
331 
332 	spin_lock(&tp->lock);
333 	deleting = tp->deleting;
334 	spin_unlock(&tp->lock);
335 
336 	return deleting;
337 }
338 
339 #define ASSERT_BLOCK_LOCKED(block)					\
340 	lockdep_assert_held(&(block)->lock)
341 
342 struct tcf_filter_chain_list_item {
343 	struct list_head list;
344 	tcf_chain_head_change_t *chain_head_change;
345 	void *chain_head_change_priv;
346 };
347 
348 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
349 					  u32 chain_index)
350 {
351 	struct tcf_chain *chain;
352 
353 	ASSERT_BLOCK_LOCKED(block);
354 
355 	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
356 	if (!chain)
357 		return NULL;
358 	list_add_tail_rcu(&chain->list, &block->chain_list);
359 	mutex_init(&chain->filter_chain_lock);
360 	chain->block = block;
361 	chain->index = chain_index;
362 	chain->refcnt = 1;
363 	if (!chain->index)
364 		block->chain0.chain = chain;
365 	return chain;
366 }
367 
368 static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
369 				       struct tcf_proto *tp_head)
370 {
371 	if (item->chain_head_change)
372 		item->chain_head_change(tp_head, item->chain_head_change_priv);
373 }
374 
375 static void tcf_chain0_head_change(struct tcf_chain *chain,
376 				   struct tcf_proto *tp_head)
377 {
378 	struct tcf_filter_chain_list_item *item;
379 	struct tcf_block *block = chain->block;
380 
381 	if (chain->index)
382 		return;
383 
384 	mutex_lock(&block->lock);
385 	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
386 		tcf_chain_head_change_item(item, tp_head);
387 	mutex_unlock(&block->lock);
388 }
389 
390 /* Returns true if block can be safely freed. */
391 
392 static bool tcf_chain_detach(struct tcf_chain *chain)
393 {
394 	struct tcf_block *block = chain->block;
395 
396 	ASSERT_BLOCK_LOCKED(block);
397 
398 	list_del_rcu(&chain->list);
399 	if (!chain->index)
400 		block->chain0.chain = NULL;
401 
402 	if (list_empty(&block->chain_list) &&
403 	    refcount_read(&block->refcnt) == 0)
404 		return true;
405 
406 	return false;
407 }
408 
409 static void tcf_block_destroy(struct tcf_block *block)
410 {
411 	mutex_destroy(&block->lock);
412 	mutex_destroy(&block->proto_destroy_lock);
413 	kfree_rcu(block, rcu);
414 }
415 
416 static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
417 {
418 	struct tcf_block *block = chain->block;
419 
420 	mutex_destroy(&chain->filter_chain_lock);
421 	kfree_rcu(chain, rcu);
422 	if (free_block)
423 		tcf_block_destroy(block);
424 }
425 
426 static void tcf_chain_hold(struct tcf_chain *chain)
427 {
428 	ASSERT_BLOCK_LOCKED(chain->block);
429 
430 	++chain->refcnt;
431 }
432 
433 static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
434 {
435 	ASSERT_BLOCK_LOCKED(chain->block);
436 
437 	/* In case all the references are action references, this
438 	 * chain should not be shown to the user.
439 	 */
440 	return chain->refcnt == chain->action_refcnt;
441 }
442 
443 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
444 					  u32 chain_index)
445 {
446 	struct tcf_chain *chain;
447 
448 	ASSERT_BLOCK_LOCKED(block);
449 
450 	list_for_each_entry(chain, &block->chain_list, list) {
451 		if (chain->index == chain_index)
452 			return chain;
453 	}
454 	return NULL;
455 }
456 
457 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
458 static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
459 					      u32 chain_index)
460 {
461 	struct tcf_chain *chain;
462 
463 	list_for_each_entry_rcu(chain, &block->chain_list, list) {
464 		if (chain->index == chain_index)
465 			return chain;
466 	}
467 	return NULL;
468 }
469 #endif
470 
471 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
472 			   u32 seq, u16 flags, int event, bool unicast);
473 
474 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
475 					 u32 chain_index, bool create,
476 					 bool by_act)
477 {
478 	struct tcf_chain *chain = NULL;
479 	bool is_first_reference;
480 
481 	mutex_lock(&block->lock);
482 	chain = tcf_chain_lookup(block, chain_index);
483 	if (chain) {
484 		tcf_chain_hold(chain);
485 	} else {
486 		if (!create)
487 			goto errout;
488 		chain = tcf_chain_create(block, chain_index);
489 		if (!chain)
490 			goto errout;
491 	}
492 
493 	if (by_act)
494 		++chain->action_refcnt;
495 	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
496 	mutex_unlock(&block->lock);
497 
498 	/* Send a notification only when we get the first
499 	 * non-action reference. Until then, the chain acts only as
500 	 * a placeholder for actions pointing to it and the user
501 	 * ought not to know about it.
502 	 */
503 	if (is_first_reference && !by_act)
504 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
505 				RTM_NEWCHAIN, false);
506 
507 	return chain;
508 
509 errout:
510 	mutex_unlock(&block->lock);
511 	return chain;
512 }
513 
514 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
515 				       bool create)
516 {
517 	return __tcf_chain_get(block, chain_index, create, false);
518 }
519 
520 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
521 {
522 	return __tcf_chain_get(block, chain_index, true, true);
523 }
524 EXPORT_SYMBOL(tcf_chain_get_by_act);
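
/* Used by the actions subsystem when an action (e.g. "goto chain") needs to
 * hold a chain: such action references keep the chain alive but, unlike
 * regular references, do not make it visible to the user (see
 * tcf_chain_held_by_acts_only() above).
 */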
525 
526 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
527 			       void *tmplt_priv);
528 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
529 				  void *tmplt_priv, u32 chain_index,
530 				  struct tcf_block *block, struct sk_buff *oskb,
531 				  u32 seq, u16 flags, bool unicast);
532 
533 static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
534 			    bool explicitly_created)
535 {
536 	struct tcf_block *block = chain->block;
537 	const struct tcf_proto_ops *tmplt_ops;
538 	bool free_block = false;
539 	unsigned int refcnt;
540 	void *tmplt_priv;
541 
542 	mutex_lock(&block->lock);
543 	if (explicitly_created) {
544 		if (!chain->explicitly_created) {
545 			mutex_unlock(&block->lock);
546 			return;
547 		}
548 		chain->explicitly_created = false;
549 	}
550 
551 	if (by_act)
552 		chain->action_refcnt--;
553 
554 	/* tc_chain_notify_delete() can't be called while holding the block
555 	 * lock. However, once the block is unlocked the chain can change
556 	 * concurrently, so save these to temporary variables.
557 	 */
558 	refcnt = --chain->refcnt;
559 	tmplt_ops = chain->tmplt_ops;
560 	tmplt_priv = chain->tmplt_priv;
561 
562 	/* The last dropped non-action reference will trigger notification. */
563 	if (refcnt - chain->action_refcnt == 0 && !by_act) {
564 		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
565 				       block, NULL, 0, 0, false);
566 		/* Last reference to chain, no need to lock. */
567 		chain->flushing = false;
568 	}
569 
570 	if (refcnt == 0)
571 		free_block = tcf_chain_detach(chain);
572 	mutex_unlock(&block->lock);
573 
574 	if (refcnt == 0) {
575 		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
576 		tcf_chain_destroy(chain, free_block);
577 	}
578 }
579 
580 static void tcf_chain_put(struct tcf_chain *chain)
581 {
582 	__tcf_chain_put(chain, false, false);
583 }
584 
585 void tcf_chain_put_by_act(struct tcf_chain *chain)
586 {
587 	__tcf_chain_put(chain, true, false);
588 }
589 EXPORT_SYMBOL(tcf_chain_put_by_act);
590 
591 static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
592 {
593 	__tcf_chain_put(chain, false, true);
594 }
595 
596 static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
597 {
598 	struct tcf_proto *tp, *tp_next;
599 
600 	mutex_lock(&chain->filter_chain_lock);
601 	tp = tcf_chain_dereference(chain->filter_chain, chain);
602 	while (tp) {
603 		tp_next = rcu_dereference_protected(tp->next, 1);
604 		tcf_proto_signal_destroying(chain, tp);
605 		tp = tp_next;
606 	}
607 	tp = tcf_chain_dereference(chain->filter_chain, chain);
608 	RCU_INIT_POINTER(chain->filter_chain, NULL);
609 	tcf_chain0_head_change(chain, NULL);
610 	chain->flushing = true;
611 	mutex_unlock(&chain->filter_chain_lock);
612 
613 	while (tp) {
614 		tp_next = rcu_dereference_protected(tp->next, 1);
615 		tcf_proto_put(tp, rtnl_held, NULL);
616 		tp = tp_next;
617 	}
618 }
619 
620 static int tcf_block_setup(struct tcf_block *block,
621 			   struct flow_block_offload *bo);
622 
623 static void tcf_block_offload_init(struct flow_block_offload *bo,
624 				   struct net_device *dev, struct Qdisc *sch,
625 				   enum flow_block_command command,
626 				   enum flow_block_binder_type binder_type,
627 				   struct flow_block *flow_block,
628 				   bool shared, struct netlink_ext_ack *extack)
629 {
630 	bo->net = dev_net(dev);
631 	bo->command = command;
632 	bo->binder_type = binder_type;
633 	bo->block = flow_block;
634 	bo->block_shared = shared;
635 	bo->extack = extack;
636 	bo->sch = sch;
637 	INIT_LIST_HEAD(&bo->cb_list);
638 }
639 
640 static void tcf_block_unbind(struct tcf_block *block,
641 			     struct flow_block_offload *bo);
642 
643 static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
644 {
645 	struct tcf_block *block = block_cb->indr.data;
646 	struct net_device *dev = block_cb->indr.dev;
647 	struct Qdisc *sch = block_cb->indr.sch;
648 	struct netlink_ext_ack extack = {};
649 	struct flow_block_offload bo = {};
650 
651 	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
652 			       block_cb->indr.binder_type,
653 			       &block->flow_block, tcf_block_shared(block),
654 			       &extack);
655 	rtnl_lock();
656 	down_write(&block->cb_lock);
657 	list_del(&block_cb->driver_list);
658 	list_move(&block_cb->list, &bo.cb_list);
659 	tcf_block_unbind(block, &bo);
660 	up_write(&block->cb_lock);
661 	rtnl_unlock();
662 }
663 
664 static bool tcf_block_offload_in_use(struct tcf_block *block)
665 {
666 	return atomic_read(&block->offloadcnt);
667 }
668 
669 static int tcf_block_offload_cmd(struct tcf_block *block,
670 				 struct net_device *dev, struct Qdisc *sch,
671 				 struct tcf_block_ext_info *ei,
672 				 enum flow_block_command command,
673 				 struct netlink_ext_ack *extack)
674 {
675 	struct flow_block_offload bo = {};
676 
677 	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
678 			       &block->flow_block, tcf_block_shared(block),
679 			       extack);
680 
681 	if (dev->netdev_ops->ndo_setup_tc) {
682 		int err;
683 
684 		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
685 		if (err < 0) {
686 			if (err != -EOPNOTSUPP)
687 				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
688 			return err;
689 		}
690 
691 		return tcf_block_setup(block, &bo);
692 	}
693 
694 	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
695 				    tc_block_indr_cleanup);
696 	tcf_block_setup(block, &bo);
697 
698 	return -EOPNOTSUPP;
699 }
700 
701 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
702 				  struct tcf_block_ext_info *ei,
703 				  struct netlink_ext_ack *extack)
704 {
705 	struct net_device *dev = q->dev_queue->dev;
706 	int err;
707 
708 	down_write(&block->cb_lock);
709 
710 	/* If the tc offload feature is disabled and the block we try to bind
711 	 * to already has some offloaded filters, refuse to bind.
712 	 */
713 	if (dev->netdev_ops->ndo_setup_tc &&
714 	    !tc_can_offload(dev) &&
715 	    tcf_block_offload_in_use(block)) {
716 		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
717 		err = -EOPNOTSUPP;
718 		goto err_unlock;
719 	}
720 
721 	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
722 	if (err == -EOPNOTSUPP)
723 		goto no_offload_dev_inc;
724 	if (err)
725 		goto err_unlock;
726 
727 	up_write(&block->cb_lock);
728 	return 0;
729 
730 no_offload_dev_inc:
731 	if (tcf_block_offload_in_use(block))
732 		goto err_unlock;
733 
734 	err = 0;
735 	block->nooffloaddevcnt++;
736 err_unlock:
737 	up_write(&block->cb_lock);
738 	return err;
739 }
740 
741 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
742 				     struct tcf_block_ext_info *ei)
743 {
744 	struct net_device *dev = q->dev_queue->dev;
745 	int err;
746 
747 	down_write(&block->cb_lock);
748 	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
749 	if (err == -EOPNOTSUPP)
750 		goto no_offload_dev_dec;
751 	up_write(&block->cb_lock);
752 	return;
753 
754 no_offload_dev_dec:
755 	WARN_ON(block->nooffloaddevcnt-- == 0);
756 	up_write(&block->cb_lock);
757 }
758 
759 static int
760 tcf_chain0_head_change_cb_add(struct tcf_block *block,
761 			      struct tcf_block_ext_info *ei,
762 			      struct netlink_ext_ack *extack)
763 {
764 	struct tcf_filter_chain_list_item *item;
765 	struct tcf_chain *chain0;
766 
767 	item = kmalloc(sizeof(*item), GFP_KERNEL);
768 	if (!item) {
769 		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
770 		return -ENOMEM;
771 	}
772 	item->chain_head_change = ei->chain_head_change;
773 	item->chain_head_change_priv = ei->chain_head_change_priv;
774 
775 	mutex_lock(&block->lock);
776 	chain0 = block->chain0.chain;
777 	if (chain0)
778 		tcf_chain_hold(chain0);
779 	else
780 		list_add(&item->list, &block->chain0.filter_chain_list);
781 	mutex_unlock(&block->lock);
782 
783 	if (chain0) {
784 		struct tcf_proto *tp_head;
785 
786 		mutex_lock(&chain0->filter_chain_lock);
787 
788 		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
789 		if (tp_head)
790 			tcf_chain_head_change_item(item, tp_head);
791 
792 		mutex_lock(&block->lock);
793 		list_add(&item->list, &block->chain0.filter_chain_list);
794 		mutex_unlock(&block->lock);
795 
796 		mutex_unlock(&chain0->filter_chain_lock);
797 		tcf_chain_put(chain0);
798 	}
799 
800 	return 0;
801 }
802 
803 static void
804 tcf_chain0_head_change_cb_del(struct tcf_block *block,
805 			      struct tcf_block_ext_info *ei)
806 {
807 	struct tcf_filter_chain_list_item *item;
808 
809 	mutex_lock(&block->lock);
810 	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
811 		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
812 		    (item->chain_head_change == ei->chain_head_change &&
813 		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
814 			if (block->chain0.chain)
815 				tcf_chain_head_change_item(item, NULL);
816 			list_del(&item->list);
817 			mutex_unlock(&block->lock);
818 
819 			kfree(item);
820 			return;
821 		}
822 	}
823 	mutex_unlock(&block->lock);
824 	WARN_ON(1);
825 }
826 
827 struct tcf_net {
828 	spinlock_t idr_lock; /* Protects idr */
829 	struct idr idr;
830 };
831 
832 static unsigned int tcf_net_id;
833 
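/* Shared blocks are registered in a per-netns IDR, keyed by block->index,
 * so that they can be looked up by index alone (see tcf_block_lookup()
 * below), independently of any qdisc.
 */
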
834 static int tcf_block_insert(struct tcf_block *block, struct net *net,
835 			    struct netlink_ext_ack *extack)
836 {
837 	struct tcf_net *tn = net_generic(net, tcf_net_id);
838 	int err;
839 
840 	idr_preload(GFP_KERNEL);
841 	spin_lock(&tn->idr_lock);
842 	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
843 			    GFP_NOWAIT);
844 	spin_unlock(&tn->idr_lock);
845 	idr_preload_end();
846 
847 	return err;
848 }
849 
850 static void tcf_block_remove(struct tcf_block *block, struct net *net)
851 {
852 	struct tcf_net *tn = net_generic(net, tcf_net_id);
853 
854 	spin_lock(&tn->idr_lock);
855 	idr_remove(&tn->idr, block->index);
856 	spin_unlock(&tn->idr_lock);
857 }
858 
859 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
860 					  u32 block_index,
861 					  struct netlink_ext_ack *extack)
862 {
863 	struct tcf_block *block;
864 
865 	block = kzalloc(sizeof(*block), GFP_KERNEL);
866 	if (!block) {
867 		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
868 		return ERR_PTR(-ENOMEM);
869 	}
870 	mutex_init(&block->lock);
871 	mutex_init(&block->proto_destroy_lock);
872 	init_rwsem(&block->cb_lock);
873 	flow_block_init(&block->flow_block);
874 	INIT_LIST_HEAD(&block->chain_list);
875 	INIT_LIST_HEAD(&block->owner_list);
876 	INIT_LIST_HEAD(&block->chain0.filter_chain_list);
877 
878 	refcount_set(&block->refcnt, 1);
879 	block->net = net;
880 	block->index = block_index;
881 
882 	/* Don't store q pointer for blocks which are shared */
883 	if (!tcf_block_shared(block))
884 		block->q = q;
885 	return block;
886 }
887 
888 static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
889 {
890 	struct tcf_net *tn = net_generic(net, tcf_net_id);
891 
892 	return idr_find(&tn->idr, block_index);
893 }
894 
895 static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
896 {
897 	struct tcf_block *block;
898 
899 	rcu_read_lock();
900 	block = tcf_block_lookup(net, block_index);
901 	if (block && !refcount_inc_not_zero(&block->refcnt))
902 		block = NULL;
903 	rcu_read_unlock();
904 
905 	return block;
906 }
907 
908 static struct tcf_chain *
909 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
910 {
911 	mutex_lock(&block->lock);
912 	if (chain)
913 		chain = list_is_last(&chain->list, &block->chain_list) ?
914 			NULL : list_next_entry(chain, list);
915 	else
916 		chain = list_first_entry_or_null(&block->chain_list,
917 						 struct tcf_chain, list);
918 
919 	/* skip all action-only chains */
920 	while (chain && tcf_chain_held_by_acts_only(chain))
921 		chain = list_is_last(&chain->list, &block->chain_list) ?
922 			NULL : list_next_entry(chain, list);
923 
924 	if (chain)
925 		tcf_chain_hold(chain);
926 	mutex_unlock(&block->lock);
927 
928 	return chain;
929 }
930 
931 /* Function to be used by all clients that want to iterate over all chains on
932  * a block. It properly obtains block->lock and takes a reference to the chain
933  * before returning it. Users of this function must be tolerant to concurrent
934  * chain insertion/deletion or ensure that no concurrent chain modification is
935  * possible. Note that netlink dump callbacks cannot guarantee a consistent
936  * dump because the rtnl lock is released each time the skb is filled with
937  * data and sent to user-space.
938  */
939 
940 struct tcf_chain *
941 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
942 {
943 	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);
944 
945 	if (chain)
946 		tcf_chain_put(chain);
947 
948 	return chain_next;
949 }
950 EXPORT_SYMBOL(tcf_get_next_chain);
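
/* Typical iteration pattern (sketch): passing the previously returned chain
 * back in releases its reference, so a full walk is simply:
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		...;
 */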
951 
952 static struct tcf_proto *
953 __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
954 {
955 	u32 prio = 0;
956 
957 	ASSERT_RTNL();
958 	mutex_lock(&chain->filter_chain_lock);
959 
960 	if (!tp) {
961 		tp = tcf_chain_dereference(chain->filter_chain, chain);
962 	} else if (tcf_proto_is_deleting(tp)) {
963 		/* 'deleting' flag is set and chain->filter_chain_lock was
964 		 * unlocked, which means next pointer could be invalid. Restart
965 		 * search.
966 		 */
967 		prio = tp->prio + 1;
968 		tp = tcf_chain_dereference(chain->filter_chain, chain);
969 
970 		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
971 			if (!tp->deleting && tp->prio >= prio)
972 				break;
973 	} else {
974 		tp = tcf_chain_dereference(tp->next, chain);
975 	}
976 
977 	if (tp)
978 		tcf_proto_get(tp);
979 
980 	mutex_unlock(&chain->filter_chain_lock);
981 
982 	return tp;
983 }
984 
985 /* Function to be used by all clients that want to iterate over all tp's on
986  * a chain. Users of this function must be tolerant to concurrent tp
987  * insertion/deletion or ensure that no concurrent chain modification is
988  * possible. Note that netlink dump callbacks cannot guarantee a consistent
989  * dump because the rtnl lock is released each time the skb is filled with
990  * data and sent to user-space.
991  */
992 
993 struct tcf_proto *
994 tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
995 {
996 	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
997 
998 	if (tp)
999 		tcf_proto_put(tp, true, NULL);
1000 
1001 	return tp_next;
1002 }
1003 EXPORT_SYMBOL(tcf_get_next_proto);
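
/* The iteration pattern mirrors tcf_get_next_chain() above (sketch):
 *
 *	for (tp = tcf_get_next_proto(chain, NULL);
 *	     tp; tp = tcf_get_next_proto(chain, tp))
 *		...;
 */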
1004 
1005 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
1006 {
1007 	struct tcf_chain *chain;
1008 
1009 	/* Last reference to block. At this point chains cannot be added or
1010 	 * removed concurrently.
1011 	 */
1012 	for (chain = tcf_get_next_chain(block, NULL);
1013 	     chain;
1014 	     chain = tcf_get_next_chain(block, chain)) {
1015 		tcf_chain_put_explicitly_created(chain);
1016 		tcf_chain_flush(chain, rtnl_held);
1017 	}
1018 }
1019 
1020 /* Look up the Qdisc and increment its reference counter.
1021  * Set parent, if necessary.
1022  */
1023 
1024 static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
1025 			    u32 *parent, int ifindex, bool rtnl_held,
1026 			    struct netlink_ext_ack *extack)
1027 {
1028 	const struct Qdisc_class_ops *cops;
1029 	struct net_device *dev;
1030 	int err = 0;
1031 
1032 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1033 		return 0;
1034 
1035 	rcu_read_lock();
1036 
1037 	/* Find link */
1038 	dev = dev_get_by_index_rcu(net, ifindex);
1039 	if (!dev) {
1040 		rcu_read_unlock();
1041 		return -ENODEV;
1042 	}
1043 
1044 	/* Find qdisc */
1045 	if (!*parent) {
1046 		*q = dev->qdisc;
1047 		*parent = (*q)->handle;
1048 	} else {
1049 		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
1050 		if (!*q) {
1051 			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1052 			err = -EINVAL;
1053 			goto errout_rcu;
1054 		}
1055 	}
1056 
1057 	*q = qdisc_refcount_inc_nz(*q);
1058 	if (!*q) {
1059 		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1060 		err = -EINVAL;
1061 		goto errout_rcu;
1062 	}
1063 
1064 	/* Is it classful? */
1065 	cops = (*q)->ops->cl_ops;
1066 	if (!cops) {
1067 		NL_SET_ERR_MSG(extack, "Qdisc not classful");
1068 		err = -EINVAL;
1069 		goto errout_qdisc;
1070 	}
1071 
1072 	if (!cops->tcf_block) {
1073 		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
1074 		err = -EOPNOTSUPP;
1075 		goto errout_qdisc;
1076 	}
1077 
1078 errout_rcu:
1079 	/* At this point we know that qdisc is not noop_qdisc,
1080 	 * which means that qdisc holds a reference to net_device
1081 	 * and we hold a reference to qdisc, so it is safe to release
1082 	 * rcu read lock.
1083 	 */
1084 	rcu_read_unlock();
1085 	return err;
1086 
1087 errout_qdisc:
1088 	rcu_read_unlock();
1089 
1090 	if (rtnl_held)
1091 		qdisc_put(*q);
1092 	else
1093 		qdisc_put_unlocked(*q);
1094 	*q = NULL;
1095 
1096 	return err;
1097 }
1098 
1099 static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
1100 			       int ifindex, struct netlink_ext_ack *extack)
1101 {
1102 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1103 		return 0;
1104 
1105 	/* Are we searching for a filter attached to a class? */
1106 	if (TC_H_MIN(parent)) {
1107 		const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1108 
1109 		*cl = cops->find(q, parent);
1110 		if (*cl == 0) {
1111 			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
1112 			return -ENOENT;
1113 		}
1114 	}
1115 
1116 	return 0;
1117 }
1118 
1119 static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
1120 					  unsigned long cl, int ifindex,
1121 					  u32 block_index,
1122 					  struct netlink_ext_ack *extack)
1123 {
1124 	struct tcf_block *block;
1125 
1126 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
1127 		block = tcf_block_refcnt_get(net, block_index);
1128 		if (!block) {
1129 			NL_SET_ERR_MSG(extack, "Block of given index was not found");
1130 			return ERR_PTR(-EINVAL);
1131 		}
1132 	} else {
1133 		const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1134 
1135 		block = cops->tcf_block(q, cl, extack);
1136 		if (!block)
1137 			return ERR_PTR(-EINVAL);
1138 
1139 		if (tcf_block_shared(block)) {
1140 			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
1141 			return ERR_PTR(-EOPNOTSUPP);
1142 		}
1143 
1144 		/* Always take a reference to the block in order to support
1145 		 * execution of the cls API rules update path without the rtnl
1146 		 * lock. The caller must release the block when finished using
1147 		 * it. The 'if' branch of this conditional obtains its reference
1148 		 * by calling tcf_block_refcnt_get().
1149 		 */
1150 		refcount_inc(&block->refcnt);
1151 	}
1152 
1153 	return block;
1154 }
1155 
1156 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
1157 			    struct tcf_block_ext_info *ei, bool rtnl_held)
1158 {
1159 	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
1160 		/* Flushing/putting all chains will cause the block to be
1161 		 * deallocated when the last chain is freed. However, if
1162 		 * chain_list is empty, the block has to be deallocated
1163 		 * manually. Once the block reference counter reaches 0, it is
1164 		 * no longer possible to increment it or add new chains to it.
1165 		 */
1166 		bool free_block = list_empty(&block->chain_list);
1167 
1168 		mutex_unlock(&block->lock);
1169 		if (tcf_block_shared(block))
1170 			tcf_block_remove(block, block->net);
1171 
1172 		if (q)
1173 			tcf_block_offload_unbind(block, q, ei);
1174 
1175 		if (free_block)
1176 			tcf_block_destroy(block);
1177 		else
1178 			tcf_block_flush_all_chains(block, rtnl_held);
1179 	} else if (q) {
1180 		tcf_block_offload_unbind(block, q, ei);
1181 	}
1182 }
1183 
1184 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
1185 {
1186 	__tcf_block_put(block, NULL, NULL, rtnl_held);
1187 }
1188 
1189 /* Find tcf block.
1190  * Set q, parent, cl when appropriate.
1191  */
1192 
1193 static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
1194 					u32 *parent, unsigned long *cl,
1195 					int ifindex, u32 block_index,
1196 					struct netlink_ext_ack *extack)
1197 {
1198 	struct tcf_block *block;
1199 	int err = 0;
1200 
1201 	ASSERT_RTNL();
1202 
1203 	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
1204 	if (err)
1205 		goto errout;
1206 
1207 	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
1208 	if (err)
1209 		goto errout_qdisc;
1210 
1211 	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
1212 	if (IS_ERR(block)) {
1213 		err = PTR_ERR(block);
1214 		goto errout_qdisc;
1215 	}
1216 
1217 	return block;
1218 
1219 errout_qdisc:
1220 	if (*q)
1221 		qdisc_put(*q);
1222 errout:
1223 	*q = NULL;
1224 	return ERR_PTR(err);
1225 }
1226 
1227 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
1228 			      bool rtnl_held)
1229 {
1230 	if (!IS_ERR_OR_NULL(block))
1231 		tcf_block_refcnt_put(block, rtnl_held);
1232 
1233 	if (q) {
1234 		if (rtnl_held)
1235 			qdisc_put(q);
1236 		else
1237 			qdisc_put_unlocked(q);
1238 	}
1239 }
1240 
1241 struct tcf_block_owner_item {
1242 	struct list_head list;
1243 	struct Qdisc *q;
1244 	enum flow_block_binder_type binder_type;
1245 };
1246 
1247 static void
1248 tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1249 			       struct Qdisc *q,
1250 			       enum flow_block_binder_type binder_type)
1251 {
1252 	if (block->keep_dst &&
1253 	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1254 	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1255 		netif_keep_dst(qdisc_dev(q));
1256 }
1257 
1258 void tcf_block_netif_keep_dst(struct tcf_block *block)
1259 {
1260 	struct tcf_block_owner_item *item;
1261 
1262 	block->keep_dst = true;
1263 	list_for_each_entry(item, &block->owner_list, list)
1264 		tcf_block_owner_netif_keep_dst(block, item->q,
1265 					       item->binder_type);
1266 }
1267 EXPORT_SYMBOL(tcf_block_netif_keep_dst);
1268 
1269 static int tcf_block_owner_add(struct tcf_block *block,
1270 			       struct Qdisc *q,
1271 			       enum flow_block_binder_type binder_type)
1272 {
1273 	struct tcf_block_owner_item *item;
1274 
1275 	item = kmalloc(sizeof(*item), GFP_KERNEL);
1276 	if (!item)
1277 		return -ENOMEM;
1278 	item->q = q;
1279 	item->binder_type = binder_type;
1280 	list_add(&item->list, &block->owner_list);
1281 	return 0;
1282 }
1283 
1284 static void tcf_block_owner_del(struct tcf_block *block,
1285 				struct Qdisc *q,
1286 				enum flow_block_binder_type binder_type)
1287 {
1288 	struct tcf_block_owner_item *item;
1289 
1290 	list_for_each_entry(item, &block->owner_list, list) {
1291 		if (item->q == q && item->binder_type == binder_type) {
1292 			list_del(&item->list);
1293 			kfree(item);
1294 			return;
1295 		}
1296 	}
1297 	WARN_ON(1);
1298 }
1299 
1300 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1301 		      struct tcf_block_ext_info *ei,
1302 		      struct netlink_ext_ack *extack)
1303 {
1304 	struct net *net = qdisc_net(q);
1305 	struct tcf_block *block = NULL;
1306 	int err;
1307 
1308 	if (ei->block_index)
1309 		/* block_index not 0 means the shared block is requested */
1310 		block = tcf_block_refcnt_get(net, ei->block_index);
1311 
1312 	if (!block) {
1313 		block = tcf_block_create(net, q, ei->block_index, extack);
1314 		if (IS_ERR(block))
1315 			return PTR_ERR(block);
1316 		if (tcf_block_shared(block)) {
1317 			err = tcf_block_insert(block, net, extack);
1318 			if (err)
1319 				goto err_block_insert;
1320 		}
1321 	}
1322 
1323 	err = tcf_block_owner_add(block, q, ei->binder_type);
1324 	if (err)
1325 		goto err_block_owner_add;
1326 
1327 	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1328 
1329 	err = tcf_chain0_head_change_cb_add(block, ei, extack);
1330 	if (err)
1331 		goto err_chain0_head_change_cb_add;
1332 
1333 	err = tcf_block_offload_bind(block, q, ei, extack);
1334 	if (err)
1335 		goto err_block_offload_bind;
1336 
1337 	*p_block = block;
1338 	return 0;
1339 
1340 err_block_offload_bind:
1341 	tcf_chain0_head_change_cb_del(block, ei);
1342 err_chain0_head_change_cb_add:
1343 	tcf_block_owner_del(block, q, ei->binder_type);
1344 err_block_owner_add:
1345 err_block_insert:
1346 	tcf_block_refcnt_put(block, true);
1347 	return err;
1348 }
1349 EXPORT_SYMBOL(tcf_block_get_ext);
1350 
1351 static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1352 {
1353 	struct tcf_proto __rcu **p_filter_chain = priv;
1354 
1355 	rcu_assign_pointer(*p_filter_chain, tp_head);
1356 }
1357 
1358 int tcf_block_get(struct tcf_block **p_block,
1359 		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1360 		  struct netlink_ext_ack *extack)
1361 {
1362 	struct tcf_block_ext_info ei = {
1363 		.chain_head_change = tcf_chain_head_change_dflt,
1364 		.chain_head_change_priv = p_filter_chain,
1365 	};
1366 
1367 	WARN_ON(!p_filter_chain);
1368 	return tcf_block_get_ext(p_block, q, &ei, extack);
1369 }
1370 EXPORT_SYMBOL(tcf_block_get);
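
/* Sketch of typical qdisc usage (names hypothetical, modeled on classful
 * qdiscs): obtain a block in ->init() and release it in ->destroy().
 *
 *	static int example_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
 *				      struct netlink_ext_ack *extack)
 *	{
 *		struct example_qdisc_priv *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void example_qdisc_destroy(struct Qdisc *sch)
 *	{
 *		struct example_qdisc_priv *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */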
1371 
1372 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
1373  * actions should all be removed after flushing.
1374  */
1375 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1376 		       struct tcf_block_ext_info *ei)
1377 {
1378 	if (!block)
1379 		return;
1380 	tcf_chain0_head_change_cb_del(block, ei);
1381 	tcf_block_owner_del(block, q, ei->binder_type);
1382 
1383 	__tcf_block_put(block, q, ei, true);
1384 }
1385 EXPORT_SYMBOL(tcf_block_put_ext);
1386 
1387 void tcf_block_put(struct tcf_block *block)
1388 {
1389 	struct tcf_block_ext_info ei = {0, };
1390 
1391 	if (!block)
1392 		return;
1393 	tcf_block_put_ext(block, block->q, &ei);
1394 }
1395 
1396 EXPORT_SYMBOL(tcf_block_put);
1397 
1398 static int
1399 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1400 			    void *cb_priv, bool add, bool offload_in_use,
1401 			    struct netlink_ext_ack *extack)
1402 {
1403 	struct tcf_chain *chain, *chain_prev;
1404 	struct tcf_proto *tp, *tp_prev;
1405 	int err;
1406 
1407 	lockdep_assert_held(&block->cb_lock);
1408 
1409 	for (chain = __tcf_get_next_chain(block, NULL);
1410 	     chain;
1411 	     chain_prev = chain,
1412 		     chain = __tcf_get_next_chain(block, chain),
1413 		     tcf_chain_put(chain_prev)) {
1414 		for (tp = __tcf_get_next_proto(chain, NULL); tp;
1415 		     tp_prev = tp,
1416 			     tp = __tcf_get_next_proto(chain, tp),
1417 			     tcf_proto_put(tp_prev, true, NULL)) {
1418 			if (tp->ops->reoffload) {
1419 				err = tp->ops->reoffload(tp, add, cb, cb_priv,
1420 							 extack);
1421 				if (err && add)
1422 					goto err_playback_remove;
1423 			} else if (add && offload_in_use) {
1424 				err = -EOPNOTSUPP;
1425 				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1426 				goto err_playback_remove;
1427 			}
1428 		}
1429 	}
1430 
1431 	return 0;
1432 
1433 err_playback_remove:
1434 	tcf_proto_put(tp, true, NULL);
1435 	tcf_chain_put(chain);
1436 	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1437 				    extack);
1438 	return err;
1439 }
1440 
1441 static int tcf_block_bind(struct tcf_block *block,
1442 			  struct flow_block_offload *bo)
1443 {
1444 	struct flow_block_cb *block_cb, *next;
1445 	int err, i = 0;
1446 
1447 	lockdep_assert_held(&block->cb_lock);
1448 
1449 	list_for_each_entry(block_cb, &bo->cb_list, list) {
1450 		err = tcf_block_playback_offloads(block, block_cb->cb,
1451 						  block_cb->cb_priv, true,
1452 						  tcf_block_offload_in_use(block),
1453 						  bo->extack);
1454 		if (err)
1455 			goto err_unroll;
1456 		if (!bo->unlocked_driver_cb)
1457 			block->lockeddevcnt++;
1458 
1459 		i++;
1460 	}
1461 	list_splice(&bo->cb_list, &block->flow_block.cb_list);
1462 
1463 	return 0;
1464 
1465 err_unroll:
1466 	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1467 		if (i-- > 0) {
1468 			list_del(&block_cb->list);
1469 			tcf_block_playback_offloads(block, block_cb->cb,
1470 						    block_cb->cb_priv, false,
1471 						    tcf_block_offload_in_use(block),
1472 						    NULL);
1473 			if (!bo->unlocked_driver_cb)
1474 				block->lockeddevcnt--;
1475 		}
1476 		flow_block_cb_free(block_cb);
1477 	}
1478 
1479 	return err;
1480 }
1481 
1482 static void tcf_block_unbind(struct tcf_block *block,
1483 			     struct flow_block_offload *bo)
1484 {
1485 	struct flow_block_cb *block_cb, *next;
1486 
1487 	lockdep_assert_held(&block->cb_lock);
1488 
1489 	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1490 		tcf_block_playback_offloads(block, block_cb->cb,
1491 					    block_cb->cb_priv, false,
1492 					    tcf_block_offload_in_use(block),
1493 					    NULL);
1494 		list_del(&block_cb->list);
1495 		flow_block_cb_free(block_cb);
1496 		if (!bo->unlocked_driver_cb)
1497 			block->lockeddevcnt--;
1498 	}
1499 }
1500 
1501 static int tcf_block_setup(struct tcf_block *block,
1502 			   struct flow_block_offload *bo)
1503 {
1504 	int err;
1505 
1506 	switch (bo->command) {
1507 	case FLOW_BLOCK_BIND:
1508 		err = tcf_block_bind(block, bo);
1509 		break;
1510 	case FLOW_BLOCK_UNBIND:
1511 		err = 0;
1512 		tcf_block_unbind(block, bo);
1513 		break;
1514 	default:
1515 		WARN_ON_ONCE(1);
1516 		err = -EOPNOTSUPP;
1517 	}
1518 
1519 	return err;
1520 }
1521 
1522 /* Main classifier routine: scans classifier chain attached
1523  * to this qdisc, (optionally) tests for protocol and asks
1524  * specific classifiers.
1525  */
1526 static inline int __tcf_classify(struct sk_buff *skb,
1527 				 const struct tcf_proto *tp,
1528 				 const struct tcf_proto *orig_tp,
1529 				 struct tcf_result *res,
1530 				 bool compat_mode,
1531 				 u32 *last_executed_chain)
1532 {
1533 #ifdef CONFIG_NET_CLS_ACT
1534 	const int max_reclassify_loop = 16;
1535 	const struct tcf_proto *first_tp;
1536 	int limit = 0;
1537 
1538 reclassify:
1539 #endif
1540 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
1541 		__be16 protocol = skb_protocol(skb, false);
1542 		int err;
1543 
1544 		if (tp->protocol != protocol &&
1545 		    tp->protocol != htons(ETH_P_ALL))
1546 			continue;
1547 
1548 		err = tp->classify(skb, tp, res);
1549 #ifdef CONFIG_NET_CLS_ACT
1550 		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1551 			first_tp = orig_tp;
1552 			*last_executed_chain = first_tp->chain->index;
1553 			goto reset;
1554 		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1555 			first_tp = res->goto_tp;
1556 			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1557 			goto reset;
1558 		}
1559 #endif
1560 		if (err >= 0)
1561 			return err;
1562 	}
1563 
1564 	return TC_ACT_UNSPEC; /* signal: continue lookup */
1565 #ifdef CONFIG_NET_CLS_ACT
1566 reset:
1567 	if (unlikely(limit++ >= max_reclassify_loop)) {
1568 		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1569 				       tp->chain->block->index,
1570 				       tp->prio & 0xffff,
1571 				       ntohs(tp->protocol));
1572 		return TC_ACT_SHOT;
1573 	}
1574 
1575 	tp = first_tp;
1576 	goto reclassify;
1577 #endif
1578 }
1579 
1580 int tcf_classify(struct sk_buff *skb,
1581 		 const struct tcf_block *block,
1582 		 const struct tcf_proto *tp,
1583 		 struct tcf_result *res, bool compat_mode)
1584 {
1585 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1586 	u32 last_executed_chain = 0;
1587 
1588 	return __tcf_classify(skb, tp, tp, res, compat_mode,
1589 			      &last_executed_chain);
1590 #else
1591 	u32 last_executed_chain = tp ? tp->chain->index : 0;
1592 	const struct tcf_proto *orig_tp = tp;
1593 	struct tc_skb_ext *ext;
1594 	int ret;
1595 
1596 	if (block) {
1597 		ext = skb_ext_find(skb, TC_SKB_EXT);
1598 
1599 		if (ext && ext->chain) {
1600 			struct tcf_chain *fchain;
1601 
1602 			fchain = tcf_chain_lookup_rcu(block, ext->chain);
1603 			if (!fchain)
1604 				return TC_ACT_SHOT;
1605 
1606 			/* Consume, so cloned/redirect skbs won't inherit ext */
1607 			skb_ext_del(skb, TC_SKB_EXT);
1608 
1609 			tp = rcu_dereference_bh(fchain->filter_chain);
1610 			last_executed_chain = fchain->index;
1611 		}
1612 	}
1613 
1614 	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
1615 			     &last_executed_chain);
1616 
1617 	/* If we missed on some chain */
1618 	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1619 		ext = tc_skb_ext_alloc(skb);
1620 		if (WARN_ON_ONCE(!ext))
1621 			return TC_ACT_SHOT;
1622 		ext->chain = last_executed_chain;
1623 		ext->mru = qdisc_skb_cb(skb)->mru;
1624 		ext->post_ct = qdisc_skb_cb(skb)->post_ct;
1625 	}
1626 
1627 	return ret;
1628 #endif
1629 }
1630 EXPORT_SYMBOL(tcf_classify);
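
/* Sketch of a typical call site in a qdisc's classification path (names
 * hypothetical): run the filter chain and act on the verdict.
 *
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	struct tcf_result res;
 *	int result = tcf_classify(skb, NULL, fl, &res, false);
 *
 * TC_ACT_SHOT means drop the packet, TC_ACT_UNSPEC means no filter matched
 * (fall back to a default class), and otherwise res.classid typically
 * selects the target class.
 */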
1631 
1632 struct tcf_chain_info {
1633 	struct tcf_proto __rcu **pprev;
1634 	struct tcf_proto __rcu *next;
1635 };
1636 
1637 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1638 					   struct tcf_chain_info *chain_info)
1639 {
1640 	return tcf_chain_dereference(*chain_info->pprev, chain);
1641 }
1642 
1643 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1644 			       struct tcf_chain_info *chain_info,
1645 			       struct tcf_proto *tp)
1646 {
1647 	if (chain->flushing)
1648 		return -EAGAIN;
1649 
1650 	if (*chain_info->pprev == chain->filter_chain)
1651 		tcf_chain0_head_change(chain, tp);
1652 	tcf_proto_get(tp);
1653 	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1654 	rcu_assign_pointer(*chain_info->pprev, tp);
1655 
1656 	return 0;
1657 }
1658 
1659 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1660 				struct tcf_chain_info *chain_info,
1661 				struct tcf_proto *tp)
1662 {
1663 	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1664 
1665 	tcf_proto_mark_delete(tp);
1666 	if (tp == chain->filter_chain)
1667 		tcf_chain0_head_change(chain, next);
1668 	RCU_INIT_POINTER(*chain_info->pprev, next);
1669 }
1670 
1671 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1672 					   struct tcf_chain_info *chain_info,
1673 					   u32 protocol, u32 prio,
1674 					   bool prio_allocate);
1675 
1676 /* Try to insert new proto.
1677  * If proto with specified priority already exists, free new proto
1678  * and return existing one.
1679  */
1680 
1681 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1682 						    struct tcf_proto *tp_new,
1683 						    u32 protocol, u32 prio,
1684 						    bool rtnl_held)
1685 {
1686 	struct tcf_chain_info chain_info;
1687 	struct tcf_proto *tp;
1688 	int err = 0;
1689 
1690 	mutex_lock(&chain->filter_chain_lock);
1691 
1692 	if (tcf_proto_exists_destroying(chain, tp_new)) {
1693 		mutex_unlock(&chain->filter_chain_lock);
1694 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1695 		return ERR_PTR(-EAGAIN);
1696 	}
1697 
1698 	tp = tcf_chain_tp_find(chain, &chain_info,
1699 			       protocol, prio, false);
1700 	if (!tp)
1701 		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1702 	mutex_unlock(&chain->filter_chain_lock);
1703 
1704 	if (tp) {
1705 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1706 		tp_new = tp;
1707 	} else if (err) {
1708 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1709 		tp_new = ERR_PTR(err);
1710 	}
1711 
1712 	return tp_new;
1713 }
1714 
1715 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1716 				      struct tcf_proto *tp, bool rtnl_held,
1717 				      struct netlink_ext_ack *extack)
1718 {
1719 	struct tcf_chain_info chain_info;
1720 	struct tcf_proto *tp_iter;
1721 	struct tcf_proto **pprev;
1722 	struct tcf_proto *next;
1723 
1724 	mutex_lock(&chain->filter_chain_lock);
1725 
1726 	/* Atomically find and remove tp from chain. */
1727 	for (pprev = &chain->filter_chain;
1728 	     (tp_iter = tcf_chain_dereference(*pprev, chain));
1729 	     pprev = &tp_iter->next) {
1730 		if (tp_iter == tp) {
1731 			chain_info.pprev = pprev;
1732 			chain_info.next = tp_iter->next;
1733 			WARN_ON(tp_iter->deleting);
1734 			break;
1735 		}
1736 	}
1737 	/* Verify that tp still exists and no new filters were inserted
1738 	 * concurrently.
1739 	 * Mark tp for deletion if it is empty.
1740 	 */
1741 	if (!tp_iter || !tcf_proto_check_delete(tp)) {
1742 		mutex_unlock(&chain->filter_chain_lock);
1743 		return;
1744 	}
1745 
1746 	tcf_proto_signal_destroying(chain, tp);
1747 	next = tcf_chain_dereference(chain_info.next, chain);
1748 	if (tp == chain->filter_chain)
1749 		tcf_chain0_head_change(chain, next);
1750 	RCU_INIT_POINTER(*chain_info.pprev, next);
1751 	mutex_unlock(&chain->filter_chain_lock);
1752 
1753 	tcf_proto_put(tp, rtnl_held, extack);
1754 }
1755 
1756 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1757 					   struct tcf_chain_info *chain_info,
1758 					   u32 protocol, u32 prio,
1759 					   bool prio_allocate)
1760 {
1761 	struct tcf_proto **pprev;
1762 	struct tcf_proto *tp;
1763 
1764 	/* Check the chain for existence of proto-tcf with this priority */
1765 	for (pprev = &chain->filter_chain;
1766 	     (tp = tcf_chain_dereference(*pprev, chain));
1767 	     pprev = &tp->next) {
1768 		if (tp->prio >= prio) {
1769 			if (tp->prio == prio) {
1770 				if (prio_allocate ||
1771 				    (tp->protocol != protocol && protocol))
1772 					return ERR_PTR(-EINVAL);
1773 			} else {
1774 				tp = NULL;
1775 			}
1776 			break;
1777 		}
1778 	}
1779 	chain_info->pprev = pprev;
1780 	if (tp) {
1781 		chain_info->next = tp->next;
1782 		tcf_proto_get(tp);
1783 	} else {
1784 		chain_info->next = NULL;
1785 	}
1786 	return tp;
1787 }
1788 
1789 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1790 			 struct tcf_proto *tp, struct tcf_block *block,
1791 			 struct Qdisc *q, u32 parent, void *fh,
1792 			 u32 portid, u32 seq, u16 flags, int event,
1793 			 bool terse_dump, bool rtnl_held)
1794 {
1795 	struct tcmsg *tcm;
1796 	struct nlmsghdr  *nlh;
1797 	unsigned char *b = skb_tail_pointer(skb);
1798 
1799 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1800 	if (!nlh)
1801 		goto out_nlmsg_trim;
1802 	tcm = nlmsg_data(nlh);
1803 	tcm->tcm_family = AF_UNSPEC;
1804 	tcm->tcm__pad1 = 0;
1805 	tcm->tcm__pad2 = 0;
1806 	if (q) {
1807 		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1808 		tcm->tcm_parent = parent;
1809 	} else {
1810 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1811 		tcm->tcm_block_index = block->index;
1812 	}
1813 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1814 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1815 		goto nla_put_failure;
1816 	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1817 		goto nla_put_failure;
1818 	if (!fh) {
1819 		tcm->tcm_handle = 0;
1820 	} else if (terse_dump) {
1821 		if (tp->ops->terse_dump) {
1822 			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
1823 						rtnl_held) < 0)
1824 				goto nla_put_failure;
1825 		} else {
1826 			goto cls_op_not_supp;
1827 		}
1828 	} else {
1829 		if (tp->ops->dump &&
1830 		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1831 			goto nla_put_failure;
1832 	}
1833 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1834 	return skb->len;
1835 
1836 out_nlmsg_trim:
1837 nla_put_failure:
1838 cls_op_not_supp:
1839 	nlmsg_trim(skb, b);
1840 	return -1;
1841 }
1842 
1843 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1844 			  struct nlmsghdr *n, struct tcf_proto *tp,
1845 			  struct tcf_block *block, struct Qdisc *q,
1846 			  u32 parent, void *fh, int event, bool unicast,
1847 			  bool rtnl_held)
1848 {
1849 	struct sk_buff *skb;
1850 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1851 	int err = 0;
1852 
1853 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1854 	if (!skb)
1855 		return -ENOBUFS;
1856 
1857 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1858 			  n->nlmsg_seq, n->nlmsg_flags, event,
1859 			  false, rtnl_held) <= 0) {
1860 		kfree_skb(skb);
1861 		return -EINVAL;
1862 	}
1863 
1864 	if (unicast)
1865 		err = rtnl_unicast(skb, net, portid);
1866 	else
1867 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1868 				     n->nlmsg_flags & NLM_F_ECHO);
1869 	return err;
1870 }
1871 
1872 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1873 			      struct nlmsghdr *n, struct tcf_proto *tp,
1874 			      struct tcf_block *block, struct Qdisc *q,
1875 			      u32 parent, void *fh, bool unicast, bool *last,
1876 			      bool rtnl_held, struct netlink_ext_ack *extack)
1877 {
1878 	struct sk_buff *skb;
1879 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1880 	int err;
1881 
1882 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1883 	if (!skb)
1884 		return -ENOBUFS;
1885 
1886 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1887 			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1888 			  false, rtnl_held) <= 0) {
1889 		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1890 		kfree_skb(skb);
1891 		return -EINVAL;
1892 	}
1893 
1894 	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1895 	if (err) {
1896 		kfree_skb(skb);
1897 		return err;
1898 	}
1899 
1900 	if (unicast)
1901 		err = rtnl_unicast(skb, net, portid);
1902 	else
1903 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1904 				     n->nlmsg_flags & NLM_F_ECHO);
1905 	if (err < 0)
1906 		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1907 
1908 	return err;
1909 }
1910 
1911 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1912 				 struct tcf_block *block, struct Qdisc *q,
1913 				 u32 parent, struct nlmsghdr *n,
1914 				 struct tcf_chain *chain, int event)
1915 {
1916 	struct tcf_proto *tp;
1917 
1918 	for (tp = tcf_get_next_proto(chain, NULL);
1919 	     tp; tp = tcf_get_next_proto(chain, tp))
1920 		tfilter_notify(net, oskb, n, tp, block,
1921 			       q, parent, NULL, event, false, true);
1922 }
1923 
1924 static void tfilter_put(struct tcf_proto *tp, void *fh)
1925 {
1926 	if (tp->ops->put && fh)
1927 		tp->ops->put(tp, fh);
1928 }
1929 
1930 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1931 			  struct netlink_ext_ack *extack)
1932 {
1933 	struct net *net = sock_net(skb->sk);
1934 	struct nlattr *tca[TCA_MAX + 1];
1935 	char name[IFNAMSIZ];
1936 	struct tcmsg *t;
1937 	u32 protocol;
1938 	u32 prio;
1939 	bool prio_allocate;
1940 	u32 parent;
1941 	u32 chain_index;
1942 	struct Qdisc *q = NULL;
1943 	struct tcf_chain_info chain_info;
1944 	struct tcf_chain *chain = NULL;
1945 	struct tcf_block *block;
1946 	struct tcf_proto *tp;
1947 	unsigned long cl;
1948 	void *fh;
1949 	int err;
1950 	int tp_created;
1951 	bool rtnl_held = false;
1952 	u32 flags = 0;
1953 
1954 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1955 		return -EPERM;
1956 
1957 replay:
1958 	tp_created = 0;
1959 
1960 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1961 				     rtm_tca_policy, extack);
1962 	if (err < 0)
1963 		return err;
1964 
1965 	t = nlmsg_data(n);
1966 	protocol = TC_H_MIN(t->tcm_info);
1967 	prio = TC_H_MAJ(t->tcm_info);
1968 	prio_allocate = false;
1969 	parent = t->tcm_parent;
1970 	tp = NULL;
1971 	cl = 0;
1972 	block = NULL;
1973 
1974 	if (prio == 0) {
1975 		/* If no priority is provided by the user,
1976 		 * we allocate one.
1977 		 */
1978 		if (n->nlmsg_flags & NLM_F_CREATE) {
1979 			prio = TC_H_MAKE(0x80000000U, 0U);
1980 			prio_allocate = true;
1981 		} else {
1982 			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
1983 			return -ENOENT;
1984 		}
1985 	}
1986 
1987 	/* Find head of filter chain. */
1988 
1989 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
1990 	if (err)
1991 		return err;
1992 
1993 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
1994 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
1995 		err = -EINVAL;
1996 		goto errout;
1997 	}
1998 
1999 	/* Take the rtnl mutex if: rtnl_held was true on a previous iteration,
2000 	 * the block is shared (no qdisc found), the qdisc is not unlocked,
2001 	 * the classifier type is unspecified, or the classifier is not unlocked.
2002 	 */
2003 	if (rtnl_held ||
2004 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2005 	    !tcf_proto_is_unlocked(name)) {
2006 		rtnl_held = true;
2007 		rtnl_lock();
2008 	}
2009 
2010 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2011 	if (err)
2012 		goto errout;
2013 
2014 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2015 				 extack);
2016 	if (IS_ERR(block)) {
2017 		err = PTR_ERR(block);
2018 		goto errout;
2019 	}
2020 	block->classid = parent;
2021 
2022 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2023 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2024 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2025 		err = -EINVAL;
2026 		goto errout;
2027 	}
2028 	chain = tcf_chain_get(block, chain_index, true);
2029 	if (!chain) {
2030 		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2031 		err = -ENOMEM;
2032 		goto errout;
2033 	}
2034 
2035 	mutex_lock(&chain->filter_chain_lock);
2036 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2037 			       prio, prio_allocate);
2038 	if (IS_ERR(tp)) {
2039 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2040 		err = PTR_ERR(tp);
2041 		goto errout_locked;
2042 	}
2043 
2044 	if (tp == NULL) {
2045 		struct tcf_proto *tp_new = NULL;
2046 
2047 		if (chain->flushing) {
2048 			err = -EAGAIN;
2049 			goto errout_locked;
2050 		}
2051 
2052 		/* Proto-tcf does not exist, create new one */
2053 
2054 		if (tca[TCA_KIND] == NULL || !protocol) {
2055 			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2056 			err = -EINVAL;
2057 			goto errout_locked;
2058 		}
2059 
2060 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2061 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2062 			err = -ENOENT;
2063 			goto errout_locked;
2064 		}
2065 
2066 		if (prio_allocate)
2067 			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2068 							       &chain_info));
2069 
2070 		mutex_unlock(&chain->filter_chain_lock);
2071 		tp_new = tcf_proto_create(name, protocol, prio, chain,
2072 					  rtnl_held, extack);
2073 		if (IS_ERR(tp_new)) {
2074 			err = PTR_ERR(tp_new);
2075 			goto errout_tp;
2076 		}
2077 
2078 		tp_created = 1;
2079 		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2080 						rtnl_held);
2081 		if (IS_ERR(tp)) {
2082 			err = PTR_ERR(tp);
2083 			goto errout_tp;
2084 		}
2085 	} else {
2086 		mutex_unlock(&chain->filter_chain_lock);
2087 	}
2088 
2089 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2090 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2091 		err = -EINVAL;
2092 		goto errout;
2093 	}
2094 
2095 	fh = tp->ops->get(tp, t->tcm_handle);
2096 
2097 	if (!fh) {
2098 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2099 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2100 			err = -ENOENT;
2101 			goto errout;
2102 		}
2103 	} else if (n->nlmsg_flags & NLM_F_EXCL) {
2104 		tfilter_put(tp, fh);
2105 		NL_SET_ERR_MSG(extack, "Filter already exists");
2106 		err = -EEXIST;
2107 		goto errout;
2108 	}
2109 
2110 	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2111 		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2112 		err = -EINVAL;
2113 		goto errout;
2114 	}
2115 
2116 	if (!(n->nlmsg_flags & NLM_F_CREATE))
2117 		flags |= TCA_ACT_FLAGS_REPLACE;
2118 	if (!rtnl_held)
2119 		flags |= TCA_ACT_FLAGS_NO_RTNL;
2120 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2121 			      flags, extack);
2122 	if (err == 0) {
2123 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2124 			       RTM_NEWTFILTER, false, rtnl_held);
2125 		tfilter_put(tp, fh);
2126 		/* q pointer is NULL for shared blocks */
2127 		if (q)
2128 			q->flags &= ~TCQ_F_CAN_BYPASS;
2129 	}
2130 
2131 errout:
2132 	if (err && tp_created)
2133 		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2134 errout_tp:
2135 	if (chain) {
2136 		if (tp && !IS_ERR(tp))
2137 			tcf_proto_put(tp, rtnl_held, NULL);
2138 		if (!tp_created)
2139 			tcf_chain_put(chain);
2140 	}
2141 	tcf_block_release(q, block, rtnl_held);
2142 
2143 	if (rtnl_held)
2144 		rtnl_unlock();
2145 
2146 	if (err == -EAGAIN) {
2147 		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
2148 		 * of target chain.
2149 		 */
2150 		rtnl_held = true;
2151 		/* Replay the request. */
2152 		goto replay;
2153 	}
2154 	return err;
2155 
2156 errout_locked:
2157 	mutex_unlock(&chain->filter_chain_lock);
2158 	goto errout;
2159 }
2160 
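/* RTM_DELTFILTER handler: delete a single filter, a whole tcf_proto (no
 * handle given), or flush the entire chain (no priority given). E.g.
 * (illustrative), "tc filter del dev eth0 parent 1: protocol ip prio 10"
 * removes everything at priority 10, while giving no priority at all
 * requests a chain flush.
 */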
2161 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2162 			  struct netlink_ext_ack *extack)
2163 {
2164 	struct net *net = sock_net(skb->sk);
2165 	struct nlattr *tca[TCA_MAX + 1];
2166 	char name[IFNAMSIZ];
2167 	struct tcmsg *t;
2168 	u32 protocol;
2169 	u32 prio;
2170 	u32 parent;
2171 	u32 chain_index;
2172 	struct Qdisc *q = NULL;
2173 	struct tcf_chain_info chain_info;
2174 	struct tcf_chain *chain = NULL;
2175 	struct tcf_block *block = NULL;
2176 	struct tcf_proto *tp = NULL;
2177 	unsigned long cl = 0;
2178 	void *fh = NULL;
2179 	int err;
2180 	bool rtnl_held = false;
2181 
2182 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2183 		return -EPERM;
2184 
2185 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2186 				     rtm_tca_policy, extack);
2187 	if (err < 0)
2188 		return err;
2189 
2190 	t = nlmsg_data(n);
2191 	protocol = TC_H_MIN(t->tcm_info);
2192 	prio = TC_H_MAJ(t->tcm_info);
2193 	parent = t->tcm_parent;
2194 
2195 	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2196 		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2197 		return -ENOENT;
2198 	}
2199 
2200 	/* Find head of filter chain. */
2201 
2202 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2203 	if (err)
2204 		return err;
2205 
2206 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2207 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2208 		err = -EINVAL;
2209 		goto errout;
2210 	}
2211 	/* Take the rtnl mutex if: flushing a whole chain, the block is shared
2212 	 * (no qdisc found), the qdisc is not unlocked, the classifier type is
2213 	 * unspecified, or the classifier is not unlocked.
2214 	 */
2215 	if (!prio ||
2216 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2217 	    !tcf_proto_is_unlocked(name)) {
2218 		rtnl_held = true;
2219 		rtnl_lock();
2220 	}
2221 
2222 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2223 	if (err)
2224 		goto errout;
2225 
2226 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2227 				 extack);
2228 	if (IS_ERR(block)) {
2229 		err = PTR_ERR(block);
2230 		goto errout;
2231 	}
2232 
2233 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2234 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2235 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2236 		err = -EINVAL;
2237 		goto errout;
2238 	}
2239 	chain = tcf_chain_get(block, chain_index, false);
2240 	if (!chain) {
2241 		/* User requested flush on non-existent chain. Nothing to do,
2242 		 * so just return success.
2243 		 */
2244 		if (prio == 0) {
2245 			err = 0;
2246 			goto errout;
2247 		}
2248 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2249 		err = -ENOENT;
2250 		goto errout;
2251 	}
2252 
2253 	if (prio == 0) {
2254 		tfilter_notify_chain(net, skb, block, q, parent, n,
2255 				     chain, RTM_DELTFILTER);
2256 		tcf_chain_flush(chain, rtnl_held);
2257 		err = 0;
2258 		goto errout;
2259 	}
2260 
2261 	mutex_lock(&chain->filter_chain_lock);
2262 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2263 			       prio, false);
2264 	if (!tp || IS_ERR(tp)) {
2265 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2266 		err = tp ? PTR_ERR(tp) : -ENOENT;
2267 		goto errout_locked;
2268 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2269 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2270 		err = -EINVAL;
2271 		goto errout_locked;
2272 	} else if (t->tcm_handle == 0) {
2273 		tcf_proto_signal_destroying(chain, tp);
2274 		tcf_chain_tp_remove(chain, &chain_info, tp);
2275 		mutex_unlock(&chain->filter_chain_lock);
2276 
2277 		tcf_proto_put(tp, rtnl_held, NULL);
2278 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2279 			       RTM_DELTFILTER, false, rtnl_held);
2280 		err = 0;
2281 		goto errout;
2282 	}
2283 	mutex_unlock(&chain->filter_chain_lock);
2284 
2285 	fh = tp->ops->get(tp, t->tcm_handle);
2286 
2287 	if (!fh) {
2288 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2289 		err = -ENOENT;
2290 	} else {
2291 		bool last;
2292 
2293 		err = tfilter_del_notify(net, skb, n, tp, block,
2294 					 q, parent, fh, false, &last,
2295 					 rtnl_held, extack);
2296 
2297 		if (err)
2298 			goto errout;
2299 		if (last)
2300 			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2301 	}
2302 
2303 errout:
2304 	if (chain) {
2305 		if (tp && !IS_ERR(tp))
2306 			tcf_proto_put(tp, rtnl_held, NULL);
2307 		tcf_chain_put(chain);
2308 	}
2309 	tcf_block_release(q, block, rtnl_held);
2310 
2311 	if (rtnl_held)
2312 		rtnl_unlock();
2313 
2314 	return err;
2315 
2316 errout_locked:
2317 	mutex_unlock(&chain->filter_chain_lock);
2318 	goto errout;
2319 }
2320 
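/* RTM_GETTFILTER handler: look up one filter and unicast it back to the
 * requester (the "tc filter get" path). Unlike add/delete this needs no
 * CAP_NET_ADMIN, and a zero priority is rejected outright.
 */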
2321 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2322 			  struct netlink_ext_ack *extack)
2323 {
2324 	struct net *net = sock_net(skb->sk);
2325 	struct nlattr *tca[TCA_MAX + 1];
2326 	char name[IFNAMSIZ];
2327 	struct tcmsg *t;
2328 	u32 protocol;
2329 	u32 prio;
2330 	u32 parent;
2331 	u32 chain_index;
2332 	struct Qdisc *q = NULL;
2333 	struct tcf_chain_info chain_info;
2334 	struct tcf_chain *chain = NULL;
2335 	struct tcf_block *block = NULL;
2336 	struct tcf_proto *tp = NULL;
2337 	unsigned long cl = 0;
2338 	void *fh = NULL;
2339 	int err;
2340 	bool rtnl_held = false;
2341 
2342 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2343 				     rtm_tca_policy, extack);
2344 	if (err < 0)
2345 		return err;
2346 
2347 	t = nlmsg_data(n);
2348 	protocol = TC_H_MIN(t->tcm_info);
2349 	prio = TC_H_MAJ(t->tcm_info);
2350 	parent = t->tcm_parent;
2351 
2352 	if (prio == 0) {
2353 		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2354 		return -ENOENT;
2355 	}
2356 
2357 	/* Find head of filter chain. */
2358 
2359 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2360 	if (err)
2361 		return err;
2362 
2363 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2364 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2365 		err = -EINVAL;
2366 		goto errout;
2367 	}
2368 	/* Take the rtnl mutex if: the block is shared (no qdisc found), the
2369 	 * qdisc is not unlocked, the classifier type is unspecified, or the
2370 	 * classifier is not unlocked.
2371 	 */
2372 	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2373 	    !tcf_proto_is_unlocked(name)) {
2374 		rtnl_held = true;
2375 		rtnl_lock();
2376 	}
2377 
2378 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2379 	if (err)
2380 		goto errout;
2381 
2382 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2383 				 extack);
2384 	if (IS_ERR(block)) {
2385 		err = PTR_ERR(block);
2386 		goto errout;
2387 	}
2388 
2389 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2390 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2391 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2392 		err = -EINVAL;
2393 		goto errout;
2394 	}
2395 	chain = tcf_chain_get(block, chain_index, false);
2396 	if (!chain) {
2397 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2398 		err = -EINVAL;
2399 		goto errout;
2400 	}
2401 
2402 	mutex_lock(&chain->filter_chain_lock);
2403 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2404 			       prio, false);
2405 	mutex_unlock(&chain->filter_chain_lock);
2406 	if (!tp || IS_ERR(tp)) {
2407 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2408 		err = tp ? PTR_ERR(tp) : -ENOENT;
2409 		goto errout;
2410 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2411 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2412 		err = -EINVAL;
2413 		goto errout;
2414 	}
2415 
2416 	fh = tp->ops->get(tp, t->tcm_handle);
2417 
2418 	if (!fh) {
2419 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2420 		err = -ENOENT;
2421 	} else {
2422 		err = tfilter_notify(net, skb, n, tp, block, q, parent,
2423 				     fh, RTM_NEWTFILTER, true, rtnl_held);
2424 		if (err < 0)
2425 			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2426 	}
2427 
2428 	tfilter_put(tp, fh);
2429 errout:
2430 	if (chain) {
2431 		if (tp && !IS_ERR(tp))
2432 			tcf_proto_put(tp, rtnl_held, NULL);
2433 		tcf_chain_put(chain);
2434 	}
2435 	tcf_block_release(q, block, rtnl_held);
2436 
2437 	if (rtnl_held)
2438 		rtnl_unlock();
2439 
2440 	return err;
2441 }
2442 
2443 struct tcf_dump_args {
2444 	struct tcf_walker w;
2445 	struct sk_buff *skb;
2446 	struct netlink_callback *cb;
2447 	struct tcf_block *block;
2448 	struct Qdisc *q;
2449 	u32 parent;
2450 	bool terse_dump;
2451 };
2452 
2453 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2454 {
2455 	struct tcf_dump_args *a = (void *)arg;
2456 	struct net *net = sock_net(a->skb->sk);
2457 
2458 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2459 			     n, NETLINK_CB(a->cb->skb).portid,
2460 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2461 			     RTM_NEWTFILTER, a->terse_dump, true);
2462 }
2463 
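/* Dump all filters of a single chain. Resume state is kept in cb->args:
 * args[0] holds the flat count of protos already dumped (maintained by the
 * caller), args[1] the 1-based node progress within the current proto, and
 * args[2] an opaque cookie for the classifier's walker.
 */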
2464 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2465 			   struct sk_buff *skb, struct netlink_callback *cb,
2466 			   long index_start, long *p_index, bool terse)
2467 {
2468 	struct net *net = sock_net(skb->sk);
2469 	struct tcf_block *block = chain->block;
2470 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2471 	struct tcf_proto *tp, *tp_prev;
2472 	struct tcf_dump_args arg;
2473 
2474 	for (tp = __tcf_get_next_proto(chain, NULL);
2475 	     tp;
2476 	     tp_prev = tp,
2477 		     tp = __tcf_get_next_proto(chain, tp),
2478 		     tcf_proto_put(tp_prev, true, NULL),
2479 		     (*p_index)++) {
2480 		if (*p_index < index_start)
2481 			continue;
2482 		if (TC_H_MAJ(tcm->tcm_info) &&
2483 		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
2484 			continue;
2485 		if (TC_H_MIN(tcm->tcm_info) &&
2486 		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
2487 			continue;
2488 		if (*p_index > index_start)
2489 			memset(&cb->args[1], 0,
2490 			       sizeof(cb->args) - sizeof(cb->args[0]));
2491 		if (cb->args[1] == 0) {
2492 			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2493 					  NETLINK_CB(cb->skb).portid,
2494 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
2495 					  RTM_NEWTFILTER, false, true) <= 0)
2496 				goto errout;
2497 			cb->args[1] = 1;
2498 		}
2499 		if (!tp->ops->walk)
2500 			continue;
2501 		arg.w.fn = tcf_node_dump;
2502 		arg.skb = skb;
2503 		arg.cb = cb;
2504 		arg.block = block;
2505 		arg.q = q;
2506 		arg.parent = parent;
2507 		arg.w.stop = 0;
2508 		arg.w.skip = cb->args[1] - 1;
2509 		arg.w.count = 0;
2510 		arg.w.cookie = cb->args[2];
2511 		arg.terse_dump = terse;
2512 		tp->ops->walk(tp, &arg.w, true);
2513 		cb->args[2] = arg.w.cookie;
2514 		cb->args[1] = arg.w.count + 1;
2515 		if (arg.w.stop)
2516 			goto errout;
2517 	}
2518 	return true;
2519 
2520 errout:
2521 	tcf_proto_put(tp, true, NULL);
2522 	return false;
2523 }
2524 
2525 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2526 	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2527 };
2528 
2529 /* called with RTNL */
2530 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2531 {
2532 	struct tcf_chain *chain, *chain_prev;
2533 	struct net *net = sock_net(skb->sk);
2534 	struct nlattr *tca[TCA_MAX + 1];
2535 	struct Qdisc *q = NULL;
2536 	struct tcf_block *block;
2537 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2538 	bool terse_dump = false;
2539 	long index_start;
2540 	long index;
2541 	u32 parent;
2542 	int err;
2543 
2544 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2545 		return skb->len;
2546 
2547 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2548 				     tcf_tfilter_dump_policy, cb->extack);
2549 	if (err)
2550 		return err;
2551 
2552 	if (tca[TCA_DUMP_FLAGS]) {
2553 		struct nla_bitfield32 flags =
2554 			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2555 
2556 		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2557 	}
2558 
2559 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2560 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2561 		if (!block)
2562 			goto out;
2563 		/* If we work with a block index, q is NULL and the parent value
2564 		 * is never used in the following code. The check
2565 		 * in tcf_fill_node() prevents it. However, the compiler does not
2566 		 * see that far, so set parent to zero to silence the warning
2567 		 * about parent being uninitialized.
2568 		 */
2569 		parent = 0;
2570 	} else {
2571 		const struct Qdisc_class_ops *cops;
2572 		struct net_device *dev;
2573 		unsigned long cl = 0;
2574 
2575 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2576 		if (!dev)
2577 			return skb->len;
2578 
2579 		parent = tcm->tcm_parent;
2580 		if (!parent)
2581 			q = dev->qdisc;
2582 		else
2583 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2584 		if (!q)
2585 			goto out;
2586 		cops = q->ops->cl_ops;
2587 		if (!cops)
2588 			goto out;
2589 		if (!cops->tcf_block)
2590 			goto out;
2591 		if (TC_H_MIN(tcm->tcm_parent)) {
2592 			cl = cops->find(q, tcm->tcm_parent);
2593 			if (cl == 0)
2594 				goto out;
2595 		}
2596 		block = cops->tcf_block(q, cl, NULL);
2597 		if (!block)
2598 			goto out;
2599 		parent = block->classid;
2600 		if (tcf_block_shared(block))
2601 			q = NULL;
2602 	}
2603 
2604 	index_start = cb->args[0];
2605 	index = 0;
2606 
2607 	for (chain = __tcf_get_next_chain(block, NULL);
2608 	     chain;
2609 	     chain_prev = chain,
2610 		     chain = __tcf_get_next_chain(block, chain),
2611 		     tcf_chain_put(chain_prev)) {
2612 		if (tca[TCA_CHAIN] &&
2613 		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2614 			continue;
2615 		if (!tcf_chain_dump(chain, q, parent, skb, cb,
2616 				    index_start, &index, terse_dump)) {
2617 			tcf_chain_put(chain);
2618 			err = -EMSGSIZE;
2619 			break;
2620 		}
2621 	}
2622 
2623 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2624 		tcf_block_refcnt_put(block, true);
2625 	cb->args[0] = index;
2626 
2627 out:
2628 	/* If we made no progress, the error (EMSGSIZE) is real */
2629 	if (skb->len == 0 && err)
2630 		return err;
2631 	return skb->len;
2632 }
2633 
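/* Fill a chain event message. Unlike tcf_fill_node() this carries only the
 * chain index plus, when a template is set, the template kind and its
 * classifier-specific dump; a chain itself has no handle or stats.
 */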
2634 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2635 			      void *tmplt_priv, u32 chain_index,
2636 			      struct net *net, struct sk_buff *skb,
2637 			      struct tcf_block *block,
2638 			      u32 portid, u32 seq, u16 flags, int event)
2639 {
2640 	unsigned char *b = skb_tail_pointer(skb);
2641 	const struct tcf_proto_ops *ops;
2642 	struct nlmsghdr *nlh;
2643 	struct tcmsg *tcm;
2644 	void *priv;
2645 
2646 	ops = tmplt_ops;
2647 	priv = tmplt_priv;
2648 
2649 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2650 	if (!nlh)
2651 		goto out_nlmsg_trim;
2652 	tcm = nlmsg_data(nlh);
2653 	tcm->tcm_family = AF_UNSPEC;
2654 	tcm->tcm__pad1 = 0;
2655 	tcm->tcm__pad2 = 0;
2656 	tcm->tcm_handle = 0;
2657 	if (block->q) {
2658 		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2659 		tcm->tcm_parent = block->q->handle;
2660 	} else {
2661 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2662 		tcm->tcm_block_index = block->index;
2663 	}
2664 
2665 	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2666 		goto nla_put_failure;
2667 
2668 	if (ops) {
2669 		if (nla_put_string(skb, TCA_KIND, ops->kind))
2670 			goto nla_put_failure;
2671 		if (ops->tmplt_dump(skb, net, priv) < 0)
2672 			goto nla_put_failure;
2673 	}
2674 
2675 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2676 	return skb->len;
2677 
2678 out_nlmsg_trim:
2679 nla_put_failure:
2680 	nlmsg_trim(skb, b);
2681 	return -EMSGSIZE;
2682 }
2683 
2684 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2685 			   u32 seq, u16 flags, int event, bool unicast)
2686 {
2687 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2688 	struct tcf_block *block = chain->block;
2689 	struct net *net = block->net;
2690 	struct sk_buff *skb;
2691 	int err = 0;
2692 
2693 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2694 	if (!skb)
2695 		return -ENOBUFS;
2696 
2697 	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2698 			       chain->index, net, skb, block, portid,
2699 			       seq, flags, event) <= 0) {
2700 		kfree_skb(skb);
2701 		return -EINVAL;
2702 	}
2703 
2704 	if (unicast)
2705 		err = rtnl_unicast(skb, net, portid);
2706 	else
2707 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2708 				     flags & NLM_F_ECHO);
2709 
2710 	return err;
2711 }
2712 
2713 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2714 				  void *tmplt_priv, u32 chain_index,
2715 				  struct tcf_block *block, struct sk_buff *oskb,
2716 				  u32 seq, u16 flags, bool unicast)
2717 {
2718 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2719 	struct net *net = block->net;
2720 	struct sk_buff *skb;
2721 
2722 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2723 	if (!skb)
2724 		return -ENOBUFS;
2725 
2726 	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2727 			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2728 		kfree_skb(skb);
2729 		return -EINVAL;
2730 	}
2731 
2732 	if (unicast)
2733 		return rtnl_unicast(skb, net, portid);
2734 
2735 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2736 }
2737 
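/* Bind a chain template to the chain. A template pins the chain to a single
 * classifier kind and lets that classifier pre-validate state shared by the
 * filters added later, e.g. (illustrative)
 *
 *	tc chain add dev eth0 ingress protocol ip flower dst_ip 192.0.2.0/24
 *
 * creates a flower template on chain 0 of eth0's ingress block.
 */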
2738 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2739 			      struct nlattr **tca,
2740 			      struct netlink_ext_ack *extack)
2741 {
2742 	const struct tcf_proto_ops *ops;
2743 	char name[IFNAMSIZ];
2744 	void *tmplt_priv;
2745 
2746 	/* If kind is not set, the user did not specify a template. */
2747 	if (!tca[TCA_KIND])
2748 		return 0;
2749 
2750 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2751 		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2752 		return -EINVAL;
2753 	}
2754 
2755 	ops = tcf_proto_lookup_ops(name, true, extack);
2756 	if (IS_ERR(ops))
2757 		return PTR_ERR(ops);
2758 	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2759 		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2760 		return -EOPNOTSUPP;
2761 	}
2762 
2763 	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2764 	if (IS_ERR(tmplt_priv)) {
2765 		module_put(ops->owner);
2766 		return PTR_ERR(tmplt_priv);
2767 	}
2768 	chain->tmplt_ops = ops;
2769 	chain->tmplt_priv = tmplt_priv;
2770 	return 0;
2771 }
2772 
2773 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2774 			       void *tmplt_priv)
2775 {
2776 	/* If template ops are not set, there is no work to do. */
2777 	if (!tmplt_ops)
2778 		return;
2779 
2780 	tmplt_ops->tmplt_destroy(tmplt_priv);
2781 	module_put(tmplt_ops->owner);
2782 }
2783 
2784 /* Add/delete/get a chain */
2785 
2786 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2787 			struct netlink_ext_ack *extack)
2788 {
2789 	struct net *net = sock_net(skb->sk);
2790 	struct nlattr *tca[TCA_MAX + 1];
2791 	struct tcmsg *t;
2792 	u32 parent;
2793 	u32 chain_index;
2794 	struct Qdisc *q = NULL;
2795 	struct tcf_chain *chain = NULL;
2796 	struct tcf_block *block;
2797 	unsigned long cl;
2798 	int err;
2799 
2800 	if (n->nlmsg_type != RTM_GETCHAIN &&
2801 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2802 		return -EPERM;
2803 
2804 replay:
2805 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2806 				     rtm_tca_policy, extack);
2807 	if (err < 0)
2808 		return err;
2809 
2810 	t = nlmsg_data(n);
2811 	parent = t->tcm_parent;
2812 	cl = 0;
2813 
2814 	block = tcf_block_find(net, &q, &parent, &cl,
2815 			       t->tcm_ifindex, t->tcm_block_index, extack);
2816 	if (IS_ERR(block))
2817 		return PTR_ERR(block);
2818 
2819 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2820 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2821 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2822 		err = -EINVAL;
2823 		goto errout_block;
2824 	}
2825 
2826 	mutex_lock(&block->lock);
2827 	chain = tcf_chain_lookup(block, chain_index);
2828 	if (n->nlmsg_type == RTM_NEWCHAIN) {
2829 		if (chain) {
2830 			if (tcf_chain_held_by_acts_only(chain)) {
2831 				/* The chain exists only because there is
2832 				 * some action referencing it.
2833 				 */
2834 				tcf_chain_hold(chain);
2835 			} else {
2836 				NL_SET_ERR_MSG(extack, "Filter chain already exists");
2837 				err = -EEXIST;
2838 				goto errout_block_locked;
2839 			}
2840 		} else {
2841 			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2842 				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2843 				err = -ENOENT;
2844 				goto errout_block_locked;
2845 			}
2846 			chain = tcf_chain_create(block, chain_index);
2847 			if (!chain) {
2848 				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2849 				err = -ENOMEM;
2850 				goto errout_block_locked;
2851 			}
2852 		}
2853 	} else {
2854 		if (!chain || tcf_chain_held_by_acts_only(chain)) {
2855 			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2856 			err = -EINVAL;
2857 			goto errout_block_locked;
2858 		}
2859 		tcf_chain_hold(chain);
2860 	}
2861 
2862 	if (n->nlmsg_type == RTM_NEWCHAIN) {
2863 		/* Modifying the chain requires holding the parent block lock.
2864 		 * Take an extra reference to the chain that was just added or
2865 		 * looked up, so that even an empty chain cannot disappear
2866 		 * before the end of this function.
2867 		 */
2868 		tcf_chain_hold(chain);
2869 		chain->explicitly_created = true;
2870 	}
2871 	mutex_unlock(&block->lock);
2872 
2873 	switch (n->nlmsg_type) {
2874 	case RTM_NEWCHAIN:
2875 		err = tc_chain_tmplt_add(chain, net, tca, extack);
2876 		if (err) {
2877 			tcf_chain_put_explicitly_created(chain);
2878 			goto errout;
2879 		}
2880 
2881 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2882 				RTM_NEWCHAIN, false);
2883 		break;
2884 	case RTM_DELCHAIN:
2885 		tfilter_notify_chain(net, skb, block, q, parent, n,
2886 				     chain, RTM_DELTFILTER);
2887 		/* Flush the chain first as the user requested chain removal. */
2888 		tcf_chain_flush(chain, true);
2889 		/* If the chain was successfully deleted, drop the reference
2890 		 * that was taken when it was added.
2891 		 */
2892 		tcf_chain_put_explicitly_created(chain);
2893 		break;
2894 	case RTM_GETCHAIN:
2895 		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2896 				      n->nlmsg_flags, n->nlmsg_type, true);
2897 		if (err < 0)
2898 			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2899 		break;
2900 	default:
2901 		err = -EOPNOTSUPP;
2902 		NL_SET_ERR_MSG(extack, "Unsupported message type");
2903 		goto errout;
2904 	}
2905 
2906 errout:
2907 	tcf_chain_put(chain);
2908 errout_block:
2909 	tcf_block_release(q, block, true);
2910 	if (err == -EAGAIN)
2911 		/* Replay the request. */
2912 		goto replay;
2913 	return err;
2914 
2915 errout_block_locked:
2916 	mutex_unlock(&block->lock);
2917 	goto errout_block;
2918 }
2919 
2920 /* called with RTNL */
2921 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2922 {
2923 	struct net *net = sock_net(skb->sk);
2924 	struct nlattr *tca[TCA_MAX + 1];
2925 	struct Qdisc *q = NULL;
2926 	struct tcf_block *block;
2927 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2928 	struct tcf_chain *chain;
2929 	long index_start;
2930 	long index;
2931 	int err;
2932 
2933 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2934 		return skb->len;
2935 
2936 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2937 				     rtm_tca_policy, cb->extack);
2938 	if (err)
2939 		return err;
2940 
2941 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2942 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2943 		if (!block)
2944 			goto out;
2945 	} else {
2946 		const struct Qdisc_class_ops *cops;
2947 		struct net_device *dev;
2948 		unsigned long cl = 0;
2949 
2950 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2951 		if (!dev)
2952 			return skb->len;
2953 
2954 		if (!tcm->tcm_parent)
2955 			q = dev->qdisc;
2956 		else
2957 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2958 
2959 		if (!q)
2960 			goto out;
2961 		cops = q->ops->cl_ops;
2962 		if (!cops)
2963 			goto out;
2964 		if (!cops->tcf_block)
2965 			goto out;
2966 		if (TC_H_MIN(tcm->tcm_parent)) {
2967 			cl = cops->find(q, tcm->tcm_parent);
2968 			if (cl == 0)
2969 				goto out;
2970 		}
2971 		block = cops->tcf_block(q, cl, NULL);
2972 		if (!block)
2973 			goto out;
2974 		if (tcf_block_shared(block))
2975 			q = NULL;
2976 	}
2977 
2978 	index_start = cb->args[0];
2979 	index = 0;
2980 
2981 	mutex_lock(&block->lock);
2982 	list_for_each_entry(chain, &block->chain_list, list) {
2983 		if ((tca[TCA_CHAIN] &&
2984 		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2985 			continue;
2986 		if (index < index_start) {
2987 			index++;
2988 			continue;
2989 		}
2990 		if (tcf_chain_held_by_acts_only(chain))
2991 			continue;
2992 		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2993 					 chain->index, net, skb, block,
2994 					 NETLINK_CB(cb->skb).portid,
2995 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2996 					 RTM_NEWCHAIN);
2997 		if (err <= 0)
2998 			break;
2999 		index++;
3000 	}
3001 	mutex_unlock(&block->lock);
3002 
3003 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3004 		tcf_block_refcnt_put(block, true);
3005 	cb->args[0] = index;
3006 
3007 out:
3008 	/* If we made no progress, the error (EMSGSIZE) is real */
3009 	if (skb->len == 0 && err)
3010 		return err;
3011 	return skb->len;
3012 }
3013 
3014 void tcf_exts_destroy(struct tcf_exts *exts)
3015 {
3016 #ifdef CONFIG_NET_CLS_ACT
3017 	if (exts->actions) {
3018 		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3019 		kfree(exts->actions);
3020 	}
3021 	exts->nr_actions = 0;
3022 #endif
3023 }
3024 EXPORT_SYMBOL(tcf_exts_destroy);
3025 
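/* Parse the action attributes of a filter change request and bind the
 * resulting actions to @exts: either the legacy single-police attribute
 * (TCA_OLD_COMPAT) or the regular nested action list.
 */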
3026 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3027 		      struct nlattr *rate_tlv, struct tcf_exts *exts,
3028 		      u32 flags, struct netlink_ext_ack *extack)
3029 {
3030 #ifdef CONFIG_NET_CLS_ACT
3031 	{
3032 		int init_res[TCA_ACT_MAX_PRIO] = {};
3033 		struct tc_action *act;
3034 		size_t attr_size = 0;
3035 
3036 		if (exts->police && tb[exts->police]) {
3037 			struct tc_action_ops *a_o;
3038 
3039 			a_o = tc_action_load_ops(tb[exts->police], true,
3040 						 !(flags & TCA_ACT_FLAGS_NO_RTNL),
3041 						 extack);
3042 			if (IS_ERR(a_o))
3043 				return PTR_ERR(a_o);
3044 			flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3045 			act = tcf_action_init_1(net, tp, tb[exts->police],
3046 						rate_tlv, a_o, init_res, flags,
3047 						extack);
3048 			module_put(a_o->owner);
3049 			if (IS_ERR(act))
3050 				return PTR_ERR(act);
3051 
3052 			act->type = exts->type = TCA_OLD_COMPAT;
3053 			exts->actions[0] = act;
3054 			exts->nr_actions = 1;
3055 			tcf_idr_insert_many(exts->actions);
3056 		} else if (exts->action && tb[exts->action]) {
3057 			int err;
3058 
3059 			flags |= TCA_ACT_FLAGS_BIND;
3060 			err = tcf_action_init(net, tp, tb[exts->action],
3061 					      rate_tlv, exts->actions, init_res,
3062 					      &attr_size, flags, extack);
3063 			if (err < 0)
3064 				return err;
3065 			exts->nr_actions = err;
3066 		}
3067 	}
3068 #else
3069 	if ((exts->action && tb[exts->action]) ||
3070 	    (exts->police && tb[exts->police])) {
3071 		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3072 		return -EOPNOTSUPP;
3073 	}
3074 #endif
3075 
3076 	return 0;
3077 }
3078 EXPORT_SYMBOL(tcf_exts_validate);
3079 
3080 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3081 {
3082 #ifdef CONFIG_NET_CLS_ACT
3083 	struct tcf_exts old = *dst;
3084 
3085 	*dst = *src;
3086 	tcf_exts_destroy(&old);
3087 #endif
3088 }
3089 EXPORT_SYMBOL(tcf_exts_change);
3090 
3091 #ifdef CONFIG_NET_CLS_ACT
3092 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3093 {
3094 	if (exts->nr_actions == 0)
3095 		return NULL;
3096 	else
3097 		return exts->actions[0];
3098 }
3099 #endif
3100 
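/* Dump the actions attached to a filter. Two layouts are supported: the old
 * police-only compat format (TCA_OLD_COMPAT) and the regular nested actions
 * attribute; both are kept so that old and new iproute2 binaries continue to
 * work.
 */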
3101 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3102 {
3103 #ifdef CONFIG_NET_CLS_ACT
3104 	struct nlattr *nest;
3105 
3106 	if (exts->action && tcf_exts_has_actions(exts)) {
3107 		/*
3108 		 * Again for backward-compatible mode: we want to work with
3109 		 * both the old and the new modes of entering tc data, even
3110 		 * if iproute2 is newer - jhs
3111 		 */
3112 		if (exts->type != TCA_OLD_COMPAT) {
3113 			nest = nla_nest_start_noflag(skb, exts->action);
3114 			if (nest == NULL)
3115 				goto nla_put_failure;
3116 
3117 			if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3118 			    < 0)
3119 				goto nla_put_failure;
3120 			nla_nest_end(skb, nest);
3121 		} else if (exts->police) {
3122 			struct tc_action *act = tcf_exts_first_act(exts);
3123 			nest = nla_nest_start_noflag(skb, exts->police);
3124 			if (nest == NULL || !act)
3125 				goto nla_put_failure;
3126 			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3127 				goto nla_put_failure;
3128 			nla_nest_end(skb, nest);
3129 		}
3130 	}
3131 	return 0;
3132 
3133 nla_put_failure:
3134 	nla_nest_cancel(skb, nest);
3135 	return -1;
3136 #else
3137 	return 0;
3138 #endif
3139 }
3140 EXPORT_SYMBOL(tcf_exts_dump);
3141 
3142 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3143 {
3144 #ifdef CONFIG_NET_CLS_ACT
3145 	struct nlattr *nest;
3146 
3147 	if (!exts->action || !tcf_exts_has_actions(exts))
3148 		return 0;
3149 
3150 	nest = nla_nest_start_noflag(skb, exts->action);
3151 	if (!nest)
3152 		goto nla_put_failure;
3153 
3154 	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3155 		goto nla_put_failure;
3156 	nla_nest_end(skb, nest);
3157 	return 0;
3158 
3159 nla_put_failure:
3160 	nla_nest_cancel(skb, nest);
3161 	return -1;
3162 #else
3163 	return 0;
3164 #endif
3165 }
3166 EXPORT_SYMBOL(tcf_exts_terse_dump);
3167 
3168 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3169 {
3170 #ifdef CONFIG_NET_CLS_ACT
3171 	struct tc_action *a = tcf_exts_first_act(exts);
3172 	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3173 		return -1;
3174 #endif
3175 	return 0;
3176 }
3177 EXPORT_SYMBOL(tcf_exts_dump_stats);
3178 
3179 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3180 {
3181 	if (*flags & TCA_CLS_FLAGS_IN_HW)
3182 		return;
3183 	*flags |= TCA_CLS_FLAGS_IN_HW;
3184 	atomic_inc(&block->offloadcnt);
3185 }
3186 
3187 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3188 {
3189 	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3190 		return;
3191 	*flags &= ~TCA_CLS_FLAGS_IN_HW;
3192 	atomic_dec(&block->offloadcnt);
3193 }
3194 
3195 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3196 				      struct tcf_proto *tp, u32 *cnt,
3197 				      u32 *flags, u32 diff, bool add)
3198 {
3199 	lockdep_assert_held(&block->cb_lock);
3200 
3201 	spin_lock(&tp->lock);
3202 	if (add) {
3203 		if (!*cnt)
3204 			tcf_block_offload_inc(block, flags);
3205 		*cnt += diff;
3206 	} else {
3207 		*cnt -= diff;
3208 		if (!*cnt)
3209 			tcf_block_offload_dec(block, flags);
3210 	}
3211 	spin_unlock(&tp->lock);
3212 }
3213 
3214 static void
3215 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3216 			 u32 *cnt, u32 *flags)
3217 {
3218 	lockdep_assert_held(&block->cb_lock);
3219 
3220 	spin_lock(&tp->lock);
3221 	tcf_block_offload_dec(block, flags);
3222 	*cnt = 0;
3223 	spin_unlock(&tp->lock);
3224 }
3225 
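/* Invoke every callback registered on the block with the offload request.
 * Returns the number of callbacks that accepted the request, or, when
 * err_stop is set, the first error encountered.
 */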
3226 static int
3227 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3228 		   void *type_data, bool err_stop)
3229 {
3230 	struct flow_block_cb *block_cb;
3231 	int ok_count = 0;
3232 	int err;
3233 
3234 	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3235 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
3236 		if (err) {
3237 			if (err_stop)
3238 				return err;
3239 		} else {
3240 			ok_count++;
3241 		}
3242 	}
3243 	return ok_count;
3244 }
3245 
3246 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3247 		     void *type_data, bool err_stop, bool rtnl_held)
3248 {
3249 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3250 	int ok_count;
3251 
3252 retry:
3253 	if (take_rtnl)
3254 		rtnl_lock();
3255 	down_read(&block->cb_lock);
3256 	/* We need to obtain the rtnl lock if the block is bound to devs that
3257 	 * require it. In the block bind code cb_lock is obtained while holding
3258 	 * rtnl, so we must obtain the locks in the same order here.
3259 	 */
3260 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3261 		up_read(&block->cb_lock);
3262 		take_rtnl = true;
3263 		goto retry;
3264 	}
3265 
3266 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3267 
3268 	up_read(&block->cb_lock);
3269 	if (take_rtnl)
3270 		rtnl_unlock();
3271 	return ok_count;
3272 }
3273 EXPORT_SYMBOL(tc_setup_cb_call);
3274 
3275 /* Non-destructive filter add. If a filter that wasn't already in hardware is
3276  * successfully offloaded, increment the block offloads counter. On failure,
3277  * the previously offloaded filter is considered intact and the offloads
3278  * counter is not decremented.
3279  */
3280 
3281 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3282 		    enum tc_setup_type type, void *type_data, bool err_stop,
3283 		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3284 {
3285 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3286 	int ok_count;
3287 
3288 retry:
3289 	if (take_rtnl)
3290 		rtnl_lock();
3291 	down_read(&block->cb_lock);
3292 	/* We need to obtain the rtnl lock if the block is bound to devs that
3293 	 * require it. In the block bind code cb_lock is obtained while holding
3294 	 * rtnl, so we must obtain the locks in the same order here.
3295 	 */
3296 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3297 		up_read(&block->cb_lock);
3298 		take_rtnl = true;
3299 		goto retry;
3300 	}
3301 
3302 	/* Make sure all netdevs sharing this block are offload-capable. */
3303 	if (block->nooffloaddevcnt && err_stop) {
3304 		ok_count = -EOPNOTSUPP;
3305 		goto err_unlock;
3306 	}
3307 
3308 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3309 	if (ok_count < 0)
3310 		goto err_unlock;
3311 
3312 	if (tp->ops->hw_add)
3313 		tp->ops->hw_add(tp, type_data);
3314 	if (ok_count > 0)
3315 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3316 					  ok_count, true);
3317 err_unlock:
3318 	up_read(&block->cb_lock);
3319 	if (take_rtnl)
3320 		rtnl_unlock();
3321 	return ok_count < 0 ? ok_count : 0;
3322 }
3323 EXPORT_SYMBOL(tc_setup_cb_add);
3324 
3325 /* Destructive filter replace. If a filter that wasn't already in hardware is
3326  * successfully offloaded, increment the block offload counter. On failure,
3327  * the previously offloaded filter is considered destroyed and the offload
3328  * counter is decremented.
3329  */
3330 
3331 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3332 			enum tc_setup_type type, void *type_data, bool err_stop,
3333 			u32 *old_flags, unsigned int *old_in_hw_count,
3334 			u32 *new_flags, unsigned int *new_in_hw_count,
3335 			bool rtnl_held)
3336 {
3337 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3338 	int ok_count;
3339 
3340 retry:
3341 	if (take_rtnl)
3342 		rtnl_lock();
3343 	down_read(&block->cb_lock);
3344 	/* We need to obtain the rtnl lock if the block is bound to devs that
3345 	 * require it. In the block bind code cb_lock is obtained while holding
3346 	 * rtnl, so we must obtain the locks in the same order here.
3347 	 */
3348 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3349 		up_read(&block->cb_lock);
3350 		take_rtnl = true;
3351 		goto retry;
3352 	}
3353 
3354 	/* Make sure all netdevs sharing this block are offload-capable. */
3355 	if (block->nooffloaddevcnt && err_stop) {
3356 		ok_count = -EOPNOTSUPP;
3357 		goto err_unlock;
3358 	}
3359 
3360 	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3361 	if (tp->ops->hw_del)
3362 		tp->ops->hw_del(tp, type_data);
3363 
3364 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3365 	if (ok_count < 0)
3366 		goto err_unlock;
3367 
3368 	if (tp->ops->hw_add)
3369 		tp->ops->hw_add(tp, type_data);
3370 	if (ok_count > 0)
3371 		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3372 					  new_flags, ok_count, true);
3373 err_unlock:
3374 	up_read(&block->cb_lock);
3375 	if (take_rtnl)
3376 		rtnl_unlock();
3377 	return ok_count < 0 ? ok_count : 0;
3378 }
3379 EXPORT_SYMBOL(tc_setup_cb_replace);
3380 
3381 /* Destroy a filter and decrement the block offload counter if the filter was
3382  * previously offloaded.
3383  */
3384 
3385 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3386 			enum tc_setup_type type, void *type_data, bool err_stop,
3387 			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3388 {
3389 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3390 	int ok_count;
3391 
3392 retry:
3393 	if (take_rtnl)
3394 		rtnl_lock();
3395 	down_read(&block->cb_lock);
3396 	/* We need to obtain the rtnl lock if the block is bound to devs that
3397 	 * require it. In the block bind code cb_lock is obtained while holding
3398 	 * rtnl, so we must obtain the locks in the same order here.
3399 	 */
3400 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3401 		up_read(&block->cb_lock);
3402 		take_rtnl = true;
3403 		goto retry;
3404 	}
3405 
3406 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3407 
3408 	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3409 	if (tp->ops->hw_del)
3410 		tp->ops->hw_del(tp, type_data);
3411 
3412 	up_read(&block->cb_lock);
3413 	if (take_rtnl)
3414 		rtnl_unlock();
3415 	return ok_count < 0 ? ok_count : 0;
3416 }
3417 EXPORT_SYMBOL(tc_setup_cb_destroy);
3418 
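/* Replay a filter to a single callback, e.g. when a new callback is
 * registered on an existing block, and update the in_hw counters accordingly.
 * A callback error is fatal only when adding a skip_sw filter.
 */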
3419 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3420 			  bool add, flow_setup_cb_t *cb,
3421 			  enum tc_setup_type type, void *type_data,
3422 			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3423 {
3424 	int err = cb(type, type_data, cb_priv);
3425 
3426 	if (err) {
3427 		if (add && tc_skip_sw(*flags))
3428 			return err;
3429 	} else {
3430 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3431 					  add);
3432 	}
3433 
3434 	return 0;
3435 }
3436 EXPORT_SYMBOL(tc_setup_cb_reoffload);
3437 
3438 static int tcf_act_get_cookie(struct flow_action_entry *entry,
3439 			      const struct tc_action *act)
3440 {
3441 	struct tc_cookie *cookie;
3442 	int err = 0;
3443 
3444 	rcu_read_lock();
3445 	cookie = rcu_dereference(act->act_cookie);
3446 	if (cookie) {
3447 		entry->cookie = flow_action_cookie_create(cookie->data,
3448 							  cookie->len,
3449 							  GFP_ATOMIC);
3450 		if (!entry->cookie)
3451 			err = -ENOMEM;
3452 	}
3453 	rcu_read_unlock();
3454 	return err;
3455 }
3456 
3457 static void tcf_act_put_cookie(struct flow_action_entry *entry)
3458 {
3459 	flow_action_cookie_destroy(entry->cookie);
3460 }
3461 
3462 void tc_cleanup_flow_action(struct flow_action *flow_action)
3463 {
3464 	struct flow_action_entry *entry;
3465 	int i;
3466 
3467 	flow_action_for_each(i, entry, flow_action) {
3468 		tcf_act_put_cookie(entry);
3469 		if (entry->destructor)
3470 			entry->destructor(entry->destructor_priv);
3471 	}
3472 }
3473 EXPORT_SYMBOL(tc_cleanup_flow_action);
3474 
3475 static void tcf_mirred_get_dev(struct flow_action_entry *entry,
3476 			       const struct tc_action *act)
3477 {
3478 #ifdef CONFIG_NET_CLS_ACT
3479 	entry->dev = act->ops->get_dev(act, &entry->destructor);
3480 	if (!entry->dev)
3481 		return;
3482 	entry->destructor_priv = entry->dev;
3483 #endif
3484 }
3485 
3486 static void tcf_tunnel_encap_put_tunnel(void *priv)
3487 {
3488 	struct ip_tunnel_info *tunnel = priv;
3489 
3490 	kfree(tunnel);
3491 }
3492 
3493 static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
3494 				       const struct tc_action *act)
3495 {
3496 	entry->tunnel = tcf_tunnel_info_copy(act);
3497 	if (!entry->tunnel)
3498 		return -ENOMEM;
3499 	entry->destructor = tcf_tunnel_encap_put_tunnel;
3500 	entry->destructor_priv = entry->tunnel;
3501 	return 0;
3502 }
3503 
3504 static void tcf_sample_get_group(struct flow_action_entry *entry,
3505 				 const struct tc_action *act)
3506 {
3507 #ifdef CONFIG_NET_CLS_ACT
3508 	entry->sample.psample_group =
3509 		act->ops->get_psample_group(act, &entry->destructor);
3510 	entry->destructor_priv = entry->sample.psample_group;
3511 #endif
3512 }
3513 
3514 static void tcf_gate_entry_destructor(void *priv)
3515 {
3516 	struct action_gate_entry *oe = priv;
3517 
3518 	kfree(oe);
3519 }
3520 
3521 static int tcf_gate_get_entries(struct flow_action_entry *entry,
3522 				const struct tc_action *act)
3523 {
3524 	entry->gate.entries = tcf_gate_get_list(act);
3525 
3526 	if (!entry->gate.entries)
3527 		return -EINVAL;
3528 
3529 	entry->destructor = tcf_gate_entry_destructor;
3530 	entry->destructor_priv = entry->gate.entries;
3531 
3532 	return 0;
3533 }
3534 
3535 static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
3536 {
3537 	if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
3538 		return FLOW_ACTION_HW_STATS_DONT_CARE;
3539 	else if (!hw_stats)
3540 		return FLOW_ACTION_HW_STATS_DISABLED;
3541 
3542 	return hw_stats;
3543 }
3544 
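/* Translate the software actions of a filter into flow_action entries that
 * drivers understand. Each action is snapshotted under its tcfa_lock; a pedit
 * action expands into one entry per key, which is why tcf_exts_num_actions()
 * below counts pedit keys rather than actions.
 */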
3545 int tc_setup_flow_action(struct flow_action *flow_action,
3546 			 const struct tcf_exts *exts)
3547 {
3548 	struct tc_action *act;
3549 	int i, j, k, err = 0;
3550 
3551 	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3552 	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3553 	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3554 
3555 	if (!exts)
3556 		return 0;
3557 
3558 	j = 0;
3559 	tcf_exts_for_each_action(i, act, exts) {
3560 		struct flow_action_entry *entry;
3561 
3562 		entry = &flow_action->entries[j];
3563 		spin_lock_bh(&act->tcfa_lock);
3564 		err = tcf_act_get_cookie(entry, act);
3565 		if (err)
3566 			goto err_out_locked;
3567 
3568 		entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3569 
3570 		if (is_tcf_gact_ok(act)) {
3571 			entry->id = FLOW_ACTION_ACCEPT;
3572 		} else if (is_tcf_gact_shot(act)) {
3573 			entry->id = FLOW_ACTION_DROP;
3574 		} else if (is_tcf_gact_trap(act)) {
3575 			entry->id = FLOW_ACTION_TRAP;
3576 		} else if (is_tcf_gact_goto_chain(act)) {
3577 			entry->id = FLOW_ACTION_GOTO;
3578 			entry->chain_index = tcf_gact_goto_chain_index(act);
3579 		} else if (is_tcf_mirred_egress_redirect(act)) {
3580 			entry->id = FLOW_ACTION_REDIRECT;
3581 			tcf_mirred_get_dev(entry, act);
3582 		} else if (is_tcf_mirred_egress_mirror(act)) {
3583 			entry->id = FLOW_ACTION_MIRRED;
3584 			tcf_mirred_get_dev(entry, act);
3585 		} else if (is_tcf_mirred_ingress_redirect(act)) {
3586 			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
3587 			tcf_mirred_get_dev(entry, act);
3588 		} else if (is_tcf_mirred_ingress_mirror(act)) {
3589 			entry->id = FLOW_ACTION_MIRRED_INGRESS;
3590 			tcf_mirred_get_dev(entry, act);
3591 		} else if (is_tcf_vlan(act)) {
3592 			switch (tcf_vlan_action(act)) {
3593 			case TCA_VLAN_ACT_PUSH:
3594 				entry->id = FLOW_ACTION_VLAN_PUSH;
3595 				entry->vlan.vid = tcf_vlan_push_vid(act);
3596 				entry->vlan.proto = tcf_vlan_push_proto(act);
3597 				entry->vlan.prio = tcf_vlan_push_prio(act);
3598 				break;
3599 			case TCA_VLAN_ACT_POP:
3600 				entry->id = FLOW_ACTION_VLAN_POP;
3601 				break;
3602 			case TCA_VLAN_ACT_MODIFY:
3603 				entry->id = FLOW_ACTION_VLAN_MANGLE;
3604 				entry->vlan.vid = tcf_vlan_push_vid(act);
3605 				entry->vlan.proto = tcf_vlan_push_proto(act);
3606 				entry->vlan.prio = tcf_vlan_push_prio(act);
3607 				break;
3608 			default:
3609 				err = -EOPNOTSUPP;
3610 				goto err_out_locked;
3611 			}
3612 		} else if (is_tcf_tunnel_set(act)) {
3613 			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3614 			err = tcf_tunnel_encap_get_tunnel(entry, act);
3615 			if (err)
3616 				goto err_out_locked;
3617 		} else if (is_tcf_tunnel_release(act)) {
3618 			entry->id = FLOW_ACTION_TUNNEL_DECAP;
3619 		} else if (is_tcf_pedit(act)) {
3620 			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3621 				switch (tcf_pedit_cmd(act, k)) {
3622 				case TCA_PEDIT_KEY_EX_CMD_SET:
3623 					entry->id = FLOW_ACTION_MANGLE;
3624 					break;
3625 				case TCA_PEDIT_KEY_EX_CMD_ADD:
3626 					entry->id = FLOW_ACTION_ADD;
3627 					break;
3628 				default:
3629 					err = -EOPNOTSUPP;
3630 					goto err_out_locked;
3631 				}
3632 				entry->mangle.htype = tcf_pedit_htype(act, k);
3633 				entry->mangle.mask = tcf_pedit_mask(act, k);
3634 				entry->mangle.val = tcf_pedit_val(act, k);
3635 				entry->mangle.offset = tcf_pedit_offset(act, k);
3636 				entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3637 				entry = &flow_action->entries[++j];
3638 			}
3639 		} else if (is_tcf_csum(act)) {
3640 			entry->id = FLOW_ACTION_CSUM;
3641 			entry->csum_flags = tcf_csum_update_flags(act);
3642 		} else if (is_tcf_skbedit_mark(act)) {
3643 			entry->id = FLOW_ACTION_MARK;
3644 			entry->mark = tcf_skbedit_mark(act);
3645 		} else if (is_tcf_sample(act)) {
3646 			entry->id = FLOW_ACTION_SAMPLE;
3647 			entry->sample.trunc_size = tcf_sample_trunc_size(act);
3648 			entry->sample.truncate = tcf_sample_truncate(act);
3649 			entry->sample.rate = tcf_sample_rate(act);
3650 			tcf_sample_get_group(entry, act);
3651 		} else if (is_tcf_police(act)) {
3652 			entry->id = FLOW_ACTION_POLICE;
3653 			entry->police.burst = tcf_police_burst(act);
3654 			entry->police.rate_bytes_ps =
3655 				tcf_police_rate_bytes_ps(act);
3656 			entry->police.burst_pkt = tcf_police_burst_pkt(act);
3657 			entry->police.rate_pkt_ps =
3658 				tcf_police_rate_pkt_ps(act);
3659 			entry->police.mtu = tcf_police_tcfp_mtu(act);
3660 			entry->police.index = act->tcfa_index;
3661 		} else if (is_tcf_ct(act)) {
3662 			entry->id = FLOW_ACTION_CT;
3663 			entry->ct.action = tcf_ct_action(act);
3664 			entry->ct.zone = tcf_ct_zone(act);
3665 			entry->ct.flow_table = tcf_ct_ft(act);
3666 		} else if (is_tcf_mpls(act)) {
3667 			switch (tcf_mpls_action(act)) {
3668 			case TCA_MPLS_ACT_PUSH:
3669 				entry->id = FLOW_ACTION_MPLS_PUSH;
3670 				entry->mpls_push.proto = tcf_mpls_proto(act);
3671 				entry->mpls_push.label = tcf_mpls_label(act);
3672 				entry->mpls_push.tc = tcf_mpls_tc(act);
3673 				entry->mpls_push.bos = tcf_mpls_bos(act);
3674 				entry->mpls_push.ttl = tcf_mpls_ttl(act);
3675 				break;
3676 			case TCA_MPLS_ACT_POP:
3677 				entry->id = FLOW_ACTION_MPLS_POP;
3678 				entry->mpls_pop.proto = tcf_mpls_proto(act);
3679 				break;
3680 			case TCA_MPLS_ACT_MODIFY:
3681 				entry->id = FLOW_ACTION_MPLS_MANGLE;
3682 				entry->mpls_mangle.label = tcf_mpls_label(act);
3683 				entry->mpls_mangle.tc = tcf_mpls_tc(act);
3684 				entry->mpls_mangle.bos = tcf_mpls_bos(act);
3685 				entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3686 				break;
3687 			default:
				/* Fail like the other unsupported actions. */
				err = -EOPNOTSUPP;
3688 				goto err_out_locked;
3689 			}
3690 		} else if (is_tcf_skbedit_ptype(act)) {
3691 			entry->id = FLOW_ACTION_PTYPE;
3692 			entry->ptype = tcf_skbedit_ptype(act);
3693 		} else if (is_tcf_skbedit_priority(act)) {
3694 			entry->id = FLOW_ACTION_PRIORITY;
3695 			entry->priority = tcf_skbedit_priority(act);
3696 		} else if (is_tcf_gate(act)) {
3697 			entry->id = FLOW_ACTION_GATE;
3698 			entry->gate.index = tcf_gate_index(act);
3699 			entry->gate.prio = tcf_gate_prio(act);
3700 			entry->gate.basetime = tcf_gate_basetime(act);
3701 			entry->gate.cycletime = tcf_gate_cycletime(act);
3702 			entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
3703 			entry->gate.num_entries = tcf_gate_num_entries(act);
3704 			err = tcf_gate_get_entries(entry, act);
3705 			if (err)
3706 				goto err_out_locked;
3707 		} else {
3708 			err = -EOPNOTSUPP;
3709 			goto err_out_locked;
3710 		}
3711 		spin_unlock_bh(&act->tcfa_lock);
3712 
3713 		if (!is_tcf_pedit(act))
3714 			j++;
3715 	}
3716 
3717 err_out:
3718 	if (err)
3719 		tc_cleanup_flow_action(flow_action);
3720 
3721 	return err;
3722 err_out_locked:
3723 	spin_unlock_bh(&act->tcfa_lock);
3724 	goto err_out;
3725 }
3726 EXPORT_SYMBOL(tc_setup_flow_action);
3727 
3728 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3729 {
3730 	unsigned int num_acts = 0;
3731 	struct tc_action *act;
3732 	int i;
3733 
3734 	tcf_exts_for_each_action(i, act, exts) {
3735 		if (is_tcf_pedit(act))
3736 			num_acts += tcf_pedit_nkeys(act);
3737 		else
3738 			num_acts++;
3739 	}
3740 	return num_acts;
3741 }
3742 EXPORT_SYMBOL(tcf_exts_num_actions);
3743 
3744 #ifdef CONFIG_NET_CLS_ACT
3745 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3746 					u32 *p_block_index,
3747 					struct netlink_ext_ack *extack)
3748 {
3749 	*p_block_index = nla_get_u32(block_index_attr);
3750 	if (!*p_block_index) {
3751 		NL_SET_ERR_MSG(extack, "Block number may not be zero");
3752 		return -EINVAL;
3753 	}
3754 
3755 	return 0;
3756 }
3757 
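/* Qevents let a qdisc expose named events as filter attach points, bound by
 * block index, e.g. (illustrative)
 *
 *	tc qdisc add dev eth0 root handle 1: red ... qevent early_drop block 10
 *
 * after which filters added to block 10 are run for every early-dropped
 * packet via tcf_qevent_handle().
 */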
3758 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3759 		    enum flow_block_binder_type binder_type,
3760 		    struct nlattr *block_index_attr,
3761 		    struct netlink_ext_ack *extack)
3762 {
3763 	u32 block_index;
3764 	int err;
3765 
3766 	if (!block_index_attr)
3767 		return 0;
3768 
3769 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3770 	if (err)
3771 		return err;
3772 
3773 	if (!block_index)
3774 		return 0;
3775 
3776 	qe->info.binder_type = binder_type;
3777 	qe->info.chain_head_change = tcf_chain_head_change_dflt;
3778 	qe->info.chain_head_change_priv = &qe->filter_chain;
3779 	qe->info.block_index = block_index;
3780 
3781 	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3782 }
3783 EXPORT_SYMBOL(tcf_qevent_init);
3784 
3785 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3786 {
3787 	if (qe->info.block_index)
3788 		tcf_block_put_ext(qe->block, sch, &qe->info);
3789 }
3790 EXPORT_SYMBOL(tcf_qevent_destroy);
3791 
3792 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3793 			       struct netlink_ext_ack *extack)
3794 {
3795 	u32 block_index;
3796 	int err;
3797 
3798 	if (!block_index_attr)
3799 		return 0;
3800 
3801 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3802 	if (err)
3803 		return err;
3804 
3805 	/* Reject both a newly configured block and a change of block. */
3806 	if (block_index != qe->info.block_index) {
3807 		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3808 		return -EINVAL;
3809 	}
3810 
3811 	return 0;
3812 }
3813 EXPORT_SYMBOL(tcf_qevent_validate_change);
3814 
3815 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3816 				  struct sk_buff **to_free, int *ret)
3817 {
3818 	struct tcf_result cl_res;
3819 	struct tcf_proto *fl;
3820 
3821 	if (!qe->info.block_index)
3822 		return skb;
3823 
3824 	fl = rcu_dereference_bh(qe->filter_chain);
3825 
3826 	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
3827 	case TC_ACT_SHOT:
3828 		qdisc_qstats_drop(sch);
3829 		__qdisc_drop(skb, to_free);
3830 		*ret = __NET_XMIT_BYPASS;
3831 		return NULL;
3832 	case TC_ACT_STOLEN:
3833 	case TC_ACT_QUEUED:
3834 	case TC_ACT_TRAP:
3835 		__qdisc_drop(skb, to_free);
3836 		*ret = __NET_XMIT_STOLEN;
3837 		return NULL;
3838 	case TC_ACT_REDIRECT:
3839 		skb_do_redirect(skb);
3840 		*ret = __NET_XMIT_STOLEN;
3841 		return NULL;
3842 	}
3843 
3844 	return skb;
3845 }
3846 EXPORT_SYMBOL(tcf_qevent_handle);
3847 
3848 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3849 {
3850 	if (!qe->info.block_index)
3851 		return 0;
3852 	return nla_put_u32(skb, attr_name, qe->info.block_index);
3853 }
3854 EXPORT_SYMBOL(tcf_qevent_dump);
3855 #endif
3856 
3857 static __net_init int tcf_net_init(struct net *net)
3858 {
3859 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3860 
3861 	spin_lock_init(&tn->idr_lock);
3862 	idr_init(&tn->idr);
3863 	return 0;
3864 }
3865 
3866 static void __net_exit tcf_net_exit(struct net *net)
3867 {
3868 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3869 
3870 	idr_destroy(&tn->idr);
3871 }
3872 
3873 static struct pernet_operations tcf_net_ops = {
3874 	.init = tcf_net_init,
3875 	.exit = tcf_net_exit,
3876 	.id   = &tcf_net_id,
3877 	.size = sizeof(struct tcf_net),
3878 };
3879 
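/* Module init. The tfilter doit handlers are registered unlocked
 * (RTNL_FLAG_DOIT_UNLOCKED) and take rtnl themselves only when required,
 * while the chain handlers still always run under rtnl.
 */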
3880 static int __init tc_filter_init(void)
3881 {
3882 	int err;
3883 
3884 	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3885 	if (!tc_filter_wq)
3886 		return -ENOMEM;
3887 
3888 	err = register_pernet_subsys(&tcf_net_ops);
3889 	if (err)
3890 		goto err_register_pernet_subsys;
3891 
3892 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3893 		      RTNL_FLAG_DOIT_UNLOCKED);
3894 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3895 		      RTNL_FLAG_DOIT_UNLOCKED);
3896 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3897 		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3898 	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3899 	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3900 	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3901 		      tc_dump_chain, 0);
3902 
3903 	return 0;
3904 
3905 err_register_pernet_subsys:
3906 	destroy_workqueue(tc_filter_wq);
3907 	return err;
3908 }
3909 
3910 subsys_initcall(tc_filter_init);
3911