xref: /openbmc/linux/net/sched/cls_api.c (revision c29b9772)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_api.c	Packet classifier API.
4  *
5  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  *
7  * Changes:
8  *
9  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
10  */
11 
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/skbuff.h>
19 #include <linux/init.h>
20 #include <linux/kmod.h>
21 #include <linux/slab.h>
22 #include <linux/idr.h>
23 #include <linux/jhash.h>
24 #include <linux/rculist.h>
25 #include <linux/rhashtable.h>
26 #include <net/net_namespace.h>
27 #include <net/sock.h>
28 #include <net/netlink.h>
29 #include <net/pkt_sched.h>
30 #include <net/pkt_cls.h>
31 #include <net/tc_act/tc_pedit.h>
32 #include <net/tc_act/tc_mirred.h>
33 #include <net/tc_act/tc_vlan.h>
34 #include <net/tc_act/tc_tunnel_key.h>
35 #include <net/tc_act/tc_csum.h>
36 #include <net/tc_act/tc_gact.h>
37 #include <net/tc_act/tc_police.h>
38 #include <net/tc_act/tc_sample.h>
39 #include <net/tc_act/tc_skbedit.h>
40 #include <net/tc_act/tc_ct.h>
41 #include <net/tc_act/tc_mpls.h>
42 #include <net/tc_act/tc_gate.h>
43 #include <net/flow_offload.h>
44 #include <net/tc_wrapper.h>
45 
46 extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
47 
48 /* The list of all installed classifier types */
49 static LIST_HEAD(tcf_proto_base);
50 
51 /* Protects the list of registered TC modules. It is a pure SMP lock. */
52 static DEFINE_RWLOCK(cls_mod_lock);
53 
54 static struct xarray tcf_exts_miss_cookies_xa;
55 struct tcf_exts_miss_cookie_node {
56 	const struct tcf_chain *chain;
57 	const struct tcf_proto *tp;
58 	const struct tcf_exts *exts;
59 	u32 chain_index;
60 	u32 tp_prio;
61 	u32 handle;
62 	u32 miss_cookie_base;
63 	struct rcu_head rcu;
64 };
65 
66 /* Each tc action entry cookie is composed of a 32-bit miss_cookie_base plus
67  * the action index in the exts tc actions array.
68  */
69 union tcf_exts_miss_cookie {
70 	struct {
71 		u32 miss_cookie_base;
72 		u32 act_index;
73 	};
74 	u64 miss_cookie;
75 };
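
/* Illustrative example (assumes the little-endian layout of the anonymous
 * struct above): a node with miss_cookie_base 5 and an action at index 2
 * produces the 64-bit cookie (2ULL << 32) | 5, and
 * tcf_exts_miss_cookie_lookup() splits that value back into act_index 2
 * and an xarray lookup on base 5. A zero base yields cookie 0, which
 * tcf_exts_miss_cookie_get() treats as "no cookie".
 */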
76 
77 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
78 static int
79 tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
80 				u32 handle)
81 {
82 	struct tcf_exts_miss_cookie_node *n;
83 	static u32 next;
84 	int err;
85 
86 	if (WARN_ON(!handle || !tp->ops->get_exts))
87 		return -EINVAL;
88 
89 	n = kzalloc(sizeof(*n), GFP_KERNEL);
90 	if (!n)
91 		return -ENOMEM;
92 
93 	n->chain_index = tp->chain->index;
94 	n->chain = tp->chain;
95 	n->tp_prio = tp->prio;
96 	n->tp = tp;
97 	n->exts = exts;
98 	n->handle = handle;
99 
100 	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
101 			      n, xa_limit_32b, &next, GFP_KERNEL);
102 	if (err)
103 		goto err_xa_alloc;
104 
105 	exts->miss_cookie_node = n;
106 	return 0;
107 
108 err_xa_alloc:
109 	kfree(n);
110 	return err;
111 }
112 
113 static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
114 {
115 	struct tcf_exts_miss_cookie_node *n;
116 
117 	if (!exts->miss_cookie_node)
118 		return;
119 
120 	n = exts->miss_cookie_node;
121 	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
122 	kfree_rcu(n, rcu);
123 }
124 
125 static struct tcf_exts_miss_cookie_node *
126 tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
127 {
128 	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };
129 
130 	*act_index = mc.act_index;
131 	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
132 }
133 #else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
134 static int
135 tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
136 				u32 handle)
137 {
138 	return 0;
139 }
140 
141 static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
142 {
143 }
144 #endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
145 
146 static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
147 {
148 	union tcf_exts_miss_cookie mc = { .act_index = act_index, };
149 
150 	if (!miss_cookie_base)
151 		return 0;
152 
153 	mc.miss_cookie_base = miss_cookie_base;
154 	return mc.miss_cookie;
155 }
156 
157 #ifdef CONFIG_NET_CLS_ACT
158 DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
159 EXPORT_SYMBOL(tc_skb_ext_tc);
160 
161 void tc_skb_ext_tc_enable(void)
162 {
163 	static_branch_inc(&tc_skb_ext_tc);
164 }
165 EXPORT_SYMBOL(tc_skb_ext_tc_enable);
166 
167 void tc_skb_ext_tc_disable(void)
168 {
169 	static_branch_dec(&tc_skb_ext_tc);
170 }
171 EXPORT_SYMBOL(tc_skb_ext_tc_disable);
172 #endif
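
/* Note: tc_skb_ext_tc_enable()/tc_skb_ext_tc_disable() are reference
 * counted via static_branch_inc()/static_branch_dec(), so the static key
 * stays enabled until the last user has called the disable side.
 */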
173 
174 static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
175 {
176 	return jhash_3words(tp->chain->index, tp->prio,
177 			    (__force __u32)tp->protocol, 0);
178 }
179 
180 static void tcf_proto_signal_destroying(struct tcf_chain *chain,
181 					struct tcf_proto *tp)
182 {
183 	struct tcf_block *block = chain->block;
184 
185 	mutex_lock(&block->proto_destroy_lock);
186 	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
187 		     destroy_obj_hashfn(tp));
188 	mutex_unlock(&block->proto_destroy_lock);
189 }
190 
191 static bool tcf_proto_cmp(const struct tcf_proto *tp1,
192 			  const struct tcf_proto *tp2)
193 {
194 	return tp1->chain->index == tp2->chain->index &&
195 	       tp1->prio == tp2->prio &&
196 	       tp1->protocol == tp2->protocol;
197 }
198 
199 static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
200 					struct tcf_proto *tp)
201 {
202 	u32 hash = destroy_obj_hashfn(tp);
203 	struct tcf_proto *iter;
204 	bool found = false;
205 
206 	rcu_read_lock();
207 	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
208 				   destroy_ht_node, hash) {
209 		if (tcf_proto_cmp(tp, iter)) {
210 			found = true;
211 			break;
212 		}
213 	}
214 	rcu_read_unlock();
215 
216 	return found;
217 }
218 
219 static void
220 tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
221 {
222 	struct tcf_block *block = chain->block;
223 
224 	mutex_lock(&block->proto_destroy_lock);
225 	if (hash_hashed(&tp->destroy_ht_node))
226 		hash_del_rcu(&tp->destroy_ht_node);
227 	mutex_unlock(&block->proto_destroy_lock);
228 }
229 
230 /* Find classifier type by string name */
231 
232 static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
233 {
234 	const struct tcf_proto_ops *t, *res = NULL;
235 
236 	if (kind) {
237 		read_lock(&cls_mod_lock);
238 		list_for_each_entry(t, &tcf_proto_base, head) {
239 			if (strcmp(kind, t->kind) == 0) {
240 				if (try_module_get(t->owner))
241 					res = t;
242 				break;
243 			}
244 		}
245 		read_unlock(&cls_mod_lock);
246 	}
247 	return res;
248 }
249 
250 static const struct tcf_proto_ops *
251 tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
252 		     struct netlink_ext_ack *extack)
253 {
254 	const struct tcf_proto_ops *ops;
255 
256 	ops = __tcf_proto_lookup_ops(kind);
257 	if (ops)
258 		return ops;
259 #ifdef CONFIG_MODULES
260 	if (rtnl_held)
261 		rtnl_unlock();
262 	request_module("cls_%s", kind);
263 	if (rtnl_held)
264 		rtnl_lock();
265 	ops = __tcf_proto_lookup_ops(kind);
266 	/* We dropped the RTNL semaphore in order to perform
267 	 * the module load. So, even if we succeeded in loading
268 	 * the module we have to replay the request. We indicate
269 	 * this using -EAGAIN.
270 	 */
271 	if (ops) {
272 		module_put(ops->owner);
273 		return ERR_PTR(-EAGAIN);
274 	}
275 #endif
276 	NL_SET_ERR_MSG(extack, "TC classifier not found");
277 	return ERR_PTR(-ENOENT);
278 }
279 
280 /* Register (unregister) a new classifier type */
281 
282 int register_tcf_proto_ops(struct tcf_proto_ops *ops)
283 {
284 	struct tcf_proto_ops *t;
285 	int rc = -EEXIST;
286 
287 	write_lock(&cls_mod_lock);
288 	list_for_each_entry(t, &tcf_proto_base, head)
289 		if (!strcmp(ops->kind, t->kind))
290 			goto out;
291 
292 	list_add_tail(&ops->head, &tcf_proto_base);
293 	rc = 0;
294 out:
295 	write_unlock(&cls_mod_lock);
296 	return rc;
297 }
298 EXPORT_SYMBOL(register_tcf_proto_ops);
299 
300 static struct workqueue_struct *tc_filter_wq;
301 
302 void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
303 {
304 	struct tcf_proto_ops *t;
305 	int rc = -ENOENT;
306 
307 	/* Wait for outstanding call_rcu()s, if any, from a
308 	 * tcf_proto_ops's destroy() handler.
309 	 */
310 	rcu_barrier();
311 	flush_workqueue(tc_filter_wq);
312 
313 	write_lock(&cls_mod_lock);
314 	list_for_each_entry(t, &tcf_proto_base, head) {
315 		if (t == ops) {
316 			list_del(&t->head);
317 			rc = 0;
318 			break;
319 		}
320 	}
321 	write_unlock(&cls_mod_lock);
322 
323 	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
324 }
325 EXPORT_SYMBOL(unregister_tcf_proto_ops);
326 
327 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
328 {
329 	INIT_RCU_WORK(rwork, func);
330 	return queue_rcu_work(tc_filter_wq, rwork);
331 }
332 EXPORT_SYMBOL(tcf_queue_work);
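
/* Usage sketch (illustrative; the struct and callback names below are
 * hypothetical): a classifier typically embeds a struct rcu_work in its
 * filter and defers the final kfree() past an RCU grace period:
 *
 *	struct my_filter {
 *		struct rcu_work rwork;
 *		...
 *	};
 *
 *	static void my_filter_free_work(struct work_struct *work)
 *	{
 *		struct my_filter *f = container_of(to_rcu_work(work),
 *						   struct my_filter, rwork);
 *		kfree(f);
 *	}
 *
 *	// called from the classifier's ->destroy() or ->delete() path:
 *	// tcf_queue_work(&f->rwork, my_filter_free_work);
 */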
333 
334 /* Select a new prio value from the range managed by the kernel. */
335 
336 static inline u32 tcf_auto_prio(struct tcf_proto *tp)
337 {
338 	u32 first = TC_H_MAKE(0xC0000000U, 0U);
339 
340 	if (tp)
341 		first = tp->prio - 1;
342 
343 	return TC_H_MAJ(first);
344 }
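
/* Example: with no filters installed, first stays at TC_H_MAKE(0xC0000000U, 0U)
 * and TC_H_MAJ() returns 0xC0000000. If the head tp already has prio
 * 0xC0000000, then tp->prio - 1 == 0xBFFFFFFF and TC_H_MAJ() masks it down
 * to 0xBFFF0000, i.e. the next lower 16-bit priority slot.
 */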
345 
346 static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
347 {
348 	if (kind)
349 		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
350 	memset(name, 0, IFNAMSIZ);
351 	return false;
352 }
353 
354 static bool tcf_proto_is_unlocked(const char *kind)
355 {
356 	const struct tcf_proto_ops *ops;
357 	bool ret;
358 
359 	if (strlen(kind) == 0)
360 		return false;
361 
362 	ops = tcf_proto_lookup_ops(kind, false, NULL);
363 	/* On error, return false to take the rtnl lock. Proto lookup/create
364 	 * functions will perform the lookup again and properly handle errors.
365 	 */
366 	if (IS_ERR(ops))
367 		return false;
368 
369 	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
370 	module_put(ops->owner);
371 	return ret;
372 }
373 
374 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
375 					  u32 prio, struct tcf_chain *chain,
376 					  bool rtnl_held,
377 					  struct netlink_ext_ack *extack)
378 {
379 	struct tcf_proto *tp;
380 	int err;
381 
382 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
383 	if (!tp)
384 		return ERR_PTR(-ENOBUFS);
385 
386 	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
387 	if (IS_ERR(tp->ops)) {
388 		err = PTR_ERR(tp->ops);
389 		goto errout;
390 	}
391 	tp->classify = tp->ops->classify;
392 	tp->protocol = protocol;
393 	tp->prio = prio;
394 	tp->chain = chain;
395 	spin_lock_init(&tp->lock);
396 	refcount_set(&tp->refcnt, 1);
397 
398 	err = tp->ops->init(tp);
399 	if (err) {
400 		module_put(tp->ops->owner);
401 		goto errout;
402 	}
403 	return tp;
404 
405 errout:
406 	kfree(tp);
407 	return ERR_PTR(err);
408 }
409 
410 static void tcf_proto_get(struct tcf_proto *tp)
411 {
412 	refcount_inc(&tp->refcnt);
413 }
414 
415 static void tcf_chain_put(struct tcf_chain *chain);
416 
417 static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
418 			      bool sig_destroy, struct netlink_ext_ack *extack)
419 {
420 	tp->ops->destroy(tp, rtnl_held, extack);
421 	if (sig_destroy)
422 		tcf_proto_signal_destroyed(tp->chain, tp);
423 	tcf_chain_put(tp->chain);
424 	module_put(tp->ops->owner);
425 	kfree_rcu(tp, rcu);
426 }
427 
428 static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
429 			  struct netlink_ext_ack *extack)
430 {
431 	if (refcount_dec_and_test(&tp->refcnt))
432 		tcf_proto_destroy(tp, rtnl_held, true, extack);
433 }
434 
435 static bool tcf_proto_check_delete(struct tcf_proto *tp)
436 {
437 	if (tp->ops->delete_empty)
438 		return tp->ops->delete_empty(tp);
439 
440 	tp->deleting = true;
441 	return tp->deleting;
442 }
443 
444 static void tcf_proto_mark_delete(struct tcf_proto *tp)
445 {
446 	spin_lock(&tp->lock);
447 	tp->deleting = true;
448 	spin_unlock(&tp->lock);
449 }
450 
451 static bool tcf_proto_is_deleting(struct tcf_proto *tp)
452 {
453 	bool deleting;
454 
455 	spin_lock(&tp->lock);
456 	deleting = tp->deleting;
457 	spin_unlock(&tp->lock);
458 
459 	return deleting;
460 }
461 
462 #define ASSERT_BLOCK_LOCKED(block)					\
463 	lockdep_assert_held(&(block)->lock)
464 
465 struct tcf_filter_chain_list_item {
466 	struct list_head list;
467 	tcf_chain_head_change_t *chain_head_change;
468 	void *chain_head_change_priv;
469 };
470 
471 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
472 					  u32 chain_index)
473 {
474 	struct tcf_chain *chain;
475 
476 	ASSERT_BLOCK_LOCKED(block);
477 
478 	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
479 	if (!chain)
480 		return NULL;
481 	list_add_tail_rcu(&chain->list, &block->chain_list);
482 	mutex_init(&chain->filter_chain_lock);
483 	chain->block = block;
484 	chain->index = chain_index;
485 	chain->refcnt = 1;
486 	if (!chain->index)
487 		block->chain0.chain = chain;
488 	return chain;
489 }
490 
491 static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
492 				       struct tcf_proto *tp_head)
493 {
494 	if (item->chain_head_change)
495 		item->chain_head_change(tp_head, item->chain_head_change_priv);
496 }
497 
498 static void tcf_chain0_head_change(struct tcf_chain *chain,
499 				   struct tcf_proto *tp_head)
500 {
501 	struct tcf_filter_chain_list_item *item;
502 	struct tcf_block *block = chain->block;
503 
504 	if (chain->index)
505 		return;
506 
507 	mutex_lock(&block->lock);
508 	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
509 		tcf_chain_head_change_item(item, tp_head);
510 	mutex_unlock(&block->lock);
511 }
512 
513 /* Returns true if block can be safely freed. */
514 
515 static bool tcf_chain_detach(struct tcf_chain *chain)
516 {
517 	struct tcf_block *block = chain->block;
518 
519 	ASSERT_BLOCK_LOCKED(block);
520 
521 	list_del_rcu(&chain->list);
522 	if (!chain->index)
523 		block->chain0.chain = NULL;
524 
525 	if (list_empty(&block->chain_list) &&
526 	    refcount_read(&block->refcnt) == 0)
527 		return true;
528 
529 	return false;
530 }
531 
532 static void tcf_block_destroy(struct tcf_block *block)
533 {
534 	mutex_destroy(&block->lock);
535 	mutex_destroy(&block->proto_destroy_lock);
536 	kfree_rcu(block, rcu);
537 }
538 
539 static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
540 {
541 	struct tcf_block *block = chain->block;
542 
543 	mutex_destroy(&chain->filter_chain_lock);
544 	kfree_rcu(chain, rcu);
545 	if (free_block)
546 		tcf_block_destroy(block);
547 }
548 
549 static void tcf_chain_hold(struct tcf_chain *chain)
550 {
551 	ASSERT_BLOCK_LOCKED(chain->block);
552 
553 	++chain->refcnt;
554 }
555 
556 static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
557 {
558 	ASSERT_BLOCK_LOCKED(chain->block);
559 
560 	/* In case all the references are action references, this
561 	 * chain should not be shown to the user.
562 	 */
563 	return chain->refcnt == chain->action_refcnt;
564 }
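
/* Illustrative example: a chain referenced by one filter and by two
 * goto_chain actions has refcnt == 3 and action_refcnt == 2, so it is
 * still visible to the user; once the filter reference is dropped the two
 * counters are equal and the chain is hidden from dumps.
 */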
565 
566 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
567 					  u32 chain_index)
568 {
569 	struct tcf_chain *chain;
570 
571 	ASSERT_BLOCK_LOCKED(block);
572 
573 	list_for_each_entry(chain, &block->chain_list, list) {
574 		if (chain->index == chain_index)
575 			return chain;
576 	}
577 	return NULL;
578 }
579 
580 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
581 static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
582 					      u32 chain_index)
583 {
584 	struct tcf_chain *chain;
585 
586 	list_for_each_entry_rcu(chain, &block->chain_list, list) {
587 		if (chain->index == chain_index)
588 			return chain;
589 	}
590 	return NULL;
591 }
592 #endif
593 
594 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
595 			   u32 seq, u16 flags, int event, bool unicast,
596 			   struct netlink_ext_ack *extack);
597 
598 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
599 					 u32 chain_index, bool create,
600 					 bool by_act)
601 {
602 	struct tcf_chain *chain = NULL;
603 	bool is_first_reference;
604 
605 	mutex_lock(&block->lock);
606 	chain = tcf_chain_lookup(block, chain_index);
607 	if (chain) {
608 		tcf_chain_hold(chain);
609 	} else {
610 		if (!create)
611 			goto errout;
612 		chain = tcf_chain_create(block, chain_index);
613 		if (!chain)
614 			goto errout;
615 	}
616 
617 	if (by_act)
618 		++chain->action_refcnt;
619 	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
620 	mutex_unlock(&block->lock);
621 
622 	/* Send a notification only when we got the first
623 	 * non-action reference. Until then, the chain acts only as
624 	 * a placeholder for actions pointing to it and the user
625 	 * ought not to know about them.
626 	 */
627 	if (is_first_reference && !by_act)
628 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
629 				RTM_NEWCHAIN, false, NULL);
630 
631 	return chain;
632 
633 errout:
634 	mutex_unlock(&block->lock);
635 	return chain;
636 }
637 
638 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
639 				       bool create)
640 {
641 	return __tcf_chain_get(block, chain_index, create, false);
642 }
643 
644 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
645 {
646 	return __tcf_chain_get(block, chain_index, true, true);
647 }
648 EXPORT_SYMBOL(tcf_chain_get_by_act);
649 
650 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
651 			       void *tmplt_priv);
652 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
653 				  void *tmplt_priv, u32 chain_index,
654 				  struct tcf_block *block, struct sk_buff *oskb,
655 				  u32 seq, u16 flags, bool unicast);
656 
657 static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
658 			    bool explicitly_created)
659 {
660 	struct tcf_block *block = chain->block;
661 	const struct tcf_proto_ops *tmplt_ops;
662 	bool free_block = false;
663 	unsigned int refcnt;
664 	void *tmplt_priv;
665 
666 	mutex_lock(&block->lock);
667 	if (explicitly_created) {
668 		if (!chain->explicitly_created) {
669 			mutex_unlock(&block->lock);
670 			return;
671 		}
672 		chain->explicitly_created = false;
673 	}
674 
675 	if (by_act)
676 		chain->action_refcnt--;
677 
678 	/* tc_chain_notify_delete can't be called while holding the block lock.
679 	 * However, once the block is unlocked the chain can change concurrently,
680 	 * so save these to temporary variables.
681 	 */
682 	refcnt = --chain->refcnt;
683 	tmplt_ops = chain->tmplt_ops;
684 	tmplt_priv = chain->tmplt_priv;
685 
686 	/* The last dropped non-action reference will trigger notification. */
687 	if (refcnt - chain->action_refcnt == 0 && !by_act) {
688 		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
689 				       block, NULL, 0, 0, false);
690 		/* Last reference to chain, no need to lock. */
691 		chain->flushing = false;
692 	}
693 
694 	if (refcnt == 0)
695 		free_block = tcf_chain_detach(chain);
696 	mutex_unlock(&block->lock);
697 
698 	if (refcnt == 0) {
699 		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
700 		tcf_chain_destroy(chain, free_block);
701 	}
702 }
703 
704 static void tcf_chain_put(struct tcf_chain *chain)
705 {
706 	__tcf_chain_put(chain, false, false);
707 }
708 
709 void tcf_chain_put_by_act(struct tcf_chain *chain)
710 {
711 	__tcf_chain_put(chain, true, false);
712 }
713 EXPORT_SYMBOL(tcf_chain_put_by_act);
714 
715 static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
716 {
717 	__tcf_chain_put(chain, false, true);
718 }
719 
720 static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
721 {
722 	struct tcf_proto *tp, *tp_next;
723 
724 	mutex_lock(&chain->filter_chain_lock);
725 	tp = tcf_chain_dereference(chain->filter_chain, chain);
726 	while (tp) {
727 		tp_next = rcu_dereference_protected(tp->next, 1);
728 		tcf_proto_signal_destroying(chain, tp);
729 		tp = tp_next;
730 	}
731 	tp = tcf_chain_dereference(chain->filter_chain, chain);
732 	RCU_INIT_POINTER(chain->filter_chain, NULL);
733 	tcf_chain0_head_change(chain, NULL);
734 	chain->flushing = true;
735 	mutex_unlock(&chain->filter_chain_lock);
736 
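	/* Second pass: the filter list was detached from the chain above, so
	 * the tp references can now be dropped without filter_chain_lock
	 * held; tcf_proto_put() may end up in the classifier's ->destroy().
	 */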
737 	while (tp) {
738 		tp_next = rcu_dereference_protected(tp->next, 1);
739 		tcf_proto_put(tp, rtnl_held, NULL);
740 		tp = tp_next;
741 	}
742 }
743 
744 static int tcf_block_setup(struct tcf_block *block,
745 			   struct flow_block_offload *bo);
746 
747 static void tcf_block_offload_init(struct flow_block_offload *bo,
748 				   struct net_device *dev, struct Qdisc *sch,
749 				   enum flow_block_command command,
750 				   enum flow_block_binder_type binder_type,
751 				   struct flow_block *flow_block,
752 				   bool shared, struct netlink_ext_ack *extack)
753 {
754 	bo->net = dev_net(dev);
755 	bo->command = command;
756 	bo->binder_type = binder_type;
757 	bo->block = flow_block;
758 	bo->block_shared = shared;
759 	bo->extack = extack;
760 	bo->sch = sch;
761 	bo->cb_list_head = &flow_block->cb_list;
762 	INIT_LIST_HEAD(&bo->cb_list);
763 }
764 
765 static void tcf_block_unbind(struct tcf_block *block,
766 			     struct flow_block_offload *bo);
767 
768 static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
769 {
770 	struct tcf_block *block = block_cb->indr.data;
771 	struct net_device *dev = block_cb->indr.dev;
772 	struct Qdisc *sch = block_cb->indr.sch;
773 	struct netlink_ext_ack extack = {};
774 	struct flow_block_offload bo = {};
775 
776 	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
777 			       block_cb->indr.binder_type,
778 			       &block->flow_block, tcf_block_shared(block),
779 			       &extack);
780 	rtnl_lock();
781 	down_write(&block->cb_lock);
782 	list_del(&block_cb->driver_list);
783 	list_move(&block_cb->list, &bo.cb_list);
784 	tcf_block_unbind(block, &bo);
785 	up_write(&block->cb_lock);
786 	rtnl_unlock();
787 }
788 
789 static bool tcf_block_offload_in_use(struct tcf_block *block)
790 {
791 	return atomic_read(&block->offloadcnt);
792 }
793 
794 static int tcf_block_offload_cmd(struct tcf_block *block,
795 				 struct net_device *dev, struct Qdisc *sch,
796 				 struct tcf_block_ext_info *ei,
797 				 enum flow_block_command command,
798 				 struct netlink_ext_ack *extack)
799 {
800 	struct flow_block_offload bo = {};
801 
802 	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
803 			       &block->flow_block, tcf_block_shared(block),
804 			       extack);
805 
806 	if (dev->netdev_ops->ndo_setup_tc) {
807 		int err;
808 
809 		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
810 		if (err < 0) {
811 			if (err != -EOPNOTSUPP)
812 				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
813 			return err;
814 		}
815 
816 		return tcf_block_setup(block, &bo);
817 	}
818 
819 	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
820 				    tc_block_indr_cleanup);
821 	tcf_block_setup(block, &bo);
822 
823 	return -EOPNOTSUPP;
824 }
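
/* Note: when the device has no ndo_setup_tc, the function above still
 * offers the block to indirect drivers via flow_indr_dev_setup_offload()
 * but deliberately returns -EOPNOTSUPP, which makes
 * tcf_block_offload_bind() fall through to its no_offload_dev_inc
 * accounting below.
 */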
825 
826 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
827 				  struct tcf_block_ext_info *ei,
828 				  struct netlink_ext_ack *extack)
829 {
830 	struct net_device *dev = q->dev_queue->dev;
831 	int err;
832 
833 	down_write(&block->cb_lock);
834 
835 	/* If the tc offload feature is disabled and the block we try to bind
836 	 * to already has some offloaded filters, refuse the bind.
837 	 */
838 	if (dev->netdev_ops->ndo_setup_tc &&
839 	    !tc_can_offload(dev) &&
840 	    tcf_block_offload_in_use(block)) {
841 		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
842 		err = -EOPNOTSUPP;
843 		goto err_unlock;
844 	}
845 
846 	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
847 	if (err == -EOPNOTSUPP)
848 		goto no_offload_dev_inc;
849 	if (err)
850 		goto err_unlock;
851 
852 	up_write(&block->cb_lock);
853 	return 0;
854 
855 no_offload_dev_inc:
856 	if (tcf_block_offload_in_use(block))
857 		goto err_unlock;
858 
859 	err = 0;
860 	block->nooffloaddevcnt++;
861 err_unlock:
862 	up_write(&block->cb_lock);
863 	return err;
864 }
865 
866 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
867 				     struct tcf_block_ext_info *ei)
868 {
869 	struct net_device *dev = q->dev_queue->dev;
870 	int err;
871 
872 	down_write(&block->cb_lock);
873 	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
874 	if (err == -EOPNOTSUPP)
875 		goto no_offload_dev_dec;
876 	up_write(&block->cb_lock);
877 	return;
878 
879 no_offload_dev_dec:
880 	WARN_ON(block->nooffloaddevcnt-- == 0);
881 	up_write(&block->cb_lock);
882 }
883 
884 static int
885 tcf_chain0_head_change_cb_add(struct tcf_block *block,
886 			      struct tcf_block_ext_info *ei,
887 			      struct netlink_ext_ack *extack)
888 {
889 	struct tcf_filter_chain_list_item *item;
890 	struct tcf_chain *chain0;
891 
892 	item = kmalloc(sizeof(*item), GFP_KERNEL);
893 	if (!item) {
894 		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
895 		return -ENOMEM;
896 	}
897 	item->chain_head_change = ei->chain_head_change;
898 	item->chain_head_change_priv = ei->chain_head_change_priv;
899 
900 	mutex_lock(&block->lock);
901 	chain0 = block->chain0.chain;
902 	if (chain0)
903 		tcf_chain_hold(chain0);
904 	else
905 		list_add(&item->list, &block->chain0.filter_chain_list);
906 	mutex_unlock(&block->lock);
907 
908 	if (chain0) {
909 		struct tcf_proto *tp_head;
910 
911 		mutex_lock(&chain0->filter_chain_lock);
912 
913 		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
914 		if (tp_head)
915 			tcf_chain_head_change_item(item, tp_head);
916 
917 		mutex_lock(&block->lock);
918 		list_add(&item->list, &block->chain0.filter_chain_list);
919 		mutex_unlock(&block->lock);
920 
921 		mutex_unlock(&chain0->filter_chain_lock);
922 		tcf_chain_put(chain0);
923 	}
924 
925 	return 0;
926 }
927 
928 static void
929 tcf_chain0_head_change_cb_del(struct tcf_block *block,
930 			      struct tcf_block_ext_info *ei)
931 {
932 	struct tcf_filter_chain_list_item *item;
933 
934 	mutex_lock(&block->lock);
935 	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
936 		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
937 		    (item->chain_head_change == ei->chain_head_change &&
938 		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
939 			if (block->chain0.chain)
940 				tcf_chain_head_change_item(item, NULL);
941 			list_del(&item->list);
942 			mutex_unlock(&block->lock);
943 
944 			kfree(item);
945 			return;
946 		}
947 	}
948 	mutex_unlock(&block->lock);
949 	WARN_ON(1);
950 }
951 
952 struct tcf_net {
953 	spinlock_t idr_lock; /* Protects idr */
954 	struct idr idr;
955 };
956 
957 static unsigned int tcf_net_id;
958 
959 static int tcf_block_insert(struct tcf_block *block, struct net *net,
960 			    struct netlink_ext_ack *extack)
961 {
962 	struct tcf_net *tn = net_generic(net, tcf_net_id);
963 	int err;
964 
965 	idr_preload(GFP_KERNEL);
966 	spin_lock(&tn->idr_lock);
967 	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
968 			    GFP_NOWAIT);
969 	spin_unlock(&tn->idr_lock);
970 	idr_preload_end();
971 
972 	return err;
973 }
974 
975 static void tcf_block_remove(struct tcf_block *block, struct net *net)
976 {
977 	struct tcf_net *tn = net_generic(net, tcf_net_id);
978 
979 	spin_lock(&tn->idr_lock);
980 	idr_remove(&tn->idr, block->index);
981 	spin_unlock(&tn->idr_lock);
982 }
983 
984 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
985 					  u32 block_index,
986 					  struct netlink_ext_ack *extack)
987 {
988 	struct tcf_block *block;
989 
990 	block = kzalloc(sizeof(*block), GFP_KERNEL);
991 	if (!block) {
992 		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
993 		return ERR_PTR(-ENOMEM);
994 	}
995 	mutex_init(&block->lock);
996 	mutex_init(&block->proto_destroy_lock);
997 	init_rwsem(&block->cb_lock);
998 	flow_block_init(&block->flow_block);
999 	INIT_LIST_HEAD(&block->chain_list);
1000 	INIT_LIST_HEAD(&block->owner_list);
1001 	INIT_LIST_HEAD(&block->chain0.filter_chain_list);
1002 
1003 	refcount_set(&block->refcnt, 1);
1004 	block->net = net;
1005 	block->index = block_index;
1006 
1007 	/* Don't store q pointer for blocks which are shared */
1008 	if (!tcf_block_shared(block))
1009 		block->q = q;
1010 	return block;
1011 }
1012 
1013 static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
1014 {
1015 	struct tcf_net *tn = net_generic(net, tcf_net_id);
1016 
1017 	return idr_find(&tn->idr, block_index);
1018 }
1019 
1020 static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
1021 {
1022 	struct tcf_block *block;
1023 
1024 	rcu_read_lock();
1025 	block = tcf_block_lookup(net, block_index);
1026 	if (block && !refcount_inc_not_zero(&block->refcnt))
1027 		block = NULL;
1028 	rcu_read_unlock();
1029 
1030 	return block;
1031 }
1032 
1033 static struct tcf_chain *
1034 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
1035 {
1036 	mutex_lock(&block->lock);
1037 	if (chain)
1038 		chain = list_is_last(&chain->list, &block->chain_list) ?
1039 			NULL : list_next_entry(chain, list);
1040 	else
1041 		chain = list_first_entry_or_null(&block->chain_list,
1042 						 struct tcf_chain, list);
1043 
1044 	/* skip all action-only chains */
1045 	while (chain && tcf_chain_held_by_acts_only(chain))
1046 		chain = list_is_last(&chain->list, &block->chain_list) ?
1047 			NULL : list_next_entry(chain, list);
1048 
1049 	if (chain)
1050 		tcf_chain_hold(chain);
1051 	mutex_unlock(&block->lock);
1052 
1053 	return chain;
1054 }
1055 
1056 /* Function to be used by all clients that want to iterate over all chains on
1057  * a block. It properly obtains block->lock and takes a reference to the chain
1058  * before returning it. Users of this function must be tolerant of concurrent
1059  * chain insertion/deletion or ensure that no concurrent chain modification is
1060  * possible. Note that netlink dump callbacks cannot guarantee a consistent
1061  * dump because the rtnl lock is released each time the skb is filled with
1062  * data and sent to user-space.
1063  */
1064 
1065 struct tcf_chain *
1066 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
1067 {
1068 	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);
1069 
1070 	if (chain)
1071 		tcf_chain_put(chain);
1072 
1073 	return chain_next;
1074 }
1075 EXPORT_SYMBOL(tcf_get_next_chain);
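
/* Usage sketch (illustrative; process_chain() is a hypothetical visitor):
 * the iterator drops the reference on the previous chain itself, so a
 * plain for loop walks every user-visible chain:
 *
 *	struct tcf_chain *chain;
 *
 *	for (chain = tcf_get_next_chain(block, NULL); chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		process_chain(chain);
 *
 * A caller that breaks out early still holds a reference on the current
 * chain and must drop it.
 */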
1076 
1077 static struct tcf_proto *
1078 __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
1079 {
1080 	u32 prio = 0;
1081 
1082 	ASSERT_RTNL();
1083 	mutex_lock(&chain->filter_chain_lock);
1084 
1085 	if (!tp) {
1086 		tp = tcf_chain_dereference(chain->filter_chain, chain);
1087 	} else if (tcf_proto_is_deleting(tp)) {
1088 		/* 'deleting' flag is set and chain->filter_chain_lock was
1089 		 * unlocked, which means next pointer could be invalid. Restart
1090 		 * search.
1091 		 */
1092 		prio = tp->prio + 1;
1093 		tp = tcf_chain_dereference(chain->filter_chain, chain);
1094 
1095 		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
1096 			if (!tp->deleting && tp->prio >= prio)
1097 				break;
1098 	} else {
1099 		tp = tcf_chain_dereference(tp->next, chain);
1100 	}
1101 
1102 	if (tp)
1103 		tcf_proto_get(tp);
1104 
1105 	mutex_unlock(&chain->filter_chain_lock);
1106 
1107 	return tp;
1108 }
1109 
1110 /* Function to be used by all clients that want to iterate over all tp's on
1111  * a chain. Users of this function must be tolerant of concurrent tp
1112  * insertion/deletion or ensure that no concurrent chain modification is
1113  * possible. Note that netlink dump callbacks cannot guarantee a consistent
1114  * dump because the rtnl lock is released each time the skb is filled with
1115  * data and sent to user-space.
1116  */
1117 
1118 struct tcf_proto *
1119 tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
1120 {
1121 	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
1122 
1123 	if (tp)
1124 		tcf_proto_put(tp, true, NULL);
1125 
1126 	return tp_next;
1127 }
1128 EXPORT_SYMBOL(tcf_get_next_proto);
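
/* Usage sketch (illustrative; process_proto() is a hypothetical visitor),
 * mirroring the chain iterator one level down:
 *
 *	struct tcf_proto *tp;
 *
 *	for (tp = tcf_get_next_proto(chain, NULL); tp;
 *	     tp = tcf_get_next_proto(chain, tp))
 *		process_proto(tp);
 */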
1129 
1130 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
1131 {
1132 	struct tcf_chain *chain;
1133 
1134 	/* Last reference to block. At this point chains cannot be added or
1135 	 * removed concurrently.
1136 	 */
1137 	for (chain = tcf_get_next_chain(block, NULL);
1138 	     chain;
1139 	     chain = tcf_get_next_chain(block, chain)) {
1140 		tcf_chain_put_explicitly_created(chain);
1141 		tcf_chain_flush(chain, rtnl_held);
1142 	}
1143 }
1144 
1145 /* Look up the Qdisc and increment its reference counter.
1146  * Set parent, if necessary.
1147  */
1148 
1149 static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
1150 			    u32 *parent, int ifindex, bool rtnl_held,
1151 			    struct netlink_ext_ack *extack)
1152 {
1153 	const struct Qdisc_class_ops *cops;
1154 	struct net_device *dev;
1155 	int err = 0;
1156 
1157 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1158 		return 0;
1159 
1160 	rcu_read_lock();
1161 
1162 	/* Find link */
1163 	dev = dev_get_by_index_rcu(net, ifindex);
1164 	if (!dev) {
1165 		rcu_read_unlock();
1166 		return -ENODEV;
1167 	}
1168 
1169 	/* Find qdisc */
1170 	if (!*parent) {
1171 		*q = rcu_dereference(dev->qdisc);
1172 		*parent = (*q)->handle;
1173 	} else {
1174 		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
1175 		if (!*q) {
1176 			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1177 			err = -EINVAL;
1178 			goto errout_rcu;
1179 		}
1180 	}
1181 
1182 	*q = qdisc_refcount_inc_nz(*q);
1183 	if (!*q) {
1184 		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1185 		err = -EINVAL;
1186 		goto errout_rcu;
1187 	}
1188 
1189 	/* Is it classful? */
1190 	cops = (*q)->ops->cl_ops;
1191 	if (!cops) {
1192 		NL_SET_ERR_MSG(extack, "Qdisc not classful");
1193 		err = -EINVAL;
1194 		goto errout_qdisc;
1195 	}
1196 
1197 	if (!cops->tcf_block) {
1198 		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
1199 		err = -EOPNOTSUPP;
1200 		goto errout_qdisc;
1201 	}
1202 
1203 errout_rcu:
1204 	/* At this point we know that qdisc is not noop_qdisc,
1205 	 * which means that qdisc holds a reference to net_device
1206 	 * and we hold a reference to qdisc, so it is safe to release
1207 	 * rcu read lock.
1208 	 */
1209 	rcu_read_unlock();
1210 	return err;
1211 
1212 errout_qdisc:
1213 	rcu_read_unlock();
1214 
1215 	if (rtnl_held)
1216 		qdisc_put(*q);
1217 	else
1218 		qdisc_put_unlocked(*q);
1219 	*q = NULL;
1220 
1221 	return err;
1222 }
1223 
1224 static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
1225 			       int ifindex, struct netlink_ext_ack *extack)
1226 {
1227 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1228 		return 0;
1229 
1230 	/* Are we searching for a filter attached to a class? */
1231 	if (TC_H_MIN(parent)) {
1232 		const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1233 
1234 		*cl = cops->find(q, parent);
1235 		if (*cl == 0) {
1236 			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
1237 			return -ENOENT;
1238 		}
1239 	}
1240 
1241 	return 0;
1242 }
1243 
1244 static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
1245 					  unsigned long cl, int ifindex,
1246 					  u32 block_index,
1247 					  struct netlink_ext_ack *extack)
1248 {
1249 	struct tcf_block *block;
1250 
1251 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
1252 		block = tcf_block_refcnt_get(net, block_index);
1253 		if (!block) {
1254 			NL_SET_ERR_MSG(extack, "Block of given index was not found");
1255 			return ERR_PTR(-EINVAL);
1256 		}
1257 	} else {
1258 		const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1259 
1260 		block = cops->tcf_block(q, cl, extack);
1261 		if (!block)
1262 			return ERR_PTR(-EINVAL);
1263 
1264 		if (tcf_block_shared(block)) {
1265 			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
1266 			return ERR_PTR(-EOPNOTSUPP);
1267 		}
1268 
1269 		/* Always take a reference to the block in order to support
1270 		 * execution of the cls API rules update path without the rtnl
1271 		 * lock. The caller must release the block when it is finished
1272 		 * using it. The 'if' branch of this conditional obtains its
1273 		 * reference to the block by calling tcf_block_refcnt_get().
1274 		 */
1275 		refcount_inc(&block->refcnt);
1276 	}
1277 
1278 	return block;
1279 }
1280 
1281 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
1282 			    struct tcf_block_ext_info *ei, bool rtnl_held)
1283 {
1284 	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
1285 		/* Flushing/putting all chains will cause the block to be
1286 		 * deallocated when the last chain is freed. However, if the
1287 		 * chain_list is empty, the block has to be manually deallocated.
1288 		 * Once the block's reference counter has reached 0, it is no
1289 		 * longer possible to increment it or add new chains to the block.
1290 		 */
1291 		bool free_block = list_empty(&block->chain_list);
1292 
1293 		mutex_unlock(&block->lock);
1294 		if (tcf_block_shared(block))
1295 			tcf_block_remove(block, block->net);
1296 
1297 		if (q)
1298 			tcf_block_offload_unbind(block, q, ei);
1299 
1300 		if (free_block)
1301 			tcf_block_destroy(block);
1302 		else
1303 			tcf_block_flush_all_chains(block, rtnl_held);
1304 	} else if (q) {
1305 		tcf_block_offload_unbind(block, q, ei);
1306 	}
1307 }
1308 
1309 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
1310 {
1311 	__tcf_block_put(block, NULL, NULL, rtnl_held);
1312 }
1313 
1314 /* Find tcf block.
1315  * Set q, parent, cl when appropriate.
1316  */
1317 
1318 static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
1319 					u32 *parent, unsigned long *cl,
1320 					int ifindex, u32 block_index,
1321 					struct netlink_ext_ack *extack)
1322 {
1323 	struct tcf_block *block;
1324 	int err = 0;
1325 
1326 	ASSERT_RTNL();
1327 
1328 	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
1329 	if (err)
1330 		goto errout;
1331 
1332 	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
1333 	if (err)
1334 		goto errout_qdisc;
1335 
1336 	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
1337 	if (IS_ERR(block)) {
1338 		err = PTR_ERR(block);
1339 		goto errout_qdisc;
1340 	}
1341 
1342 	return block;
1343 
1344 errout_qdisc:
1345 	if (*q)
1346 		qdisc_put(*q);
1347 errout:
1348 	*q = NULL;
1349 	return ERR_PTR(err);
1350 }
1351 
1352 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
1353 			      bool rtnl_held)
1354 {
1355 	if (!IS_ERR_OR_NULL(block))
1356 		tcf_block_refcnt_put(block, rtnl_held);
1357 
1358 	if (q) {
1359 		if (rtnl_held)
1360 			qdisc_put(q);
1361 		else
1362 			qdisc_put_unlocked(q);
1363 	}
1364 }
1365 
1366 struct tcf_block_owner_item {
1367 	struct list_head list;
1368 	struct Qdisc *q;
1369 	enum flow_block_binder_type binder_type;
1370 };
1371 
1372 static void
1373 tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1374 			       struct Qdisc *q,
1375 			       enum flow_block_binder_type binder_type)
1376 {
1377 	if (block->keep_dst &&
1378 	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1379 	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1380 		netif_keep_dst(qdisc_dev(q));
1381 }
1382 
1383 void tcf_block_netif_keep_dst(struct tcf_block *block)
1384 {
1385 	struct tcf_block_owner_item *item;
1386 
1387 	block->keep_dst = true;
1388 	list_for_each_entry(item, &block->owner_list, list)
1389 		tcf_block_owner_netif_keep_dst(block, item->q,
1390 					       item->binder_type);
1391 }
1392 EXPORT_SYMBOL(tcf_block_netif_keep_dst);
1393 
1394 static int tcf_block_owner_add(struct tcf_block *block,
1395 			       struct Qdisc *q,
1396 			       enum flow_block_binder_type binder_type)
1397 {
1398 	struct tcf_block_owner_item *item;
1399 
1400 	item = kmalloc(sizeof(*item), GFP_KERNEL);
1401 	if (!item)
1402 		return -ENOMEM;
1403 	item->q = q;
1404 	item->binder_type = binder_type;
1405 	list_add(&item->list, &block->owner_list);
1406 	return 0;
1407 }
1408 
1409 static void tcf_block_owner_del(struct tcf_block *block,
1410 				struct Qdisc *q,
1411 				enum flow_block_binder_type binder_type)
1412 {
1413 	struct tcf_block_owner_item *item;
1414 
1415 	list_for_each_entry(item, &block->owner_list, list) {
1416 		if (item->q == q && item->binder_type == binder_type) {
1417 			list_del(&item->list);
1418 			kfree(item);
1419 			return;
1420 		}
1421 	}
1422 	WARN_ON(1);
1423 }
1424 
1425 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1426 		      struct tcf_block_ext_info *ei,
1427 		      struct netlink_ext_ack *extack)
1428 {
1429 	struct net *net = qdisc_net(q);
1430 	struct tcf_block *block = NULL;
1431 	int err;
1432 
1433 	if (ei->block_index)
1434 		/* A non-zero block_index means a shared block is requested */
1435 		block = tcf_block_refcnt_get(net, ei->block_index);
1436 
1437 	if (!block) {
1438 		block = tcf_block_create(net, q, ei->block_index, extack);
1439 		if (IS_ERR(block))
1440 			return PTR_ERR(block);
1441 		if (tcf_block_shared(block)) {
1442 			err = tcf_block_insert(block, net, extack);
1443 			if (err)
1444 				goto err_block_insert;
1445 		}
1446 	}
1447 
1448 	err = tcf_block_owner_add(block, q, ei->binder_type);
1449 	if (err)
1450 		goto err_block_owner_add;
1451 
1452 	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1453 
1454 	err = tcf_chain0_head_change_cb_add(block, ei, extack);
1455 	if (err)
1456 		goto err_chain0_head_change_cb_add;
1457 
1458 	err = tcf_block_offload_bind(block, q, ei, extack);
1459 	if (err)
1460 		goto err_block_offload_bind;
1461 
1462 	*p_block = block;
1463 	return 0;
1464 
1465 err_block_offload_bind:
1466 	tcf_chain0_head_change_cb_del(block, ei);
1467 err_chain0_head_change_cb_add:
1468 	tcf_block_owner_del(block, q, ei->binder_type);
1469 err_block_owner_add:
1470 err_block_insert:
1471 	tcf_block_refcnt_put(block, true);
1472 	return err;
1473 }
1474 EXPORT_SYMBOL(tcf_block_get_ext);
1475 
1476 static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1477 {
1478 	struct tcf_proto __rcu **p_filter_chain = priv;
1479 
1480 	rcu_assign_pointer(*p_filter_chain, tp_head);
1481 }
1482 
1483 int tcf_block_get(struct tcf_block **p_block,
1484 		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1485 		  struct netlink_ext_ack *extack)
1486 {
1487 	struct tcf_block_ext_info ei = {
1488 		.chain_head_change = tcf_chain_head_change_dflt,
1489 		.chain_head_change_priv = p_filter_chain,
1490 	};
1491 
1492 	WARN_ON(!p_filter_chain);
1493 	return tcf_block_get_ext(p_block, q, &ei, extack);
1494 }
1495 EXPORT_SYMBOL(tcf_block_get);
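
/* Usage sketch (illustrative): a classful qdisc typically pairs this with
 * tcf_block_put() in its ->init()/->destroy() callbacks:
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_put(q->block);
 *
 * where q->filter_list is the qdisc's struct tcf_proto __rcu * head that
 * the default chain0 head-change callback above keeps pointing at the
 * current chain 0 filter list.
 */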
1496 
1497 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
1498  * actions should all be removed after flushing.
1499  */
1500 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1501 		       struct tcf_block_ext_info *ei)
1502 {
1503 	if (!block)
1504 		return;
1505 	tcf_chain0_head_change_cb_del(block, ei);
1506 	tcf_block_owner_del(block, q, ei->binder_type);
1507 
1508 	__tcf_block_put(block, q, ei, true);
1509 }
1510 EXPORT_SYMBOL(tcf_block_put_ext);
1511 
1512 void tcf_block_put(struct tcf_block *block)
1513 {
1514 	struct tcf_block_ext_info ei = {0, };
1515 
1516 	if (!block)
1517 		return;
1518 	tcf_block_put_ext(block, block->q, &ei);
1519 }
1520 
1521 EXPORT_SYMBOL(tcf_block_put);
1522 
1523 static int
1524 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1525 			    void *cb_priv, bool add, bool offload_in_use,
1526 			    struct netlink_ext_ack *extack)
1527 {
1528 	struct tcf_chain *chain, *chain_prev;
1529 	struct tcf_proto *tp, *tp_prev;
1530 	int err;
1531 
1532 	lockdep_assert_held(&block->cb_lock);
1533 
1534 	for (chain = __tcf_get_next_chain(block, NULL);
1535 	     chain;
1536 	     chain_prev = chain,
1537 		     chain = __tcf_get_next_chain(block, chain),
1538 		     tcf_chain_put(chain_prev)) {
1539 		for (tp = __tcf_get_next_proto(chain, NULL); tp;
1540 		     tp_prev = tp,
1541 			     tp = __tcf_get_next_proto(chain, tp),
1542 			     tcf_proto_put(tp_prev, true, NULL)) {
1543 			if (tp->ops->reoffload) {
1544 				err = tp->ops->reoffload(tp, add, cb, cb_priv,
1545 							 extack);
1546 				if (err && add)
1547 					goto err_playback_remove;
1548 			} else if (add && offload_in_use) {
1549 				err = -EOPNOTSUPP;
1550 				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1551 				goto err_playback_remove;
1552 			}
1553 		}
1554 	}
1555 
1556 	return 0;
1557 
1558 err_playback_remove:
1559 	tcf_proto_put(tp, true, NULL);
1560 	tcf_chain_put(chain);
1561 	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1562 				    extack);
1563 	return err;
1564 }
1565 
1566 static int tcf_block_bind(struct tcf_block *block,
1567 			  struct flow_block_offload *bo)
1568 {
1569 	struct flow_block_cb *block_cb, *next;
1570 	int err, i = 0;
1571 
1572 	lockdep_assert_held(&block->cb_lock);
1573 
1574 	list_for_each_entry(block_cb, &bo->cb_list, list) {
1575 		err = tcf_block_playback_offloads(block, block_cb->cb,
1576 						  block_cb->cb_priv, true,
1577 						  tcf_block_offload_in_use(block),
1578 						  bo->extack);
1579 		if (err)
1580 			goto err_unroll;
1581 		if (!bo->unlocked_driver_cb)
1582 			block->lockeddevcnt++;
1583 
1584 		i++;
1585 	}
1586 	list_splice(&bo->cb_list, &block->flow_block.cb_list);
1587 
1588 	return 0;
1589 
1590 err_unroll:
1591 	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1592 		if (i-- > 0) {
1593 			list_del(&block_cb->list);
1594 			tcf_block_playback_offloads(block, block_cb->cb,
1595 						    block_cb->cb_priv, false,
1596 						    tcf_block_offload_in_use(block),
1597 						    NULL);
1598 			if (!bo->unlocked_driver_cb)
1599 				block->lockeddevcnt--;
1600 		}
1601 		flow_block_cb_free(block_cb);
1602 	}
1603 
1604 	return err;
1605 }
1606 
1607 static void tcf_block_unbind(struct tcf_block *block,
1608 			     struct flow_block_offload *bo)
1609 {
1610 	struct flow_block_cb *block_cb, *next;
1611 
1612 	lockdep_assert_held(&block->cb_lock);
1613 
1614 	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1615 		tcf_block_playback_offloads(block, block_cb->cb,
1616 					    block_cb->cb_priv, false,
1617 					    tcf_block_offload_in_use(block),
1618 					    NULL);
1619 		list_del(&block_cb->list);
1620 		flow_block_cb_free(block_cb);
1621 		if (!bo->unlocked_driver_cb)
1622 			block->lockeddevcnt--;
1623 	}
1624 }
1625 
1626 static int tcf_block_setup(struct tcf_block *block,
1627 			   struct flow_block_offload *bo)
1628 {
1629 	int err;
1630 
1631 	switch (bo->command) {
1632 	case FLOW_BLOCK_BIND:
1633 		err = tcf_block_bind(block, bo);
1634 		break;
1635 	case FLOW_BLOCK_UNBIND:
1636 		err = 0;
1637 		tcf_block_unbind(block, bo);
1638 		break;
1639 	default:
1640 		WARN_ON_ONCE(1);
1641 		err = -EOPNOTSUPP;
1642 	}
1643 
1644 	return err;
1645 }
1646 
1647 /* Main classifier routine: scans the classifier chain attached
1648  * to this qdisc, (optionally) tests for the protocol and asks the
1649  * specific classifiers.
1650  */
1651 static inline int __tcf_classify(struct sk_buff *skb,
1652 				 const struct tcf_proto *tp,
1653 				 const struct tcf_proto *orig_tp,
1654 				 struct tcf_result *res,
1655 				 bool compat_mode,
1656 				 struct tcf_exts_miss_cookie_node *n,
1657 				 int act_index,
1658 				 u32 *last_executed_chain)
1659 {
1660 #ifdef CONFIG_NET_CLS_ACT
1661 	const int max_reclassify_loop = 16;
1662 	const struct tcf_proto *first_tp;
1663 	int limit = 0;
1664 
1665 reclassify:
1666 #endif
1667 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
1668 		__be16 protocol = skb_protocol(skb, false);
1669 		int err = 0;
1670 
1671 		if (n) {
1672 			struct tcf_exts *exts;
1673 
1674 			if (n->tp_prio != tp->prio)
1675 				continue;
1676 
1677 			/* We re-lookup the tp and chain based on index instead
1678 			 * of having hard refs and locks to them, so sanity-check
1679 			 * whether any of tp, chain or exts was replaced by the
1680 			 * time we got here with a cookie from hardware.
1681 			 */
1682 			if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
1683 				     !tp->ops->get_exts))
1684 				return TC_ACT_SHOT;
1685 
1686 			exts = tp->ops->get_exts(tp, n->handle);
1687 			if (unlikely(!exts || n->exts != exts))
1688 				return TC_ACT_SHOT;
1689 
1690 			n = NULL;
1691 			err = tcf_exts_exec_ex(skb, exts, act_index, res);
1692 		} else {
1693 			if (tp->protocol != protocol &&
1694 			    tp->protocol != htons(ETH_P_ALL))
1695 				continue;
1696 
1697 			err = tc_classify(skb, tp, res);
1698 		}
1699 #ifdef CONFIG_NET_CLS_ACT
1700 		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1701 			first_tp = orig_tp;
1702 			*last_executed_chain = first_tp->chain->index;
1703 			goto reset;
1704 		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1705 			first_tp = res->goto_tp;
1706 			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1707 			goto reset;
1708 		}
1709 #endif
1710 		if (err >= 0)
1711 			return err;
1712 	}
1713 
1714 	if (unlikely(n))
1715 		return TC_ACT_SHOT;
1716 
1717 	return TC_ACT_UNSPEC; /* signal: continue lookup */
1718 #ifdef CONFIG_NET_CLS_ACT
1719 reset:
1720 	if (unlikely(limit++ >= max_reclassify_loop)) {
1721 		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1722 				       tp->chain->block->index,
1723 				       tp->prio & 0xffff,
1724 				       ntohs(tp->protocol));
1725 		return TC_ACT_SHOT;
1726 	}
1727 
1728 	tp = first_tp;
1729 	goto reclassify;
1730 #endif
1731 }
1732 
1733 int tcf_classify(struct sk_buff *skb,
1734 		 const struct tcf_block *block,
1735 		 const struct tcf_proto *tp,
1736 		 struct tcf_result *res, bool compat_mode)
1737 {
1738 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1739 	u32 last_executed_chain = 0;
1740 
1741 	return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
1742 			      &last_executed_chain);
1743 #else
1744 	u32 last_executed_chain = tp ? tp->chain->index : 0;
1745 	struct tcf_exts_miss_cookie_node *n = NULL;
1746 	const struct tcf_proto *orig_tp = tp;
1747 	struct tc_skb_ext *ext;
1748 	int act_index = 0;
1749 	int ret;
1750 
1751 	if (block) {
1752 		ext = skb_ext_find(skb, TC_SKB_EXT);
1753 
1754 		if (ext && (ext->chain || ext->act_miss)) {
1755 			struct tcf_chain *fchain;
1756 			u32 chain;
1757 
1758 			if (ext->act_miss) {
1759 				n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
1760 								&act_index);
1761 				if (!n)
1762 					return TC_ACT_SHOT;
1763 
1764 				chain = n->chain_index;
1765 			} else {
1766 				chain = ext->chain;
1767 			}
1768 
1769 			fchain = tcf_chain_lookup_rcu(block, chain);
1770 			if (!fchain)
1771 				return TC_ACT_SHOT;
1772 
1773 			/* Consume, so cloned/redirect skbs won't inherit ext */
1774 			skb_ext_del(skb, TC_SKB_EXT);
1775 
1776 			tp = rcu_dereference_bh(fchain->filter_chain);
1777 			last_executed_chain = fchain->index;
1778 		}
1779 	}
1780 
1781 	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
1782 			     &last_executed_chain);
1783 
1784 	if (tc_skb_ext_tc_enabled()) {
1785 		/* If we missed on some chain */
1786 		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1787 			struct tc_skb_cb *cb = tc_skb_cb(skb);
1788 
1789 			ext = tc_skb_ext_alloc(skb);
1790 			if (WARN_ON_ONCE(!ext))
1791 				return TC_ACT_SHOT;
1792 			ext->chain = last_executed_chain;
1793 			ext->mru = cb->mru;
1794 			ext->post_ct = cb->post_ct;
1795 			ext->post_ct_snat = cb->post_ct_snat;
1796 			ext->post_ct_dnat = cb->post_ct_dnat;
1797 			ext->zone = cb->zone;
1798 		}
1799 	}
1800 
1801 	return ret;
1802 #endif
1803 }
1804 EXPORT_SYMBOL(tcf_classify);
1805 
1806 struct tcf_chain_info {
1807 	struct tcf_proto __rcu **pprev;
1808 	struct tcf_proto __rcu *next;
1809 };
1810 
1811 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1812 					   struct tcf_chain_info *chain_info)
1813 {
1814 	return tcf_chain_dereference(*chain_info->pprev, chain);
1815 }
1816 
1817 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1818 			       struct tcf_chain_info *chain_info,
1819 			       struct tcf_proto *tp)
1820 {
1821 	if (chain->flushing)
1822 		return -EAGAIN;
1823 
1824 	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1825 	if (*chain_info->pprev == chain->filter_chain)
1826 		tcf_chain0_head_change(chain, tp);
1827 	tcf_proto_get(tp);
1828 	rcu_assign_pointer(*chain_info->pprev, tp);
1829 
1830 	return 0;
1831 }
1832 
1833 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1834 				struct tcf_chain_info *chain_info,
1835 				struct tcf_proto *tp)
1836 {
1837 	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1838 
1839 	tcf_proto_mark_delete(tp);
1840 	if (tp == chain->filter_chain)
1841 		tcf_chain0_head_change(chain, next);
1842 	RCU_INIT_POINTER(*chain_info->pprev, next);
1843 }
1844 
1845 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1846 					   struct tcf_chain_info *chain_info,
1847 					   u32 protocol, u32 prio,
1848 					   bool prio_allocate);
1849 
1850 /* Try to insert a new proto.
1851  * If a proto with the specified priority already exists, free the new
1852  * proto and return the existing one.
1853  */
1854 
1855 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1856 						    struct tcf_proto *tp_new,
1857 						    u32 protocol, u32 prio,
1858 						    bool rtnl_held)
1859 {
1860 	struct tcf_chain_info chain_info;
1861 	struct tcf_proto *tp;
1862 	int err = 0;
1863 
1864 	mutex_lock(&chain->filter_chain_lock);
1865 
1866 	if (tcf_proto_exists_destroying(chain, tp_new)) {
1867 		mutex_unlock(&chain->filter_chain_lock);
1868 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1869 		return ERR_PTR(-EAGAIN);
1870 	}
1871 
1872 	tp = tcf_chain_tp_find(chain, &chain_info,
1873 			       protocol, prio, false);
1874 	if (!tp)
1875 		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1876 	mutex_unlock(&chain->filter_chain_lock);
1877 
1878 	if (tp) {
1879 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1880 		tp_new = tp;
1881 	} else if (err) {
1882 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1883 		tp_new = ERR_PTR(err);
1884 	}
1885 
1886 	return tp_new;
1887 }
1888 
1889 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1890 				      struct tcf_proto *tp, bool rtnl_held,
1891 				      struct netlink_ext_ack *extack)
1892 {
1893 	struct tcf_chain_info chain_info;
1894 	struct tcf_proto *tp_iter;
1895 	struct tcf_proto **pprev;
1896 	struct tcf_proto *next;
1897 
1898 	mutex_lock(&chain->filter_chain_lock);
1899 
1900 	/* Atomically find and remove tp from chain. */
1901 	for (pprev = &chain->filter_chain;
1902 	     (tp_iter = tcf_chain_dereference(*pprev, chain));
1903 	     pprev = &tp_iter->next) {
1904 		if (tp_iter == tp) {
1905 			chain_info.pprev = pprev;
1906 			chain_info.next = tp_iter->next;
1907 			WARN_ON(tp_iter->deleting);
1908 			break;
1909 		}
1910 	}
1911 	/* Verify that tp still exists and no new filters were inserted
1912 	 * concurrently.
1913 	 * Mark tp for deletion if it is empty.
1914 	 */
1915 	if (!tp_iter || !tcf_proto_check_delete(tp)) {
1916 		mutex_unlock(&chain->filter_chain_lock);
1917 		return;
1918 	}
1919 
1920 	tcf_proto_signal_destroying(chain, tp);
1921 	next = tcf_chain_dereference(chain_info.next, chain);
1922 	if (tp == chain->filter_chain)
1923 		tcf_chain0_head_change(chain, next);
1924 	RCU_INIT_POINTER(*chain_info.pprev, next);
1925 	mutex_unlock(&chain->filter_chain_lock);
1926 
1927 	tcf_proto_put(tp, rtnl_held, extack);
1928 }
1929 
1930 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1931 					   struct tcf_chain_info *chain_info,
1932 					   u32 protocol, u32 prio,
1933 					   bool prio_allocate)
1934 {
1935 	struct tcf_proto **pprev;
1936 	struct tcf_proto *tp;
1937 
1938 	/* Check the chain for existence of proto-tcf with this priority */
1939 	for (pprev = &chain->filter_chain;
1940 	     (tp = tcf_chain_dereference(*pprev, chain));
1941 	     pprev = &tp->next) {
1942 		if (tp->prio >= prio) {
1943 			if (tp->prio == prio) {
1944 				if (prio_allocate ||
1945 				    (tp->protocol != protocol && protocol))
1946 					return ERR_PTR(-EINVAL);
1947 			} else {
1948 				tp = NULL;
1949 			}
1950 			break;
1951 		}
1952 	}
1953 	chain_info->pprev = pprev;
1954 	if (tp) {
1955 		chain_info->next = tp->next;
1956 		tcf_proto_get(tp);
1957 	} else {
1958 		chain_info->next = NULL;
1959 	}
1960 	return tp;
1961 }
1962 
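/* Fill one RTM_*TFILTER message. A NULL fh dumps only the classifier header
 * (handle 0); terse_dump requires the classifier to implement ->terse_dump().
 * Returns skb->len on success, or -1 with the skb trimmed back to its
 * previous tail on any failure.
 */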
1963 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1964 			 struct tcf_proto *tp, struct tcf_block *block,
1965 			 struct Qdisc *q, u32 parent, void *fh,
1966 			 u32 portid, u32 seq, u16 flags, int event,
1967 			 bool terse_dump, bool rtnl_held,
1968 			 struct netlink_ext_ack *extack)
1969 {
1970 	struct tcmsg *tcm;
1971 	struct nlmsghdr *nlh;
1972 	unsigned char *b = skb_tail_pointer(skb);
1973 
1974 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1975 	if (!nlh)
1976 		goto out_nlmsg_trim;
1977 	tcm = nlmsg_data(nlh);
1978 	tcm->tcm_family = AF_UNSPEC;
1979 	tcm->tcm__pad1 = 0;
1980 	tcm->tcm__pad2 = 0;
1981 	if (q) {
1982 		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1983 		tcm->tcm_parent = parent;
1984 	} else {
1985 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1986 		tcm->tcm_block_index = block->index;
1987 	}
1988 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1989 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1990 		goto nla_put_failure;
1991 	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1992 		goto nla_put_failure;
1993 	if (!fh) {
1994 		tcm->tcm_handle = 0;
1995 	} else if (terse_dump) {
1996 		if (tp->ops->terse_dump) {
1997 			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
1998 						rtnl_held) < 0)
1999 				goto nla_put_failure;
2000 		} else {
2001 			goto cls_op_not_supp;
2002 		}
2003 	} else {
2004 		if (tp->ops->dump &&
2005 		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
2006 			goto nla_put_failure;
2007 	}
2008 
2009 	if (extack && extack->_msg &&
2010 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2011 		goto nla_put_failure;
2012 
2013 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2014 
2015 	return skb->len;
2016 
2017 out_nlmsg_trim:
2018 nla_put_failure:
2019 cls_op_not_supp:
2020 	nlmsg_trim(skb, b);
2021 	return -1;
2022 }
2023 
2024 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
2025 			  struct nlmsghdr *n, struct tcf_proto *tp,
2026 			  struct tcf_block *block, struct Qdisc *q,
2027 			  u32 parent, void *fh, int event, bool unicast,
2028 			  bool rtnl_held, struct netlink_ext_ack *extack)
2029 {
2030 	struct sk_buff *skb;
2031 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2032 	int err = 0;
2033 
2034 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2035 	if (!skb)
2036 		return -ENOBUFS;
2037 
2038 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2039 			  n->nlmsg_seq, n->nlmsg_flags, event,
2040 			  false, rtnl_held, extack) <= 0) {
2041 		kfree_skb(skb);
2042 		return -EINVAL;
2043 	}
2044 
2045 	if (unicast)
2046 		err = rtnl_unicast(skb, net, portid);
2047 	else
2048 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2049 				     n->nlmsg_flags & NLM_F_ECHO);
2050 	return err;
2051 }
2052 
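/* Delete a single filter and notify listeners. The notification skb is built
 * *before* ->delete() runs, because the filter data needed for the dump is
 * gone once the delete succeeds; if ->delete() fails, the prepared skb is
 * simply freed and no event is sent.
 */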
2053 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
2054 			      struct nlmsghdr *n, struct tcf_proto *tp,
2055 			      struct tcf_block *block, struct Qdisc *q,
2056 			      u32 parent, void *fh, bool unicast, bool *last,
2057 			      bool rtnl_held, struct netlink_ext_ack *extack)
2058 {
2059 	struct sk_buff *skb;
2060 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2061 	int err;
2062 
2063 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2064 	if (!skb)
2065 		return -ENOBUFS;
2066 
2067 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2068 			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
2069 			  false, rtnl_held, extack) <= 0) {
2070 		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
2071 		kfree_skb(skb);
2072 		return -EINVAL;
2073 	}
2074 
2075 	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
2076 	if (err) {
2077 		kfree_skb(skb);
2078 		return err;
2079 	}
2080 
2081 	if (unicast)
2082 		err = rtnl_unicast(skb, net, portid);
2083 	else
2084 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2085 				     n->nlmsg_flags & NLM_F_ECHO);
2086 	if (err < 0)
2087 		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
2088 
2089 	return err;
2090 }
2091 
2092 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
2093 				 struct tcf_block *block, struct Qdisc *q,
2094 				 u32 parent, struct nlmsghdr *n,
2095 				 struct tcf_chain *chain, int event,
2096 				 struct netlink_ext_ack *extack)
2097 {
2098 	struct tcf_proto *tp;
2099 
2100 	for (tp = tcf_get_next_proto(chain, NULL);
2101 	     tp; tp = tcf_get_next_proto(chain, tp))
2102 		tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
2103 			       event, false, true, extack);
2104 }
2105 
2106 static void tfilter_put(struct tcf_proto *tp, void *fh)
2107 {
2108 	if (tp->ops->put && fh)
2109 		tp->ops->put(tp, fh);
2110 }
2111 
2112 static bool is_qdisc_ingress(__u32 classid)
2113 {
2114 	return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
2115 }
2116 
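/* RTM_NEWTFILTER handler: create or update a filter. Priority zero plus
 * NLM_F_CREATE makes the kernel auto-allocate one; a missing classifier
 * instance is created on demand and inserted with
 * tcf_chain_tp_insert_unique(), and the whole request is replayed when an
 * -EAGAIN race with a concurrent chain flush is detected. For illustration
 * (iproute2 syntax, names chosen for the example), a request like
 *
 *	tc filter add dev eth0 ingress protocol ip prio 1 \
 *		flower dst_ip 198.51.100.1 action drop
 *
 * arrives here with NLM_F_CREATE set and is dispatched to the flower
 * classifier's ->change() op.
 */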
2117 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2118 			  struct netlink_ext_ack *extack)
2119 {
2120 	struct net *net = sock_net(skb->sk);
2121 	struct nlattr *tca[TCA_MAX + 1];
2122 	char name[IFNAMSIZ];
2123 	struct tcmsg *t;
2124 	u32 protocol;
2125 	u32 prio;
2126 	bool prio_allocate;
2127 	u32 parent;
2128 	u32 chain_index;
2129 	struct Qdisc *q;
2130 	struct tcf_chain_info chain_info;
2131 	struct tcf_chain *chain;
2132 	struct tcf_block *block;
2133 	struct tcf_proto *tp;
2134 	unsigned long cl;
2135 	void *fh;
2136 	int err;
2137 	int tp_created;
2138 	bool rtnl_held = false;
2139 	u32 flags;
2140 
2141 replay:
2142 	tp_created = 0;
2143 
2144 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2145 				     rtm_tca_policy, extack);
2146 	if (err < 0)
2147 		return err;
2148 
2149 	t = nlmsg_data(n);
2150 	protocol = TC_H_MIN(t->tcm_info);
2151 	prio = TC_H_MAJ(t->tcm_info);
2152 	prio_allocate = false;
2153 	parent = t->tcm_parent;
2154 	tp = NULL;
2155 	cl = 0;
2156 	block = NULL;
2157 	q = NULL;
2158 	chain = NULL;
2159 	flags = 0;
2160 
2161 	if (prio == 0) {
2162 		/* If no priority is provided by the user,
2163 		 * we allocate one.
2164 		 */
2165 		if (n->nlmsg_flags & NLM_F_CREATE) {
2166 			prio = TC_H_MAKE(0x80000000U, 0U);
2167 			prio_allocate = true;
2168 		} else {
2169 			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2170 			return -ENOENT;
2171 		}
2172 	}
2173 
2174 	/* Find head of filter chain. */
2175 
2176 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2177 	if (err)
2178 		return err;
2179 
2180 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2181 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2182 		err = -EINVAL;
2183 		goto errout;
2184 	}
2185 
2186 	/* Take the rtnl mutex if rtnl_held was true on a previous iteration,
2187 	 * the block is shared (no qdisc found), the qdisc is not unlocked, the
2188 	 * classifier type is not specified, or the classifier is not unlocked.
2189 	 */
2190 	if (rtnl_held ||
2191 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2192 	    !tcf_proto_is_unlocked(name)) {
2193 		rtnl_held = true;
2194 		rtnl_lock();
2195 	}
2196 
2197 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2198 	if (err)
2199 		goto errout;
2200 
2201 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2202 				 extack);
2203 	if (IS_ERR(block)) {
2204 		err = PTR_ERR(block);
2205 		goto errout;
2206 	}
2207 	block->classid = parent;
2208 
2209 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2210 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2211 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2212 		err = -EINVAL;
2213 		goto errout;
2214 	}
2215 	chain = tcf_chain_get(block, chain_index, true);
2216 	if (!chain) {
2217 		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2218 		err = -ENOMEM;
2219 		goto errout;
2220 	}
2221 
2222 	mutex_lock(&chain->filter_chain_lock);
2223 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2224 			       prio, prio_allocate);
2225 	if (IS_ERR(tp)) {
2226 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2227 		err = PTR_ERR(tp);
2228 		goto errout_locked;
2229 	}
2230 
2231 	if (tp == NULL) {
2232 		struct tcf_proto *tp_new = NULL;
2233 
2234 		if (chain->flushing) {
2235 			err = -EAGAIN;
2236 			goto errout_locked;
2237 		}
2238 
2239 		/* Proto-tcf does not exist, create a new one */
2240 
2241 		if (tca[TCA_KIND] == NULL || !protocol) {
2242 			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2243 			err = -EINVAL;
2244 			goto errout_locked;
2245 		}
2246 
2247 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2248 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2249 			err = -ENOENT;
2250 			goto errout_locked;
2251 		}
2252 
2253 		if (prio_allocate)
2254 			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2255 							       &chain_info));
2256 
2257 		mutex_unlock(&chain->filter_chain_lock);
2258 		tp_new = tcf_proto_create(name, protocol, prio, chain,
2259 					  rtnl_held, extack);
2260 		if (IS_ERR(tp_new)) {
2261 			err = PTR_ERR(tp_new);
2262 			goto errout_tp;
2263 		}
2264 
2265 		tp_created = 1;
2266 		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2267 						rtnl_held);
2268 		if (IS_ERR(tp)) {
2269 			err = PTR_ERR(tp);
2270 			goto errout_tp;
2271 		}
2272 	} else {
2273 		mutex_unlock(&chain->filter_chain_lock);
2274 	}
2275 
2276 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2277 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2278 		err = -EINVAL;
2279 		goto errout;
2280 	}
2281 
2282 	fh = tp->ops->get(tp, t->tcm_handle);
2283 
2284 	if (!fh) {
2285 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2286 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2287 			err = -ENOENT;
2288 			goto errout;
2289 		}
2290 	} else if (n->nlmsg_flags & NLM_F_EXCL) {
2291 		tfilter_put(tp, fh);
2292 		NL_SET_ERR_MSG(extack, "Filter already exists");
2293 		err = -EEXIST;
2294 		goto errout;
2295 	}
2296 
2297 	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2298 		tfilter_put(tp, fh);
2299 		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2300 		err = -EINVAL;
2301 		goto errout;
2302 	}
2303 
2304 	if (!(n->nlmsg_flags & NLM_F_CREATE))
2305 		flags |= TCA_ACT_FLAGS_REPLACE;
2306 	if (!rtnl_held)
2307 		flags |= TCA_ACT_FLAGS_NO_RTNL;
2308 	if (is_qdisc_ingress(parent))
2309 		flags |= TCA_ACT_FLAGS_AT_INGRESS;
2310 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2311 			      flags, extack);
2312 	if (err == 0) {
2313 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2314 			       RTM_NEWTFILTER, false, rtnl_held, extack);
2315 		tfilter_put(tp, fh);
2316 		/* q pointer is NULL for shared blocks */
2317 		if (q)
2318 			q->flags &= ~TCQ_F_CAN_BYPASS;
2319 	}
2320 
2321 errout:
2322 	if (err && tp_created)
2323 		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2324 errout_tp:
2325 	if (chain) {
2326 		if (tp && !IS_ERR(tp))
2327 			tcf_proto_put(tp, rtnl_held, NULL);
2328 		if (!tp_created)
2329 			tcf_chain_put(chain);
2330 	}
2331 	tcf_block_release(q, block, rtnl_held);
2332 
2333 	if (rtnl_held)
2334 		rtnl_unlock();
2335 
2336 	if (err == -EAGAIN) {
2337 		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
2338 		 * of target chain.
2339 		 */
2340 		rtnl_held = true;
2341 		/* Replay the request. */
2342 		goto replay;
2343 	}
2344 	return err;
2345 
2346 errout_locked:
2347 	mutex_unlock(&chain->filter_chain_lock);
2348 	goto errout;
2349 }
2350 
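/* RTM_DELTFILTER handler. Priority zero flushes the whole chain (protocol,
 * handle and kind must then be unset); handle zero with a valid priority
 * removes the entire classifier instance; otherwise one filter is deleted
 * via tfilter_del_notify() and the instance is reaped once empty. E.g.
 * `tc filter del dev eth0 ingress` (illustrative) maps to the flush case.
 */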
2351 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2352 			  struct netlink_ext_ack *extack)
2353 {
2354 	struct net *net = sock_net(skb->sk);
2355 	struct nlattr *tca[TCA_MAX + 1];
2356 	char name[IFNAMSIZ];
2357 	struct tcmsg *t;
2358 	u32 protocol;
2359 	u32 prio;
2360 	u32 parent;
2361 	u32 chain_index;
2362 	struct Qdisc *q = NULL;
2363 	struct tcf_chain_info chain_info;
2364 	struct tcf_chain *chain = NULL;
2365 	struct tcf_block *block = NULL;
2366 	struct tcf_proto *tp = NULL;
2367 	unsigned long cl = 0;
2368 	void *fh = NULL;
2369 	int err;
2370 	bool rtnl_held = false;
2371 
2372 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2373 				     rtm_tca_policy, extack);
2374 	if (err < 0)
2375 		return err;
2376 
2377 	t = nlmsg_data(n);
2378 	protocol = TC_H_MIN(t->tcm_info);
2379 	prio = TC_H_MAJ(t->tcm_info);
2380 	parent = t->tcm_parent;
2381 
2382 	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2383 		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2384 		return -ENOENT;
2385 	}
2386 
2387 	/* Find head of filter chain. */
2388 
2389 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2390 	if (err)
2391 		return err;
2392 
2393 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2394 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2395 		err = -EINVAL;
2396 		goto errout;
2397 	}
2398 	/* Take the rtnl mutex if flushing the whole chain, the block is shared
2399 	 * (no qdisc found), the qdisc is not unlocked, the classifier type is
2400 	 * not specified, or the classifier is not unlocked.
2401 	 */
2402 	if (!prio ||
2403 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2404 	    !tcf_proto_is_unlocked(name)) {
2405 		rtnl_held = true;
2406 		rtnl_lock();
2407 	}
2408 
2409 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2410 	if (err)
2411 		goto errout;
2412 
2413 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2414 				 extack);
2415 	if (IS_ERR(block)) {
2416 		err = PTR_ERR(block);
2417 		goto errout;
2418 	}
2419 
2420 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2421 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2422 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2423 		err = -EINVAL;
2424 		goto errout;
2425 	}
2426 	chain = tcf_chain_get(block, chain_index, false);
2427 	if (!chain) {
2428 		/* User requested flush on non-existent chain. Nothing to do,
2429 		 * so just return success.
2430 		 */
2431 		if (prio == 0) {
2432 			err = 0;
2433 			goto errout;
2434 		}
2435 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2436 		err = -ENOENT;
2437 		goto errout;
2438 	}
2439 
2440 	if (prio == 0) {
2441 		tfilter_notify_chain(net, skb, block, q, parent, n,
2442 				     chain, RTM_DELTFILTER, extack);
2443 		tcf_chain_flush(chain, rtnl_held);
2444 		err = 0;
2445 		goto errout;
2446 	}
2447 
2448 	mutex_lock(&chain->filter_chain_lock);
2449 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2450 			       prio, false);
2451 	if (!tp || IS_ERR(tp)) {
2452 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2453 		err = tp ? PTR_ERR(tp) : -ENOENT;
2454 		goto errout_locked;
2455 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2456 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2457 		err = -EINVAL;
2458 		goto errout_locked;
2459 	} else if (t->tcm_handle == 0) {
2460 		tcf_proto_signal_destroying(chain, tp);
2461 		tcf_chain_tp_remove(chain, &chain_info, tp);
2462 		mutex_unlock(&chain->filter_chain_lock);
2463 
2464 		tcf_proto_put(tp, rtnl_held, NULL);
2465 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2466 			       RTM_DELTFILTER, false, rtnl_held, extack);
2467 		err = 0;
2468 		goto errout;
2469 	}
2470 	mutex_unlock(&chain->filter_chain_lock);
2471 
2472 	fh = tp->ops->get(tp, t->tcm_handle);
2473 
2474 	if (!fh) {
2475 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2476 		err = -ENOENT;
2477 	} else {
2478 		bool last;
2479 
2480 		err = tfilter_del_notify(net, skb, n, tp, block,
2481 					 q, parent, fh, false, &last,
2482 					 rtnl_held, extack);
2483 
2484 		if (err)
2485 			goto errout;
2486 		if (last)
2487 			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2488 	}
2489 
2490 errout:
2491 	if (chain) {
2492 		if (tp && !IS_ERR(tp))
2493 			tcf_proto_put(tp, rtnl_held, NULL);
2494 		tcf_chain_put(chain);
2495 	}
2496 	tcf_block_release(q, block, rtnl_held);
2497 
2498 	if (rtnl_held)
2499 		rtnl_unlock();
2500 
2501 	return err;
2502 
2503 errout_locked:
2504 	mutex_unlock(&chain->filter_chain_lock);
2505 	goto errout;
2506 }
2507 
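/* RTM_GETTFILTER handler: look up a single filter by chain, priority,
 * protocol and handle, and unicast an RTM_NEWTFILTER reply to the requester.
 */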
2508 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2509 			  struct netlink_ext_ack *extack)
2510 {
2511 	struct net *net = sock_net(skb->sk);
2512 	struct nlattr *tca[TCA_MAX + 1];
2513 	char name[IFNAMSIZ];
2514 	struct tcmsg *t;
2515 	u32 protocol;
2516 	u32 prio;
2517 	u32 parent;
2518 	u32 chain_index;
2519 	struct Qdisc *q = NULL;
2520 	struct tcf_chain_info chain_info;
2521 	struct tcf_chain *chain = NULL;
2522 	struct tcf_block *block = NULL;
2523 	struct tcf_proto *tp = NULL;
2524 	unsigned long cl = 0;
2525 	void *fh = NULL;
2526 	int err;
2527 	bool rtnl_held = false;
2528 
2529 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2530 				     rtm_tca_policy, extack);
2531 	if (err < 0)
2532 		return err;
2533 
2534 	t = nlmsg_data(n);
2535 	protocol = TC_H_MIN(t->tcm_info);
2536 	prio = TC_H_MAJ(t->tcm_info);
2537 	parent = t->tcm_parent;
2538 
2539 	if (prio == 0) {
2540 		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2541 		return -ENOENT;
2542 	}
2543 
2544 	/* Find head of filter chain. */
2545 
2546 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2547 	if (err)
2548 		return err;
2549 
2550 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2551 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2552 		err = -EINVAL;
2553 		goto errout;
2554 	}
2555 	/* Take the rtnl mutex if the block is shared (no qdisc found), the
2556 	 * qdisc is not unlocked, the classifier type is not specified, or the
2557 	 * classifier is not unlocked.
2558 	 */
2559 	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2560 	    !tcf_proto_is_unlocked(name)) {
2561 		rtnl_held = true;
2562 		rtnl_lock();
2563 	}
2564 
2565 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2566 	if (err)
2567 		goto errout;
2568 
2569 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2570 				 extack);
2571 	if (IS_ERR(block)) {
2572 		err = PTR_ERR(block);
2573 		goto errout;
2574 	}
2575 
2576 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2577 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2578 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2579 		err = -EINVAL;
2580 		goto errout;
2581 	}
2582 	chain = tcf_chain_get(block, chain_index, false);
2583 	if (!chain) {
2584 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2585 		err = -EINVAL;
2586 		goto errout;
2587 	}
2588 
2589 	mutex_lock(&chain->filter_chain_lock);
2590 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2591 			       prio, false);
2592 	mutex_unlock(&chain->filter_chain_lock);
2593 	if (!tp || IS_ERR(tp)) {
2594 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2595 		err = tp ? PTR_ERR(tp) : -ENOENT;
2596 		goto errout;
2597 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2598 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2599 		err = -EINVAL;
2600 		goto errout;
2601 	}
2602 
2603 	fh = tp->ops->get(tp, t->tcm_handle);
2604 
2605 	if (!fh) {
2606 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2607 		err = -ENOENT;
2608 	} else {
2609 		err = tfilter_notify(net, skb, n, tp, block, q, parent,
2610 				     fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
2611 		if (err < 0)
2612 			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2613 	}
2614 
2615 	tfilter_put(tp, fh);
2616 errout:
2617 	if (chain) {
2618 		if (tp && !IS_ERR(tp))
2619 			tcf_proto_put(tp, rtnl_held, NULL);
2620 		tcf_chain_put(chain);
2621 	}
2622 	tcf_block_release(q, block, rtnl_held);
2623 
2624 	if (rtnl_held)
2625 		rtnl_unlock();
2626 
2627 	return err;
2628 }
2629 
2630 struct tcf_dump_args {
2631 	struct tcf_walker w;
2632 	struct sk_buff *skb;
2633 	struct netlink_callback *cb;
2634 	struct tcf_block *block;
2635 	struct Qdisc *q;
2636 	u32 parent;
2637 	bool terse_dump;
2638 };
2639 
2640 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2641 {
2642 	struct tcf_dump_args *a = (void *)arg;
2643 	struct net *net = sock_net(a->skb->sk);
2644 
2645 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2646 			     n, NETLINK_CB(a->cb->skb).portid,
2647 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2648 			     RTM_NEWTFILTER, a->terse_dump, true, NULL);
2649 }
2650 
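/* Dump all filters on one chain, resumably. Netlink callback state lives in
 * cb->args[]: args[0] feeds the flat proto index across chains (via
 * *p_index), args[1] counts nodes within the current proto (0 means the
 * proto header itself is not dumped yet), and args[2] carries the classifier
 * walker's resume cookie. Returns false when the skb fills up mid-dump.
 */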
2651 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2652 			   struct sk_buff *skb, struct netlink_callback *cb,
2653 			   long index_start, long *p_index, bool terse)
2654 {
2655 	struct net *net = sock_net(skb->sk);
2656 	struct tcf_block *block = chain->block;
2657 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2658 	struct tcf_proto *tp, *tp_prev;
2659 	struct tcf_dump_args arg;
2660 
2661 	for (tp = __tcf_get_next_proto(chain, NULL);
2662 	     tp;
2663 	     tp_prev = tp,
2664 		     tp = __tcf_get_next_proto(chain, tp),
2665 		     tcf_proto_put(tp_prev, true, NULL),
2666 		     (*p_index)++) {
2667 		if (*p_index < index_start)
2668 			continue;
2669 		if (TC_H_MAJ(tcm->tcm_info) &&
2670 		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
2671 			continue;
2672 		if (TC_H_MIN(tcm->tcm_info) &&
2673 		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
2674 			continue;
2675 		if (*p_index > index_start)
2676 			memset(&cb->args[1], 0,
2677 			       sizeof(cb->args) - sizeof(cb->args[0]));
2678 		if (cb->args[1] == 0) {
2679 			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2680 					  NETLINK_CB(cb->skb).portid,
2681 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
2682 					  RTM_NEWTFILTER, false, true, NULL) <= 0)
2683 				goto errout;
2684 			cb->args[1] = 1;
2685 		}
2686 		if (!tp->ops->walk)
2687 			continue;
2688 		arg.w.fn = tcf_node_dump;
2689 		arg.skb = skb;
2690 		arg.cb = cb;
2691 		arg.block = block;
2692 		arg.q = q;
2693 		arg.parent = parent;
2694 		arg.w.stop = 0;
2695 		arg.w.skip = cb->args[1] - 1;
2696 		arg.w.count = 0;
2697 		arg.w.cookie = cb->args[2];
2698 		arg.terse_dump = terse;
2699 		tp->ops->walk(tp, &arg.w, true);
2700 		cb->args[2] = arg.w.cookie;
2701 		cb->args[1] = arg.w.count + 1;
2702 		if (arg.w.stop)
2703 			goto errout;
2704 	}
2705 	return true;
2706 
2707 errout:
2708 	tcf_proto_put(tp, true, NULL);
2709 	return false;
2710 }
2711 
2712 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2713 	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2714 };
2715 
2716 /* called with RTNL */
2717 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2718 {
2719 	struct tcf_chain *chain, *chain_prev;
2720 	struct net *net = sock_net(skb->sk);
2721 	struct nlattr *tca[TCA_MAX + 1];
2722 	struct Qdisc *q = NULL;
2723 	struct tcf_block *block;
2724 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2725 	bool terse_dump = false;
2726 	long index_start;
2727 	long index;
2728 	u32 parent;
2729 	int err;
2730 
2731 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2732 		return skb->len;
2733 
2734 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2735 				     tcf_tfilter_dump_policy, cb->extack);
2736 	if (err)
2737 		return err;
2738 
2739 	if (tca[TCA_DUMP_FLAGS]) {
2740 		struct nla_bitfield32 flags =
2741 			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2742 
2743 		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2744 	}
2745 
2746 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2747 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2748 		if (!block)
2749 			goto out;
2750 		/* If we work with a block index, q is NULL and the parent value
2751 		 * will never be used in the following code. The check
2752 		 * in tcf_fill_node prevents it. However, the compiler does not
2753 		 * see that far, so set parent to zero to silence the warning
2754 		 * about parent being uninitialized.
2755 		 */
2756 		parent = 0;
2757 	} else {
2758 		const struct Qdisc_class_ops *cops;
2759 		struct net_device *dev;
2760 		unsigned long cl = 0;
2761 
2762 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2763 		if (!dev)
2764 			return skb->len;
2765 
2766 		parent = tcm->tcm_parent;
2767 		if (!parent)
2768 			q = rtnl_dereference(dev->qdisc);
2769 		else
2770 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2771 		if (!q)
2772 			goto out;
2773 		cops = q->ops->cl_ops;
2774 		if (!cops)
2775 			goto out;
2776 		if (!cops->tcf_block)
2777 			goto out;
2778 		if (TC_H_MIN(tcm->tcm_parent)) {
2779 			cl = cops->find(q, tcm->tcm_parent);
2780 			if (cl == 0)
2781 				goto out;
2782 		}
2783 		block = cops->tcf_block(q, cl, NULL);
2784 		if (!block)
2785 			goto out;
2786 		parent = block->classid;
2787 		if (tcf_block_shared(block))
2788 			q = NULL;
2789 	}
2790 
2791 	index_start = cb->args[0];
2792 	index = 0;
2793 
2794 	for (chain = __tcf_get_next_chain(block, NULL);
2795 	     chain;
2796 	     chain_prev = chain,
2797 		     chain = __tcf_get_next_chain(block, chain),
2798 		     tcf_chain_put(chain_prev)) {
2799 		if (tca[TCA_CHAIN] &&
2800 		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2801 			continue;
2802 		if (!tcf_chain_dump(chain, q, parent, skb, cb,
2803 				    index_start, &index, terse_dump)) {
2804 			tcf_chain_put(chain);
2805 			err = -EMSGSIZE;
2806 			break;
2807 		}
2808 	}
2809 
2810 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2811 		tcf_block_refcnt_put(block, true);
2812 	cb->args[0] = index;
2813 
2814 out:
2815 	/* If we made no progress, the error (EMSGSIZE) is real */
2816 	if (skb->len == 0 && err)
2817 		return err;
2818 	return skb->len;
2819 }
2820 
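/* Fill one RTM_*CHAIN message. Besides the chain index, the template kind
 * and its parameters are dumped when a template (tmplt_ops/tmplt_priv) is
 * attached to the chain.
 */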
2821 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2822 			      void *tmplt_priv, u32 chain_index,
2823 			      struct net *net, struct sk_buff *skb,
2824 			      struct tcf_block *block,
2825 			      u32 portid, u32 seq, u16 flags, int event,
2826 			      struct netlink_ext_ack *extack)
2827 {
2828 	unsigned char *b = skb_tail_pointer(skb);
2829 	const struct tcf_proto_ops *ops;
2830 	struct nlmsghdr *nlh;
2831 	struct tcmsg *tcm;
2832 	void *priv;
2833 
2834 	ops = tmplt_ops;
2835 	priv = tmplt_priv;
2836 
2837 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2838 	if (!nlh)
2839 		goto out_nlmsg_trim;
2840 	tcm = nlmsg_data(nlh);
2841 	tcm->tcm_family = AF_UNSPEC;
2842 	tcm->tcm__pad1 = 0;
2843 	tcm->tcm__pad2 = 0;
2844 	tcm->tcm_handle = 0;
2845 	if (block->q) {
2846 		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2847 		tcm->tcm_parent = block->q->handle;
2848 	} else {
2849 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2850 		tcm->tcm_block_index = block->index;
2851 	}
2852 
2853 	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2854 		goto nla_put_failure;
2855 
2856 	if (ops) {
2857 		if (nla_put_string(skb, TCA_KIND, ops->kind))
2858 			goto nla_put_failure;
2859 		if (ops->tmplt_dump(skb, net, priv) < 0)
2860 			goto nla_put_failure;
2861 	}
2862 
2863 	if (extack && extack->_msg &&
2864 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2865 		goto out_nlmsg_trim;
2866 
2867 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2868 
2869 	return skb->len;
2870 
2871 out_nlmsg_trim:
2872 nla_put_failure:
2873 	nlmsg_trim(skb, b);
2874 	return -EMSGSIZE;
2875 }
2876 
2877 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2878 			   u32 seq, u16 flags, int event, bool unicast,
2879 			   struct netlink_ext_ack *extack)
2880 {
2881 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2882 	struct tcf_block *block = chain->block;
2883 	struct net *net = block->net;
2884 	struct sk_buff *skb;
2885 	int err = 0;
2886 
2887 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2888 	if (!skb)
2889 		return -ENOBUFS;
2890 
2891 	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2892 			       chain->index, net, skb, block, portid,
2893 			       seq, flags, event, extack) <= 0) {
2894 		kfree_skb(skb);
2895 		return -EINVAL;
2896 	}
2897 
2898 	if (unicast)
2899 		err = rtnl_unicast(skb, net, portid);
2900 	else
2901 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2902 				     flags & NLM_F_ECHO);
2903 
2904 	return err;
2905 }
2906 
2907 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2908 				  void *tmplt_priv, u32 chain_index,
2909 				  struct tcf_block *block, struct sk_buff *oskb,
2910 				  u32 seq, u16 flags, bool unicast)
2911 {
2912 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2913 	struct net *net = block->net;
2914 	struct sk_buff *skb;
2915 
2916 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2917 	if (!skb)
2918 		return -ENOBUFS;
2919 
2920 	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2921 			       block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
2922 		kfree_skb(skb);
2923 		return -EINVAL;
2924 	}
2925 
2926 	if (unicast)
2927 		return rtnl_unicast(skb, net, portid);
2928 
2929 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2930 }
2931 
2932 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2933 			      struct nlattr **tca,
2934 			      struct netlink_ext_ack *extack)
2935 {
2936 	const struct tcf_proto_ops *ops;
2937 	char name[IFNAMSIZ];
2938 	void *tmplt_priv;
2939 
2940 	/* If kind is not set, the user did not specify a template. */
2941 	if (!tca[TCA_KIND])
2942 		return 0;
2943 
2944 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2945 		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2946 		return -EINVAL;
2947 	}
2948 
2949 	ops = tcf_proto_lookup_ops(name, true, extack);
2950 	if (IS_ERR(ops))
2951 		return PTR_ERR(ops);
2952 	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2953 		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2954 		return -EOPNOTSUPP;
2955 	}
2956 
2957 	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2958 	if (IS_ERR(tmplt_priv)) {
2959 		module_put(ops->owner);
2960 		return PTR_ERR(tmplt_priv);
2961 	}
2962 	chain->tmplt_ops = ops;
2963 	chain->tmplt_priv = tmplt_priv;
2964 	return 0;
2965 }
2966 
2967 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2968 			       void *tmplt_priv)
2969 {
2970 	/* If template ops are not set, there is no work to do. */
2971 	if (!tmplt_ops)
2972 		return;
2973 
2974 	tmplt_ops->tmplt_destroy(tmplt_priv);
2975 	module_put(tmplt_ops->owner);
2976 }
2977 
2978 /* Add/delete/get a chain */
2979 
2980 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2981 			struct netlink_ext_ack *extack)
2982 {
2983 	struct net *net = sock_net(skb->sk);
2984 	struct nlattr *tca[TCA_MAX + 1];
2985 	struct tcmsg *t;
2986 	u32 parent;
2987 	u32 chain_index;
2988 	struct Qdisc *q;
2989 	struct tcf_chain *chain;
2990 	struct tcf_block *block;
2991 	unsigned long cl;
2992 	int err;
2993 
2994 replay:
2995 	q = NULL;
2996 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2997 				     rtm_tca_policy, extack);
2998 	if (err < 0)
2999 		return err;
3000 
3001 	t = nlmsg_data(n);
3002 	parent = t->tcm_parent;
3003 	cl = 0;
3004 
3005 	block = tcf_block_find(net, &q, &parent, &cl,
3006 			       t->tcm_ifindex, t->tcm_block_index, extack);
3007 	if (IS_ERR(block))
3008 		return PTR_ERR(block);
3009 
3010 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
3011 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
3012 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
3013 		err = -EINVAL;
3014 		goto errout_block;
3015 	}
3016 
3017 	mutex_lock(&block->lock);
3018 	chain = tcf_chain_lookup(block, chain_index);
3019 	if (n->nlmsg_type == RTM_NEWCHAIN) {
3020 		if (chain) {
3021 			if (tcf_chain_held_by_acts_only(chain)) {
3022 				/* The chain exists only because there is
3023 				 * some action referencing it.
3024 				 */
3025 				tcf_chain_hold(chain);
3026 			} else {
3027 				NL_SET_ERR_MSG(extack, "Filter chain already exists");
3028 				err = -EEXIST;
3029 				goto errout_block_locked;
3030 			}
3031 		} else {
3032 			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
3033 				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
3034 				err = -ENOENT;
3035 				goto errout_block_locked;
3036 			}
3037 			chain = tcf_chain_create(block, chain_index);
3038 			if (!chain) {
3039 				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
3040 				err = -ENOMEM;
3041 				goto errout_block_locked;
3042 			}
3043 		}
3044 	} else {
3045 		if (!chain || tcf_chain_held_by_acts_only(chain)) {
3046 			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
3047 			err = -EINVAL;
3048 			goto errout_block_locked;
3049 		}
3050 		tcf_chain_hold(chain);
3051 	}
3052 
3053 	if (n->nlmsg_type == RTM_NEWCHAIN) {
3054 		/* Modifying chain requires holding parent block lock. In case
3055 		 * the chain was successfully added, take a reference to the
3056 		 * chain. This ensures that an empty chain does not disappear at
3057 		 * the end of this function.
3058 		 */
3059 		tcf_chain_hold(chain);
3060 		chain->explicitly_created = true;
3061 	}
3062 	mutex_unlock(&block->lock);
3063 
3064 	switch (n->nlmsg_type) {
3065 	case RTM_NEWCHAIN:
3066 		err = tc_chain_tmplt_add(chain, net, tca, extack);
3067 		if (err) {
3068 			tcf_chain_put_explicitly_created(chain);
3069 			goto errout;
3070 		}
3071 
3072 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
3073 				RTM_NEWCHAIN, false, extack);
3074 		break;
3075 	case RTM_DELCHAIN:
3076 		tfilter_notify_chain(net, skb, block, q, parent, n,
3077 				     chain, RTM_DELTFILTER, extack);
3078 		/* Flush the chain first as the user requested chain removal. */
3079 		tcf_chain_flush(chain, true);
3080 		/* In case the chain was successfully deleted, put a reference
3081 		 * to the chain previously taken during addition.
3082 		 */
3083 		tcf_chain_put_explicitly_created(chain);
3084 		break;
3085 	case RTM_GETCHAIN:
3086 		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
3087 				      n->nlmsg_flags, n->nlmsg_type, true, extack);
3088 		if (err < 0)
3089 			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
3090 		break;
3091 	default:
3092 		err = -EOPNOTSUPP;
3093 		NL_SET_ERR_MSG(extack, "Unsupported message type");
3094 		goto errout;
3095 	}
3096 
3097 errout:
3098 	tcf_chain_put(chain);
3099 errout_block:
3100 	tcf_block_release(q, block, true);
3101 	if (err == -EAGAIN)
3102 		/* Replay the request. */
3103 		goto replay;
3104 	return err;
3105 
3106 errout_block_locked:
3107 	mutex_unlock(&block->lock);
3108 	goto errout_block;
3109 }
3110 
3111 /* called with RTNL */
3112 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
3113 {
3114 	struct net *net = sock_net(skb->sk);
3115 	struct nlattr *tca[TCA_MAX + 1];
3116 	struct Qdisc *q = NULL;
3117 	struct tcf_block *block;
3118 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
3119 	struct tcf_chain *chain;
3120 	long index_start;
3121 	long index;
3122 	int err;
3123 
3124 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
3125 		return skb->len;
3126 
3127 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
3128 				     rtm_tca_policy, cb->extack);
3129 	if (err)
3130 		return err;
3131 
3132 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
3133 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
3134 		if (!block)
3135 			goto out;
3136 	} else {
3137 		const struct Qdisc_class_ops *cops;
3138 		struct net_device *dev;
3139 		unsigned long cl = 0;
3140 
3141 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
3142 		if (!dev)
3143 			return skb->len;
3144 
3145 		if (!tcm->tcm_parent)
3146 			q = rtnl_dereference(dev->qdisc);
3147 		else
3148 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3149 
3150 		if (!q)
3151 			goto out;
3152 		cops = q->ops->cl_ops;
3153 		if (!cops)
3154 			goto out;
3155 		if (!cops->tcf_block)
3156 			goto out;
3157 		if (TC_H_MIN(tcm->tcm_parent)) {
3158 			cl = cops->find(q, tcm->tcm_parent);
3159 			if (cl == 0)
3160 				goto out;
3161 		}
3162 		block = cops->tcf_block(q, cl, NULL);
3163 		if (!block)
3164 			goto out;
3165 		if (tcf_block_shared(block))
3166 			q = NULL;
3167 	}
3168 
3169 	index_start = cb->args[0];
3170 	index = 0;
3171 
3172 	mutex_lock(&block->lock);
3173 	list_for_each_entry(chain, &block->chain_list, list) {
3174 		if ((tca[TCA_CHAIN] &&
3175 		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3176 			continue;
3177 		if (index < index_start) {
3178 			index++;
3179 			continue;
3180 		}
3181 		if (tcf_chain_held_by_acts_only(chain))
3182 			continue;
3183 		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3184 					 chain->index, net, skb, block,
3185 					 NETLINK_CB(cb->skb).portid,
3186 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3187 					 RTM_NEWCHAIN, NULL);
3188 		if (err <= 0)
3189 			break;
3190 		index++;
3191 	}
3192 	mutex_unlock(&block->lock);
3193 
3194 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3195 		tcf_block_refcnt_put(block, true);
3196 	cb->args[0] = index;
3197 
3198 out:
3199 	/* If we made no progress, the error (EMSGSIZE) is real */
3200 	if (skb->len == 0 && err)
3201 		return err;
3202 	return skb->len;
3203 }
3204 
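/* Initialize a tcf_exts instance embedded in a classifier's filter struct;
 * "action" and "police" are the netlink attribute IDs the classifier uses
 * for its action lists. A minimal sketch of typical usage (the attribute
 * name is flower's, shown for illustration only):
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_FLOWER_ACT, 0);
 *	if (err < 0)
 *		goto errout;
 *
 * where tcf_exts_init() is the inline wrapper that skips the action-miss
 * cookie setup.
 */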
3205 int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
3206 		     int police, struct tcf_proto *tp, u32 handle,
3207 		     bool use_action_miss)
3208 {
3209 	int err = 0;
3210 
3211 #ifdef CONFIG_NET_CLS_ACT
3212 	exts->type = 0;
3213 	exts->nr_actions = 0;
3214 	/* Note: we do not yet own a reference on net.
3215 	 * This reference might be taken later from tcf_exts_get_net().
3216 	 */
3217 	exts->net = net;
3218 	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
3219 				GFP_KERNEL);
3220 	if (!exts->actions)
3221 		return -ENOMEM;
3222 #endif
3223 
3224 	exts->action = action;
3225 	exts->police = police;
3226 
3227 	if (!use_action_miss)
3228 		return 0;
3229 
3230 	err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
3231 	if (err)
3232 		goto err_miss_alloc;
3233 
3234 	return 0;
3235 
3236 err_miss_alloc:
3237 	tcf_exts_destroy(exts);
3238 	return err;
3239 }
3240 EXPORT_SYMBOL(tcf_exts_init_ex);
3241 
3242 void tcf_exts_destroy(struct tcf_exts *exts)
3243 {
3244 	tcf_exts_miss_cookie_base_destroy(exts);
3245 
3246 #ifdef CONFIG_NET_CLS_ACT
3247 	if (exts->actions) {
3248 		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3249 		kfree(exts->actions);
3250 	}
3251 	exts->nr_actions = 0;
3252 #endif
3253 }
3254 EXPORT_SYMBOL(tcf_exts_destroy);
3255 
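/* Parse and attach the actions referenced by a filter's netlink attributes.
 * Either the legacy single-police attribute (recorded as TCA_OLD_COMPAT) or
 * a full action list (tcf_action_init()) is accepted; parsed actions land in
 * exts->actions with TCA_ACT_FLAGS_BIND set. Without CONFIG_NET_CLS_ACT, any
 * action attribute is rejected with -EOPNOTSUPP.
 */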
3256 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3257 			 struct nlattr *rate_tlv, struct tcf_exts *exts,
3258 			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3259 {
3260 #ifdef CONFIG_NET_CLS_ACT
3261 	{
3262 		int init_res[TCA_ACT_MAX_PRIO] = {};
3263 		struct tc_action *act;
3264 		size_t attr_size = 0;
3265 
3266 		if (exts->police && tb[exts->police]) {
3267 			struct tc_action_ops *a_o;
3268 
3269 			a_o = tc_action_load_ops(tb[exts->police], true,
3270 						 !(flags & TCA_ACT_FLAGS_NO_RTNL),
3271 						 extack);
3272 			if (IS_ERR(a_o))
3273 				return PTR_ERR(a_o);
3274 			flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3275 			act = tcf_action_init_1(net, tp, tb[exts->police],
3276 						rate_tlv, a_o, init_res, flags,
3277 						extack);
3278 			module_put(a_o->owner);
3279 			if (IS_ERR(act))
3280 				return PTR_ERR(act);
3281 
3282 			act->type = exts->type = TCA_OLD_COMPAT;
3283 			exts->actions[0] = act;
3284 			exts->nr_actions = 1;
3285 			tcf_idr_insert_many(exts->actions);
3286 		} else if (exts->action && tb[exts->action]) {
3287 			int err;
3288 
3289 			flags |= TCA_ACT_FLAGS_BIND;
3290 			err = tcf_action_init(net, tp, tb[exts->action],
3291 					      rate_tlv, exts->actions, init_res,
3292 					      &attr_size, flags, fl_flags,
3293 					      extack);
3294 			if (err < 0)
3295 				return err;
3296 			exts->nr_actions = err;
3297 		}
3298 	}
3299 #else
3300 	if ((exts->action && tb[exts->action]) ||
3301 	    (exts->police && tb[exts->police])) {
3302 		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3303 		return -EOPNOTSUPP;
3304 	}
3305 #endif
3306 
3307 	return 0;
3308 }
3309 EXPORT_SYMBOL(tcf_exts_validate_ex);
3310 
3311 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3312 		      struct nlattr *rate_tlv, struct tcf_exts *exts,
3313 		      u32 flags, struct netlink_ext_ack *extack)
3314 {
3315 	return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3316 				    flags, 0, extack);
3317 }
3318 EXPORT_SYMBOL(tcf_exts_validate);
3319 
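/* Swap newly validated extensions into a live filter: dst takes over src's
 * action array by plain struct assignment and the old actions are released
 * via tcf_exts_destroy(). Some classifiers use this from ->change() to
 * commit a scratch tcf_exts that passed tcf_exts_validate().
 */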
3320 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3321 {
3322 #ifdef CONFIG_NET_CLS_ACT
3323 	struct tcf_exts old = *dst;
3324 
3325 	*dst = *src;
3326 	tcf_exts_destroy(&old);
3327 #endif
3328 }
3329 EXPORT_SYMBOL(tcf_exts_change);
3330 
3331 #ifdef CONFIG_NET_CLS_ACT
3332 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3333 {
3334 	if (exts->nr_actions == 0)
3335 		return NULL;
3336 	else
3337 		return exts->actions[0];
3338 }
3339 #endif
3340 
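/* A classifier's ->dump() op typically emits its own attributes and then the
 * extensions, roughly as follows (a sketch; the nesting attribute and error
 * label are illustrative):
 *
 *	if (tcf_exts_dump(skb, &f->exts) < 0)
 *		goto nla_put_failure;
 *	nla_nest_end(skb, nest);
 *	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
 *		goto nla_put_failure;
 */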
3341 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3342 {
3343 #ifdef CONFIG_NET_CLS_ACT
3344 	struct nlattr *nest;
3345 
3346 	if (exts->action && tcf_exts_has_actions(exts)) {
3347 		/*
3348 		 * Again, for backward-compatible mode: we want to work
3349 		 * with both old and new modes of entering tc data even
3350 		 * if iproute2 is newer. - jhs
3351 		 */
3352 		if (exts->type != TCA_OLD_COMPAT) {
3353 			nest = nla_nest_start_noflag(skb, exts->action);
3354 			if (nest == NULL)
3355 				goto nla_put_failure;
3356 
3357 			if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3358 			    < 0)
3359 				goto nla_put_failure;
3360 			nla_nest_end(skb, nest);
3361 		} else if (exts->police) {
3362 			struct tc_action *act = tcf_exts_first_act(exts);
3363 			nest = nla_nest_start_noflag(skb, exts->police);
3364 			if (nest == NULL || !act)
3365 				goto nla_put_failure;
3366 			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3367 				goto nla_put_failure;
3368 			nla_nest_end(skb, nest);
3369 		}
3370 	}
3371 	return 0;
3372 
3373 nla_put_failure:
3374 	nla_nest_cancel(skb, nest);
3375 	return -1;
3376 #else
3377 	return 0;
3378 #endif
3379 }
3380 EXPORT_SYMBOL(tcf_exts_dump);
3381 
3382 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3383 {
3384 #ifdef CONFIG_NET_CLS_ACT
3385 	struct nlattr *nest;
3386 
3387 	if (!exts->action || !tcf_exts_has_actions(exts))
3388 		return 0;
3389 
3390 	nest = nla_nest_start_noflag(skb, exts->action);
3391 	if (!nest)
3392 		goto nla_put_failure;
3393 
3394 	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3395 		goto nla_put_failure;
3396 	nla_nest_end(skb, nest);
3397 	return 0;
3398 
3399 nla_put_failure:
3400 	nla_nest_cancel(skb, nest);
3401 	return -1;
3402 #else
3403 	return 0;
3404 #endif
3405 }
3406 EXPORT_SYMBOL(tcf_exts_terse_dump);
3407 
3408 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3409 {
3410 #ifdef CONFIG_NET_CLS_ACT
3411 	struct tc_action *a = tcf_exts_first_act(exts);
3412 	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3413 		return -1;
3414 #endif
3415 	return 0;
3416 }
3417 EXPORT_SYMBOL(tcf_exts_dump_stats);
3418 
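/* Offload accounting: the TCA_CLS_FLAGS_IN_HW bit in the filter's flags
 * records whether this filter has already been counted in block->offloadcnt,
 * keeping the inc/dec below paired even when callbacks come and go while the
 * filter exists.
 */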
3419 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3420 {
3421 	if (*flags & TCA_CLS_FLAGS_IN_HW)
3422 		return;
3423 	*flags |= TCA_CLS_FLAGS_IN_HW;
3424 	atomic_inc(&block->offloadcnt);
3425 }
3426 
3427 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3428 {
3429 	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3430 		return;
3431 	*flags &= ~TCA_CLS_FLAGS_IN_HW;
3432 	atomic_dec(&block->offloadcnt);
3433 }
3434 
3435 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3436 				      struct tcf_proto *tp, u32 *cnt,
3437 				      u32 *flags, u32 diff, bool add)
3438 {
3439 	lockdep_assert_held(&block->cb_lock);
3440 
3441 	spin_lock(&tp->lock);
3442 	if (add) {
3443 		if (!*cnt)
3444 			tcf_block_offload_inc(block, flags);
3445 		*cnt += diff;
3446 	} else {
3447 		*cnt -= diff;
3448 		if (!*cnt)
3449 			tcf_block_offload_dec(block, flags);
3450 	}
3451 	spin_unlock(&tp->lock);
3452 }
3453 
3454 static void
3455 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3456 			 u32 *cnt, u32 *flags)
3457 {
3458 	lockdep_assert_held(&block->cb_lock);
3459 
3460 	spin_lock(&tp->lock);
3461 	tcf_block_offload_dec(block, flags);
3462 	*cnt = 0;
3463 	spin_unlock(&tp->lock);
3464 }
3465 
3466 static int
3467 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3468 		   void *type_data, bool err_stop)
3469 {
3470 	struct flow_block_cb *block_cb;
3471 	int ok_count = 0;
3472 	int err;
3473 
3474 	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3475 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
3476 		if (err) {
3477 			if (err_stop)
3478 				return err;
3479 		} else {
3480 			ok_count++;
3481 		}
3482 	}
3483 	return ok_count;
3484 }
3485 
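/* Offer a setup event to every callback registered on the block. Returns the
 * number of callbacks that accepted it, or the first error when err_stop is
 * set.
 */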
3486 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3487 		     void *type_data, bool err_stop, bool rtnl_held)
3488 {
3489 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3490 	int ok_count;
3491 
3492 retry:
3493 	if (take_rtnl)
3494 		rtnl_lock();
3495 	down_read(&block->cb_lock);
3496 	/* Need to obtain the rtnl lock if the block is bound to devs that
3497 	 * require it. In the block bind code, cb_lock is obtained while holding
3498 	 * rtnl, so we must obtain the locks in the same order here.
3499 	 */
3500 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3501 		up_read(&block->cb_lock);
3502 		take_rtnl = true;
3503 		goto retry;
3504 	}
3505 
3506 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3507 
3508 	up_read(&block->cb_lock);
3509 	if (take_rtnl)
3510 		rtnl_unlock();
3511 	return ok_count;
3512 }
3513 EXPORT_SYMBOL(tc_setup_cb_call);
3514 
3515 /* Non-destructive filter add. If a filter that wasn't already in hardware is
3516  * successfully offloaded, increment the block offloads counter. On failure, a
3517  * previously offloaded filter is considered to be intact and the offloads
3518  * counter is not decremented.
3519  */
3520 
3521 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3522 		    enum tc_setup_type type, void *type_data, bool err_stop,
3523 		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3524 {
3525 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3526 	int ok_count;
3527 
3528 retry:
3529 	if (take_rtnl)
3530 		rtnl_lock();
3531 	down_read(&block->cb_lock);
3532 	/* Need to obtain the rtnl lock if the block is bound to devs that
3533 	 * require it. In the block bind code, cb_lock is obtained while holding
3534 	 * rtnl, so we must obtain the locks in the same order here.
3535 	 */
3536 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3537 		up_read(&block->cb_lock);
3538 		take_rtnl = true;
3539 		goto retry;
3540 	}
3541 
3542 	/* Make sure all netdevs sharing this block are offload-capable. */
3543 	if (block->nooffloaddevcnt && err_stop) {
3544 		ok_count = -EOPNOTSUPP;
3545 		goto err_unlock;
3546 	}
3547 
3548 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3549 	if (ok_count < 0)
3550 		goto err_unlock;
3551 
3552 	if (tp->ops->hw_add)
3553 		tp->ops->hw_add(tp, type_data);
3554 	if (ok_count > 0)
3555 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3556 					  ok_count, true);
3557 err_unlock:
3558 	up_read(&block->cb_lock);
3559 	if (take_rtnl)
3560 		rtnl_unlock();
3561 	return min(ok_count, 0);
3562 }
3563 EXPORT_SYMBOL(tc_setup_cb_add);
3564 
3565 /* Destructive filter replace. If a filter that wasn't already in hardware is
3566  * successfully offloaded, increment the block offload counter. On failure, a
3567  * previously offloaded filter is considered to be destroyed and the offload
3568  * counter is decremented.
3569  */
3570 
3571 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3572 			enum tc_setup_type type, void *type_data, bool err_stop,
3573 			u32 *old_flags, unsigned int *old_in_hw_count,
3574 			u32 *new_flags, unsigned int *new_in_hw_count,
3575 			bool rtnl_held)
3576 {
3577 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3578 	int ok_count;
3579 
3580 retry:
3581 	if (take_rtnl)
3582 		rtnl_lock();
3583 	down_read(&block->cb_lock);
3584 	/* Need to obtain the rtnl lock if the block is bound to devs that
3585 	 * require it. In the block bind code, cb_lock is obtained while holding
3586 	 * rtnl, so we must obtain the locks in the same order here.
3587 	 */
3588 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3589 		up_read(&block->cb_lock);
3590 		take_rtnl = true;
3591 		goto retry;
3592 	}
3593 
3594 	/* Make sure all netdevs sharing this block are offload-capable. */
3595 	if (block->nooffloaddevcnt && err_stop) {
3596 		ok_count = -EOPNOTSUPP;
3597 		goto err_unlock;
3598 	}
3599 
3600 	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3601 	if (tp->ops->hw_del)
3602 		tp->ops->hw_del(tp, type_data);
3603 
3604 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3605 	if (ok_count < 0)
3606 		goto err_unlock;
3607 
3608 	if (tp->ops->hw_add)
3609 		tp->ops->hw_add(tp, type_data);
3610 	if (ok_count > 0)
3611 		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3612 					  new_flags, ok_count, true);
3613 err_unlock:
3614 	up_read(&block->cb_lock);
3615 	if (take_rtnl)
3616 		rtnl_unlock();
3617 	return min(ok_count, 0);
3618 }
3619 EXPORT_SYMBOL(tc_setup_cb_replace);
3620 
3621 /* Destroy the filter and decrement the block offload counter if the filter
3622  * was previously offloaded.
3623  */
3624 
3625 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3626 			enum tc_setup_type type, void *type_data, bool err_stop,
3627 			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3628 {
3629 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3630 	int ok_count;
3631 
3632 retry:
3633 	if (take_rtnl)
3634 		rtnl_lock();
3635 	down_read(&block->cb_lock);
3636 	/* Need to obtain the rtnl lock if the block is bound to devs that
3637 	 * require it. In the block bind code, cb_lock is obtained while holding
3638 	 * rtnl, so we must obtain the locks in the same order here.
3639 	 */
3640 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3641 		up_read(&block->cb_lock);
3642 		take_rtnl = true;
3643 		goto retry;
3644 	}
3645 
3646 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3647 
3648 	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3649 	if (tp->ops->hw_del)
3650 		tp->ops->hw_del(tp, type_data);
3651 
3652 	up_read(&block->cb_lock);
3653 	if (take_rtnl)
3654 		rtnl_unlock();
3655 	return min(ok_count, 0);
3656 }
3657 EXPORT_SYMBOL(tc_setup_cb_destroy);
3658 
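/* Replay one filter to a single callback, used when a callback is added to
 * or removed from an existing block. An add failure is only propagated for
 * skip_sw filters, which cannot fall back to software processing.
 */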
3659 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3660 			  bool add, flow_setup_cb_t *cb,
3661 			  enum tc_setup_type type, void *type_data,
3662 			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3663 {
3664 	int err = cb(type, type_data, cb_priv);
3665 
3666 	if (err) {
3667 		if (add && tc_skip_sw(*flags))
3668 			return err;
3669 	} else {
3670 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3671 					  add);
3672 	}
3673 
3674 	return 0;
3675 }
3676 EXPORT_SYMBOL(tc_setup_cb_reoffload);
3677 
3678 static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
3679 				   const struct tc_action *act)
3680 {
3681 	struct tc_cookie *user_cookie;
3682 	int err = 0;
3683 
3684 	rcu_read_lock();
3685 	user_cookie = rcu_dereference(act->user_cookie);
3686 	if (user_cookie) {
3687 		entry->user_cookie = flow_action_cookie_create(user_cookie->data,
3688 							       user_cookie->len,
3689 							       GFP_ATOMIC);
3690 		if (!entry->user_cookie)
3691 			err = -ENOMEM;
3692 	}
3693 	rcu_read_unlock();
3694 	return err;
3695 }
3696 
3697 static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
3698 {
3699 	flow_action_cookie_destroy(entry->user_cookie);
3700 }
3701 
3702 void tc_cleanup_offload_action(struct flow_action *flow_action)
3703 {
3704 	struct flow_action_entry *entry;
3705 	int i;
3706 
3707 	flow_action_for_each(i, entry, flow_action) {
3708 		tcf_act_put_user_cookie(entry);
3709 		if (entry->destructor)
3710 			entry->destructor(entry->destructor_priv);
3711 	}
3712 }
3713 EXPORT_SYMBOL(tc_cleanup_offload_action);
3714 
3715 static int tc_setup_offload_act(struct tc_action *act,
3716 				struct flow_action_entry *entry,
3717 				u32 *index_inc,
3718 				struct netlink_ext_ack *extack)
3719 {
3720 #ifdef CONFIG_NET_CLS_ACT
3721 	if (act->ops->offload_act_setup) {
3722 		return act->ops->offload_act_setup(act, entry, index_inc, true,
3723 						   extack);
3724 	} else {
3725 		NL_SET_ERR_MSG(extack, "Action does not support offload");
3726 		return -EOPNOTSUPP;
3727 	}
3728 #else
3729 	return 0;
3730 #endif
3731 }
3732 
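/* Translate an array of tc actions into flow_action entries for offload.
 * One tc action may expand into several entries (->offload_act_setup()
 * reports how many via index_inc; pedit with multiple keys is the usual
 * example), and every resulting entry inherits the action's hw_stats type,
 * hw_index and a miss cookie derived from miss_cookie_base plus the action's
 * position in the array.
 */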
3733 int tc_setup_action(struct flow_action *flow_action,
3734 		    struct tc_action *actions[],
3735 		    u32 miss_cookie_base,
3736 		    struct netlink_ext_ack *extack)
3737 {
3738 	int i, j, k, index, err = 0;
3739 	struct tc_action *act;
3740 
3741 	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3742 	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3743 	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3744 
3745 	if (!actions)
3746 		return 0;
3747 
3748 	j = 0;
3749 	tcf_act_for_each_action(i, act, actions) {
3750 		struct flow_action_entry *entry;
3751 
3752 		entry = &flow_action->entries[j];
3753 		spin_lock_bh(&act->tcfa_lock);
3754 		err = tcf_act_get_user_cookie(entry, act);
3755 		if (err)
3756 			goto err_out_locked;
3757 
3758 		index = 0;
3759 		err = tc_setup_offload_act(act, entry, &index, extack);
3760 		if (err)
3761 			goto err_out_locked;
3762 
3763 		for (k = 0; k < index; k++) {
3764 			entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3765 			entry[k].hw_index = act->tcfa_index;
3766 			entry[k].cookie = (unsigned long)act;
3767 			entry[k].miss_cookie =
3768 				tcf_exts_miss_cookie_get(miss_cookie_base, i);
3769 		}
3770 
3771 		j += index;
3772 
3773 		spin_unlock_bh(&act->tcfa_lock);
3774 	}
3775 
3776 err_out:
3777 	if (err)
3778 		tc_cleanup_offload_action(flow_action);
3779 
3780 	return err;
3781 err_out_locked:
3782 	spin_unlock_bh(&act->tcfa_lock);
3783 	goto err_out;
3784 }
3785 
3786 int tc_setup_offload_action(struct flow_action *flow_action,
3787 			    const struct tcf_exts *exts,
3788 			    struct netlink_ext_ack *extack)
3789 {
3790 #ifdef CONFIG_NET_CLS_ACT
3791 	u32 miss_cookie_base;
3792 
3793 	if (!exts)
3794 		return 0;
3795 
3796 	miss_cookie_base = exts->miss_cookie_node ?
3797 			   exts->miss_cookie_node->miss_cookie_base : 0;
3798 	return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
3799 			       extack);
3800 #else
3801 	return 0;
3802 #endif
3803 }
3804 EXPORT_SYMBOL(tc_setup_offload_action);
3805 
3806 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3807 {
3808 	unsigned int num_acts = 0;
3809 	struct tc_action *act;
3810 	int i;
3811 
3812 	tcf_exts_for_each_action(i, act, exts) {
3813 		if (is_tcf_pedit(act))
3814 			num_acts += tcf_pedit_nkeys(act);
3815 		else
3816 			num_acts++;
3817 	}
3818 	return num_acts;
3819 }
3820 EXPORT_SYMBOL(tcf_exts_num_actions);
3821 
3822 #ifdef CONFIG_NET_CLS_ACT
3823 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3824 					u32 *p_block_index,
3825 					struct netlink_ext_ack *extack)
3826 {
3827 	*p_block_index = nla_get_u32(block_index_attr);
3828 	if (!*p_block_index) {
3829 		NL_SET_ERR_MSG(extack, "Block number may not be zero");
3830 		return -EINVAL;
3831 	}
3832 
3833 	return 0;
3834 }
3835 
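/* Qevents let a qdisc attach an independent filter block to a named event
 * point (binder_type) instead of the main classification path; sch_red's
 * early_drop/mark qevents are existing users (mentioned for context). The
 * block index comes from a qdisc-specific netlink attribute.
 */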
3836 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3837 		    enum flow_block_binder_type binder_type,
3838 		    struct nlattr *block_index_attr,
3839 		    struct netlink_ext_ack *extack)
3840 {
3841 	u32 block_index;
3842 	int err;
3843 
3844 	if (!block_index_attr)
3845 		return 0;
3846 
3847 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3848 	if (err)
3849 		return err;
3850 
3851 	qe->info.binder_type = binder_type;
3852 	qe->info.chain_head_change = tcf_chain_head_change_dflt;
3853 	qe->info.chain_head_change_priv = &qe->filter_chain;
3854 	qe->info.block_index = block_index;
3855 
3856 	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3857 }
3858 EXPORT_SYMBOL(tcf_qevent_init);
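 
/* Hypothetical usage sketch (not part of this file), modeled on how sch_red
 * binds its early_drop qevent at init time; the qdisc, its attribute table
 * and the tcf_qevent member are stand-ins.
 */
#if 0
static int example_qdisc_init(struct Qdisc *sch, struct nlattr **tb,
			      struct tcf_qevent *qe,
			      struct netlink_ext_ack *extack)
{
	/* A missing attribute means no qevent block; tcf_qevent_init()
	 * then leaves @qe inert and returns 0.
	 */
	return tcf_qevent_init(qe, sch,
			       FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
			       tb[TCA_RED_EARLY_DROP_BLOCK], extack);
}
#endif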
3859 
3860 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3861 {
3862 	if (qe->info.block_index)
3863 		tcf_block_put_ext(qe->block, sch, &qe->info);
3864 }
3865 EXPORT_SYMBOL(tcf_qevent_destroy);
3866 
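/* A block index may be re-specified on a change operation, but only with
 * the value the qevent was created with; moving a qevent to a different
 * block is not supported.
 */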
3867 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3868 			       struct netlink_ext_ack *extack)
3869 {
3870 	u32 block_index;
3871 	int err;
3872 
3873 	if (!block_index_attr)
3874 		return 0;
3875 
3876 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3877 	if (err)
3878 		return err;
3879 
3880 	/* Reject configuring a new block as well as changing the bound block. */
3881 	if (block_index != qe->info.block_index) {
3882 		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3883 		return -EINVAL;
3884 	}
3885 
3886 	return 0;
3887 }
3888 EXPORT_SYMBOL(tcf_qevent_validate_change);
3889 
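/* Run @skb through the filters of the qevent's block, if one is bound.
 * Returns the skb for the caller to keep processing, or NULL when the
 * filters consumed it, with *ret set to the __NET_XMIT_* modifier to
 * fold into the enqueue return code.
 */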
3890 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3891 				  struct sk_buff **to_free, int *ret)
3892 {
3893 	struct tcf_result cl_res;
3894 	struct tcf_proto *fl;
3895 
3896 	if (!qe->info.block_index)
3897 		return skb;
3898 
3899 	fl = rcu_dereference_bh(qe->filter_chain);
3900 
3901 	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
3902 	case TC_ACT_SHOT:
3903 		qdisc_qstats_drop(sch);
3904 		__qdisc_drop(skb, to_free);
3905 		*ret = __NET_XMIT_BYPASS;
3906 		return NULL;
3907 	case TC_ACT_STOLEN:
3908 	case TC_ACT_QUEUED:
3909 	case TC_ACT_TRAP:
3910 		__qdisc_drop(skb, to_free);
3911 		*ret = __NET_XMIT_STOLEN;
3912 		return NULL;
3913 	case TC_ACT_REDIRECT:
3914 		skb_do_redirect(skb);
3915 		*ret = __NET_XMIT_STOLEN;
3916 		return NULL;
3917 	}
3918 
3919 	return skb;
3920 }
3921 EXPORT_SYMBOL(tcf_qevent_handle);
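 
/* Hypothetical sketch (not part of this file) of the enqueue-side pattern,
 * modeled on sch_red: a NULL return means the block's filters consumed the
 * skb and *ret already carries the __NET_XMIT_* modifier. The function name
 * and the plain tail enqueue are stand-ins for a real qdisc's logic.
 */
#if 0
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct tcf_qevent *qe, struct sk_buff **to_free)
{
	int ret = NET_XMIT_SUCCESS;

	skb = tcf_qevent_handle(qe, sch, skb, to_free, &ret);
	if (!skb)
		return NET_XMIT_CN | ret;

	/* ... proceed with the qdisc's normal enqueue of @skb ... */
	return qdisc_enqueue_tail(skb, sch);
}
#endif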
3922 
3923 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3924 {
3925 	if (!qe->info.block_index)
3926 		return 0;
3927 	return nla_put_u32(skb, attr_name, qe->info.block_index);
3928 }
3929 EXPORT_SYMBOL(tcf_qevent_dump);
3930 #endif
3931 
3932 static __net_init int tcf_net_init(struct net *net)
3933 {
3934 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3935 
3936 	spin_lock_init(&tn->idr_lock);
3937 	idr_init(&tn->idr);
3938 	return 0;
3939 }
3940 
3941 static void __net_exit tcf_net_exit(struct net *net)
3942 {
3943 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3944 
3945 	idr_destroy(&tn->idr);
3946 }
3947 
3948 static struct pernet_operations tcf_net_ops = {
3949 	.init = tcf_net_init,
3950 	.exit = tcf_net_exit,
3951 	.id   = &tcf_net_id,
3952 	.size = sizeof(struct tcf_net),
3953 };
3954 
3955 static int __init tc_filter_init(void)
3956 {
3957 	int err;
3958 
3959 	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3960 	if (!tc_filter_wq)
3961 		return -ENOMEM;
3962 
3963 	err = register_pernet_subsys(&tcf_net_ops);
3964 	if (err)
3965 		goto err_register_pernet_subsys;
3966 
3967 	xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);
3968 
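	/* The tfilter doit handlers manage RTNL themselves and take it only
	 * when a classifier or qdisc requires it, hence
	 * RTNL_FLAG_DOIT_UNLOCKED; the chain handlers below still run fully
	 * under RTNL.
	 */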
3969 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3970 		      RTNL_FLAG_DOIT_UNLOCKED);
3971 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3972 		      RTNL_FLAG_DOIT_UNLOCKED);
3973 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3974 		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3975 	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3976 	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3977 	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3978 		      tc_dump_chain, 0);
3979 
3980 	return 0;
3981 
3982 err_register_pernet_subsys:
3983 	destroy_workqueue(tc_filter_wq);
3984 	return err;
3985 }
3986 
3987 subsys_initcall(tc_filter_init);
3988