xref: /openbmc/linux/net/sched/cls_api.c (revision b3b984dc)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_api.c	Packet classifier API.
4  *
5  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  *
7  * Changes:
8  *
9  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
10  */
11 
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/skbuff.h>
19 #include <linux/init.h>
20 #include <linux/kmod.h>
21 #include <linux/slab.h>
22 #include <linux/idr.h>
23 #include <linux/jhash.h>
24 #include <linux/rculist.h>
25 #include <linux/rhashtable.h>
26 #include <net/net_namespace.h>
27 #include <net/sock.h>
28 #include <net/netlink.h>
29 #include <net/pkt_sched.h>
30 #include <net/pkt_cls.h>
31 #include <net/tc_act/tc_pedit.h>
32 #include <net/tc_act/tc_mirred.h>
33 #include <net/tc_act/tc_vlan.h>
34 #include <net/tc_act/tc_tunnel_key.h>
35 #include <net/tc_act/tc_csum.h>
36 #include <net/tc_act/tc_gact.h>
37 #include <net/tc_act/tc_police.h>
38 #include <net/tc_act/tc_sample.h>
39 #include <net/tc_act/tc_skbedit.h>
40 #include <net/tc_act/tc_ct.h>
41 #include <net/tc_act/tc_mpls.h>
42 #include <net/tc_act/tc_gate.h>
43 #include <net/flow_offload.h>
44 #include <net/tc_wrapper.h>
45 
46 /* The list of all installed classifier types */
47 static LIST_HEAD(tcf_proto_base);
48 
49 /* Protects the list of registered TC modules. It is a pure SMP lock. */
50 static DEFINE_RWLOCK(cls_mod_lock);
51 
52 static struct xarray tcf_exts_miss_cookies_xa;
53 struct tcf_exts_miss_cookie_node {
54 	const struct tcf_chain *chain;
55 	const struct tcf_proto *tp;
56 	const struct tcf_exts *exts;
57 	u32 chain_index;
58 	u32 tp_prio;
59 	u32 handle;
60 	u32 miss_cookie_base;
61 	struct rcu_head rcu;
62 };
63 
64 /* Each tc action entry cookie is composed of a 32-bit miss_cookie_base plus
65  * the action's index in the exts tc actions array.
66  */
67 union tcf_exts_miss_cookie {
68 	struct {
69 		u32 miss_cookie_base;
70 		u32 act_index;
71 	};
72 	u64 miss_cookie;
73 };
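
/* Illustrative sketch (not part of the original source): how a 64-bit miss
 * cookie decomposes. Assuming a hypothetical base of 0x2a allocated from
 * tcf_exts_miss_cookies_xa and action index 3 in the exts actions array:
 *
 *	union tcf_exts_miss_cookie mc = { .miss_cookie = cookie, };
 *
 *	mc.miss_cookie_base == 0x2a;	key into tcf_exts_miss_cookies_xa
 *	mc.act_index == 3;		offset into the exts tc actions array
 */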
74 
75 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
76 static int
77 tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
78 				u32 handle)
79 {
80 	struct tcf_exts_miss_cookie_node *n;
81 	static u32 next;
82 	int err;
83 
84 	if (WARN_ON(!handle || !tp->ops->get_exts))
85 		return -EINVAL;
86 
87 	n = kzalloc(sizeof(*n), GFP_KERNEL);
88 	if (!n)
89 		return -ENOMEM;
90 
91 	n->chain_index = tp->chain->index;
92 	n->chain = tp->chain;
93 	n->tp_prio = tp->prio;
94 	n->tp = tp;
95 	n->exts = exts;
96 	n->handle = handle;
97 
98 	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
99 			      n, xa_limit_32b, &next, GFP_KERNEL);
100 	if (err)
101 		goto err_xa_alloc;
102 
103 	exts->miss_cookie_node = n;
104 	return 0;
105 
106 err_xa_alloc:
107 	kfree(n);
108 	return err;
109 }
110 
111 static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
112 {
113 	struct tcf_exts_miss_cookie_node *n;
114 
115 	if (!exts->miss_cookie_node)
116 		return;
117 
118 	n = exts->miss_cookie_node;
119 	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
120 	kfree_rcu(n, rcu);
121 }
122 
123 static struct tcf_exts_miss_cookie_node *
124 tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
125 {
126 	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };
127 
128 	*act_index = mc.act_index;
129 	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
130 }
131 #else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
132 static int
133 tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
134 				u32 handle)
135 {
136 	return 0;
137 }
138 
139 static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
140 {
141 }
142 #endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
143 
144 static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
145 {
146 	union tcf_exts_miss_cookie mc = { .act_index = act_index, };
147 
148 	if (!miss_cookie_base)
149 		return 0;
150 
151 	mc.miss_cookie_base = miss_cookie_base;
152 	return mc.miss_cookie;
153 }
154 
155 #ifdef CONFIG_NET_CLS_ACT
156 DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
157 EXPORT_SYMBOL(tc_skb_ext_tc);
158 
159 void tc_skb_ext_tc_enable(void)
160 {
161 	static_branch_inc(&tc_skb_ext_tc);
162 }
163 EXPORT_SYMBOL(tc_skb_ext_tc_enable);
164 
165 void tc_skb_ext_tc_disable(void)
166 {
167 	static_branch_dec(&tc_skb_ext_tc);
168 }
169 EXPORT_SYMBOL(tc_skb_ext_tc_disable);
170 #endif
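
/* Usage sketch (illustrative, not part of the original source): the static
 * key keeps the skb-extension path cheap when nothing needs it. A consumer
 * pairs enable/disable around its lifetime, and the datapath guards with
 * the inline helper, e.g.:
 *
 *	tc_skb_ext_tc_enable();			(on offload setup)
 *	...
 *	if (tc_skb_ext_tc_enabled())
 *		ext = tc_skb_ext_alloc(skb);
 *	...
 *	tc_skb_ext_tc_disable();		(on teardown)
 */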
171 
172 static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
173 {
174 	return jhash_3words(tp->chain->index, tp->prio,
175 			    (__force __u32)tp->protocol, 0);
176 }
177 
178 static void tcf_proto_signal_destroying(struct tcf_chain *chain,
179 					struct tcf_proto *tp)
180 {
181 	struct tcf_block *block = chain->block;
182 
183 	mutex_lock(&block->proto_destroy_lock);
184 	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
185 		     destroy_obj_hashfn(tp));
186 	mutex_unlock(&block->proto_destroy_lock);
187 }
188 
189 static bool tcf_proto_cmp(const struct tcf_proto *tp1,
190 			  const struct tcf_proto *tp2)
191 {
192 	return tp1->chain->index == tp2->chain->index &&
193 	       tp1->prio == tp2->prio &&
194 	       tp1->protocol == tp2->protocol;
195 }
196 
197 static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
198 					struct tcf_proto *tp)
199 {
200 	u32 hash = destroy_obj_hashfn(tp);
201 	struct tcf_proto *iter;
202 	bool found = false;
203 
204 	rcu_read_lock();
205 	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
206 				   destroy_ht_node, hash) {
207 		if (tcf_proto_cmp(tp, iter)) {
208 			found = true;
209 			break;
210 		}
211 	}
212 	rcu_read_unlock();
213 
214 	return found;
215 }
216 
217 static void
218 tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
219 {
220 	struct tcf_block *block = chain->block;
221 
222 	mutex_lock(&block->proto_destroy_lock);
223 	if (hash_hashed(&tp->destroy_ht_node))
224 		hash_del_rcu(&tp->destroy_ht_node);
225 	mutex_unlock(&block->proto_destroy_lock);
226 }
227 
228 /* Find classifier type by string name */
229 
230 static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
231 {
232 	const struct tcf_proto_ops *t, *res = NULL;
233 
234 	if (kind) {
235 		read_lock(&cls_mod_lock);
236 		list_for_each_entry(t, &tcf_proto_base, head) {
237 			if (strcmp(kind, t->kind) == 0) {
238 				if (try_module_get(t->owner))
239 					res = t;
240 				break;
241 			}
242 		}
243 		read_unlock(&cls_mod_lock);
244 	}
245 	return res;
246 }
247 
248 static const struct tcf_proto_ops *
249 tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
250 		     struct netlink_ext_ack *extack)
251 {
252 	const struct tcf_proto_ops *ops;
253 
254 	ops = __tcf_proto_lookup_ops(kind);
255 	if (ops)
256 		return ops;
257 #ifdef CONFIG_MODULES
258 	if (rtnl_held)
259 		rtnl_unlock();
260 	request_module("cls_%s", kind);
261 	if (rtnl_held)
262 		rtnl_lock();
263 	ops = __tcf_proto_lookup_ops(kind);
264 	/* We dropped the RTNL semaphore in order to perform
265 	 * the module load. So, even if we succeeded in loading
266 	 * the module we have to replay the request. We indicate
267 	 * this using -EAGAIN.
268 	 */
269 	if (ops) {
270 		module_put(ops->owner);
271 		return ERR_PTR(-EAGAIN);
272 	}
273 #endif
274 	NL_SET_ERR_MSG(extack, "TC classifier not found");
275 	return ERR_PTR(-ENOENT);
276 }
277 
278 /* Register (unregister) a new classifier type */
279 
280 int register_tcf_proto_ops(struct tcf_proto_ops *ops)
281 {
282 	struct tcf_proto_ops *t;
283 	int rc = -EEXIST;
284 
285 	write_lock(&cls_mod_lock);
286 	list_for_each_entry(t, &tcf_proto_base, head)
287 		if (!strcmp(ops->kind, t->kind))
288 			goto out;
289 
290 	list_add_tail(&ops->head, &tcf_proto_base);
291 	rc = 0;
292 out:
293 	write_unlock(&cls_mod_lock);
294 	return rc;
295 }
296 EXPORT_SYMBOL(register_tcf_proto_ops);
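
/* Usage sketch (illustrative): a classifier module registers its ops on
 * load and unregisters them on removal, in the style of in-tree
 * classifiers such as cls_basic. All "example" names are placeholders:
 *
 *	static struct tcf_proto_ops cls_example_ops __read_mostly = {
 *		.kind		= "example",
 *		.classify	= example_classify,
 *		.init		= example_init,
 *		.destroy	= example_destroy,
 *		.get		= example_get,
 *		.change		= example_change,
 *		.delete		= example_delete,
 *		.walk		= example_walk,
 *		.dump		= example_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_example_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_example_ops);
 *	}
 *
 *	static void __exit cls_example_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_example_ops);
 *	}
 */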
297 
298 static struct workqueue_struct *tc_filter_wq;
299 
300 void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
301 {
302 	struct tcf_proto_ops *t;
303 	int rc = -ENOENT;
304 
305 	/* Wait for outstanding call_rcu()s, if any, from a
306 	 * tcf_proto_ops's destroy() handler.
307 	 */
308 	rcu_barrier();
309 	flush_workqueue(tc_filter_wq);
310 
311 	write_lock(&cls_mod_lock);
312 	list_for_each_entry(t, &tcf_proto_base, head) {
313 		if (t == ops) {
314 			list_del(&t->head);
315 			rc = 0;
316 			break;
317 		}
318 	}
319 	write_unlock(&cls_mod_lock);
320 
321 	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
322 }
323 EXPORT_SYMBOL(unregister_tcf_proto_ops);
324 
325 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
326 {
327 	INIT_RCU_WORK(rwork, func);
328 	return queue_rcu_work(tc_filter_wq, rwork);
329 }
330 EXPORT_SYMBOL(tcf_queue_work);
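
/* Usage sketch (illustrative): classifiers embed a struct rcu_work in
 * their filters and use tcf_queue_work() to defer freeing until an RCU
 * grace period has elapsed and the work has run on tc_filter_wq. The
 * "example" names are placeholders:
 *
 *	static void example_delete_filter_work(struct work_struct *work)
 *	{
 *		struct example_filter *f = container_of(to_rcu_work(work),
 *							struct example_filter,
 *							rwork);
 *		...	release f's resources
 *		kfree(f);
 *	}
 *
 *	tcf_queue_work(&f->rwork, example_delete_filter_work);
 */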
331 
332 /* Select a new prio value from the range managed by the kernel. */
333 
334 static inline u32 tcf_auto_prio(struct tcf_proto *tp)
335 {
336 	u32 first = TC_H_MAKE(0xC0000000U, 0U);
337 
338 	if (tp)
339 		first = tp->prio - 1;
340 
341 	return TC_H_MAJ(first);
342 }
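
/* Worked example (illustrative): tp->prio keeps the priority in the major
 * (upper 16) bits. With no existing filter the first auto-allocated value
 * is TC_H_MAJ(0xC0000000), i.e. priority 0xC000; each further allocation
 * takes the current head's prio minus one, yielding 0xBFFF, 0xBFFE, ...
 */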
343 
344 static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
345 {
346 	if (kind)
347 		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
348 	memset(name, 0, IFNAMSIZ);
349 	return false;
350 }
351 
352 static bool tcf_proto_is_unlocked(const char *kind)
353 {
354 	const struct tcf_proto_ops *ops;
355 	bool ret;
356 
357 	if (strlen(kind) == 0)
358 		return false;
359 
360 	ops = tcf_proto_lookup_ops(kind, false, NULL);
361	/* On error, return false so the rtnl lock is taken. Proto lookup/create
362	 * functions will perform the lookup again and handle errors properly.
363 	 */
364 	if (IS_ERR(ops))
365 		return false;
366 
367 	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
368 	module_put(ops->owner);
369 	return ret;
370 }
371 
372 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
373 					  u32 prio, struct tcf_chain *chain,
374 					  bool rtnl_held,
375 					  struct netlink_ext_ack *extack)
376 {
377 	struct tcf_proto *tp;
378 	int err;
379 
380 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
381 	if (!tp)
382 		return ERR_PTR(-ENOBUFS);
383 
384 	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
385 	if (IS_ERR(tp->ops)) {
386 		err = PTR_ERR(tp->ops);
387 		goto errout;
388 	}
389 	tp->classify = tp->ops->classify;
390 	tp->protocol = protocol;
391 	tp->prio = prio;
392 	tp->chain = chain;
393 	spin_lock_init(&tp->lock);
394 	refcount_set(&tp->refcnt, 1);
395 
396 	err = tp->ops->init(tp);
397 	if (err) {
398 		module_put(tp->ops->owner);
399 		goto errout;
400 	}
401 	return tp;
402 
403 errout:
404 	kfree(tp);
405 	return ERR_PTR(err);
406 }
407 
408 static void tcf_proto_get(struct tcf_proto *tp)
409 {
410 	refcount_inc(&tp->refcnt);
411 }
412 
413 static void tcf_chain_put(struct tcf_chain *chain);
414 
415 static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
416 			      bool sig_destroy, struct netlink_ext_ack *extack)
417 {
418 	tp->ops->destroy(tp, rtnl_held, extack);
419 	if (sig_destroy)
420 		tcf_proto_signal_destroyed(tp->chain, tp);
421 	tcf_chain_put(tp->chain);
422 	module_put(tp->ops->owner);
423 	kfree_rcu(tp, rcu);
424 }
425 
426 static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
427 			  struct netlink_ext_ack *extack)
428 {
429 	if (refcount_dec_and_test(&tp->refcnt))
430 		tcf_proto_destroy(tp, rtnl_held, true, extack);
431 }
432 
433 static bool tcf_proto_check_delete(struct tcf_proto *tp)
434 {
435 	if (tp->ops->delete_empty)
436 		return tp->ops->delete_empty(tp);
437 
438 	tp->deleting = true;
439 	return tp->deleting;
440 }
441 
442 static void tcf_proto_mark_delete(struct tcf_proto *tp)
443 {
444 	spin_lock(&tp->lock);
445 	tp->deleting = true;
446 	spin_unlock(&tp->lock);
447 }
448 
449 static bool tcf_proto_is_deleting(struct tcf_proto *tp)
450 {
451 	bool deleting;
452 
453 	spin_lock(&tp->lock);
454 	deleting = tp->deleting;
455 	spin_unlock(&tp->lock);
456 
457 	return deleting;
458 }
459 
460 #define ASSERT_BLOCK_LOCKED(block)					\
461 	lockdep_assert_held(&(block)->lock)
462 
463 struct tcf_filter_chain_list_item {
464 	struct list_head list;
465 	tcf_chain_head_change_t *chain_head_change;
466 	void *chain_head_change_priv;
467 };
468 
469 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
470 					  u32 chain_index)
471 {
472 	struct tcf_chain *chain;
473 
474 	ASSERT_BLOCK_LOCKED(block);
475 
476 	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
477 	if (!chain)
478 		return NULL;
479 	list_add_tail_rcu(&chain->list, &block->chain_list);
480 	mutex_init(&chain->filter_chain_lock);
481 	chain->block = block;
482 	chain->index = chain_index;
483 	chain->refcnt = 1;
484 	if (!chain->index)
485 		block->chain0.chain = chain;
486 	return chain;
487 }
488 
489 static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
490 				       struct tcf_proto *tp_head)
491 {
492 	if (item->chain_head_change)
493 		item->chain_head_change(tp_head, item->chain_head_change_priv);
494 }
495 
496 static void tcf_chain0_head_change(struct tcf_chain *chain,
497 				   struct tcf_proto *tp_head)
498 {
499 	struct tcf_filter_chain_list_item *item;
500 	struct tcf_block *block = chain->block;
501 
502 	if (chain->index)
503 		return;
504 
505 	mutex_lock(&block->lock);
506 	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
507 		tcf_chain_head_change_item(item, tp_head);
508 	mutex_unlock(&block->lock);
509 }
510 
511 /* Returns true if block can be safely freed. */
512 
513 static bool tcf_chain_detach(struct tcf_chain *chain)
514 {
515 	struct tcf_block *block = chain->block;
516 
517 	ASSERT_BLOCK_LOCKED(block);
518 
519 	list_del_rcu(&chain->list);
520 	if (!chain->index)
521 		block->chain0.chain = NULL;
522 
523 	if (list_empty(&block->chain_list) &&
524 	    refcount_read(&block->refcnt) == 0)
525 		return true;
526 
527 	return false;
528 }
529 
530 static void tcf_block_destroy(struct tcf_block *block)
531 {
532 	mutex_destroy(&block->lock);
533 	mutex_destroy(&block->proto_destroy_lock);
534 	kfree_rcu(block, rcu);
535 }
536 
537 static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
538 {
539 	struct tcf_block *block = chain->block;
540 
541 	mutex_destroy(&chain->filter_chain_lock);
542 	kfree_rcu(chain, rcu);
543 	if (free_block)
544 		tcf_block_destroy(block);
545 }
546 
547 static void tcf_chain_hold(struct tcf_chain *chain)
548 {
549 	ASSERT_BLOCK_LOCKED(chain->block);
550 
551 	++chain->refcnt;
552 }
553 
554 static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
555 {
556 	ASSERT_BLOCK_LOCKED(chain->block);
557 
558	/* If all the references are action references, this chain
559	 * should not be shown to the user.
560 	 */
561 	return chain->refcnt == chain->action_refcnt;
562 }
563 
564 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
565 					  u32 chain_index)
566 {
567 	struct tcf_chain *chain;
568 
569 	ASSERT_BLOCK_LOCKED(block);
570 
571 	list_for_each_entry(chain, &block->chain_list, list) {
572 		if (chain->index == chain_index)
573 			return chain;
574 	}
575 	return NULL;
576 }
577 
578 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
579 static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
580 					      u32 chain_index)
581 {
582 	struct tcf_chain *chain;
583 
584 	list_for_each_entry_rcu(chain, &block->chain_list, list) {
585 		if (chain->index == chain_index)
586 			return chain;
587 	}
588 	return NULL;
589 }
590 #endif
591 
592 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
593 			   u32 seq, u16 flags, int event, bool unicast,
594 			   struct netlink_ext_ack *extack);
595 
596 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
597 					 u32 chain_index, bool create,
598 					 bool by_act)
599 {
600 	struct tcf_chain *chain = NULL;
601 	bool is_first_reference;
602 
603 	mutex_lock(&block->lock);
604 	chain = tcf_chain_lookup(block, chain_index);
605 	if (chain) {
606 		tcf_chain_hold(chain);
607 	} else {
608 		if (!create)
609 			goto errout;
610 		chain = tcf_chain_create(block, chain_index);
611 		if (!chain)
612 			goto errout;
613 	}
614 
615 	if (by_act)
616 		++chain->action_refcnt;
617 	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
618 	mutex_unlock(&block->lock);
619 
620	/* Send a notification only when we get the first
621	 * non-action reference. Until then, the chain acts only as
622	 * a placeholder for actions pointing to it and the user
623	 * ought not to know about them.
624 	 */
625 	if (is_first_reference && !by_act)
626 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
627 				RTM_NEWCHAIN, false, NULL);
628 
629 	return chain;
630 
631 errout:
632 	mutex_unlock(&block->lock);
633 	return chain;
634 }
635 
636 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
637 				       bool create)
638 {
639 	return __tcf_chain_get(block, chain_index, create, false);
640 }
641 
642 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
643 {
644 	return __tcf_chain_get(block, chain_index, true, true);
645 }
646 EXPORT_SYMBOL(tcf_chain_get_by_act);
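
/* Usage sketch (illustrative): the action layer takes these action
 * references when validating a "goto chain" control, roughly:
 *
 *	chain = tcf_chain_get_by_act(block, chain_index);
 *	if (!chain)
 *		return -ENOMEM;
 *	...
 *	tcf_chain_put_by_act(chain);	(when the action goes away)
 */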
647 
648 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
649 			       void *tmplt_priv);
650 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
651 				  void *tmplt_priv, u32 chain_index,
652 				  struct tcf_block *block, struct sk_buff *oskb,
653 				  u32 seq, u16 flags, bool unicast);
654 
655 static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
656 			    bool explicitly_created)
657 {
658 	struct tcf_block *block = chain->block;
659 	const struct tcf_proto_ops *tmplt_ops;
660 	bool free_block = false;
661 	unsigned int refcnt;
662 	void *tmplt_priv;
663 
664 	mutex_lock(&block->lock);
665 	if (explicitly_created) {
666 		if (!chain->explicitly_created) {
667 			mutex_unlock(&block->lock);
668 			return;
669 		}
670 		chain->explicitly_created = false;
671 	}
672 
673 	if (by_act)
674 		chain->action_refcnt--;
675 
676 	/* tc_chain_notify_delete can't be called while holding block lock.
677	 * However, once the block is unlocked the chain can be changed
678	 * concurrently, so save these to temporary variables.
679 	 */
680 	refcnt = --chain->refcnt;
681 	tmplt_ops = chain->tmplt_ops;
682 	tmplt_priv = chain->tmplt_priv;
683 
684 	/* The last dropped non-action reference will trigger notification. */
685 	if (refcnt - chain->action_refcnt == 0 && !by_act) {
686 		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
687 				       block, NULL, 0, 0, false);
688 		/* Last reference to chain, no need to lock. */
689 		chain->flushing = false;
690 	}
691 
692 	if (refcnt == 0)
693 		free_block = tcf_chain_detach(chain);
694 	mutex_unlock(&block->lock);
695 
696 	if (refcnt == 0) {
697 		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
698 		tcf_chain_destroy(chain, free_block);
699 	}
700 }
701 
702 static void tcf_chain_put(struct tcf_chain *chain)
703 {
704 	__tcf_chain_put(chain, false, false);
705 }
706 
707 void tcf_chain_put_by_act(struct tcf_chain *chain)
708 {
709 	__tcf_chain_put(chain, true, false);
710 }
711 EXPORT_SYMBOL(tcf_chain_put_by_act);
712 
713 static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
714 {
715 	__tcf_chain_put(chain, false, true);
716 }
717 
718 static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
719 {
720 	struct tcf_proto *tp, *tp_next;
721 
722 	mutex_lock(&chain->filter_chain_lock);
723 	tp = tcf_chain_dereference(chain->filter_chain, chain);
724 	while (tp) {
725 		tp_next = rcu_dereference_protected(tp->next, 1);
726 		tcf_proto_signal_destroying(chain, tp);
727 		tp = tp_next;
728 	}
729 	tp = tcf_chain_dereference(chain->filter_chain, chain);
730 	RCU_INIT_POINTER(chain->filter_chain, NULL);
731 	tcf_chain0_head_change(chain, NULL);
732 	chain->flushing = true;
733 	mutex_unlock(&chain->filter_chain_lock);
734 
735 	while (tp) {
736 		tp_next = rcu_dereference_protected(tp->next, 1);
737 		tcf_proto_put(tp, rtnl_held, NULL);
738 		tp = tp_next;
739 	}
740 }
741 
742 static int tcf_block_setup(struct tcf_block *block,
743 			   struct flow_block_offload *bo);
744 
745 static void tcf_block_offload_init(struct flow_block_offload *bo,
746 				   struct net_device *dev, struct Qdisc *sch,
747 				   enum flow_block_command command,
748 				   enum flow_block_binder_type binder_type,
749 				   struct flow_block *flow_block,
750 				   bool shared, struct netlink_ext_ack *extack)
751 {
752 	bo->net = dev_net(dev);
753 	bo->command = command;
754 	bo->binder_type = binder_type;
755 	bo->block = flow_block;
756 	bo->block_shared = shared;
757 	bo->extack = extack;
758 	bo->sch = sch;
759 	bo->cb_list_head = &flow_block->cb_list;
760 	INIT_LIST_HEAD(&bo->cb_list);
761 }
762 
763 static void tcf_block_unbind(struct tcf_block *block,
764 			     struct flow_block_offload *bo);
765 
766 static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
767 {
768 	struct tcf_block *block = block_cb->indr.data;
769 	struct net_device *dev = block_cb->indr.dev;
770 	struct Qdisc *sch = block_cb->indr.sch;
771 	struct netlink_ext_ack extack = {};
772 	struct flow_block_offload bo = {};
773 
774 	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
775 			       block_cb->indr.binder_type,
776 			       &block->flow_block, tcf_block_shared(block),
777 			       &extack);
778 	rtnl_lock();
779 	down_write(&block->cb_lock);
780 	list_del(&block_cb->driver_list);
781 	list_move(&block_cb->list, &bo.cb_list);
782 	tcf_block_unbind(block, &bo);
783 	up_write(&block->cb_lock);
784 	rtnl_unlock();
785 }
786 
787 static bool tcf_block_offload_in_use(struct tcf_block *block)
788 {
789 	return atomic_read(&block->offloadcnt);
790 }
791 
792 static int tcf_block_offload_cmd(struct tcf_block *block,
793 				 struct net_device *dev, struct Qdisc *sch,
794 				 struct tcf_block_ext_info *ei,
795 				 enum flow_block_command command,
796 				 struct netlink_ext_ack *extack)
797 {
798 	struct flow_block_offload bo = {};
799 
800 	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
801 			       &block->flow_block, tcf_block_shared(block),
802 			       extack);
803 
804 	if (dev->netdev_ops->ndo_setup_tc) {
805 		int err;
806 
807 		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
808 		if (err < 0) {
809 			if (err != -EOPNOTSUPP)
810 				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
811 			return err;
812 		}
813 
814 		return tcf_block_setup(block, &bo);
815 	}
816 
817 	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
818 				    tc_block_indr_cleanup);
819 	tcf_block_setup(block, &bo);
820 
821 	return -EOPNOTSUPP;
822 }
823 
824 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
825 				  struct tcf_block_ext_info *ei,
826 				  struct netlink_ext_ack *extack)
827 {
828 	struct net_device *dev = q->dev_queue->dev;
829 	int err;
830 
831 	down_write(&block->cb_lock);
832 
833	/* If the tc offload feature is disabled and the block we try to bind
834	 * to already has some offloaded filters, forbid the bind.
835 	 */
836 	if (dev->netdev_ops->ndo_setup_tc &&
837 	    !tc_can_offload(dev) &&
838 	    tcf_block_offload_in_use(block)) {
839 		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
840 		err = -EOPNOTSUPP;
841 		goto err_unlock;
842 	}
843 
844 	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
845 	if (err == -EOPNOTSUPP)
846 		goto no_offload_dev_inc;
847 	if (err)
848 		goto err_unlock;
849 
850 	up_write(&block->cb_lock);
851 	return 0;
852 
853 no_offload_dev_inc:
854 	if (tcf_block_offload_in_use(block))
855 		goto err_unlock;
856 
857 	err = 0;
858 	block->nooffloaddevcnt++;
859 err_unlock:
860 	up_write(&block->cb_lock);
861 	return err;
862 }
863 
864 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
865 				     struct tcf_block_ext_info *ei)
866 {
867 	struct net_device *dev = q->dev_queue->dev;
868 	int err;
869 
870 	down_write(&block->cb_lock);
871 	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
872 	if (err == -EOPNOTSUPP)
873 		goto no_offload_dev_dec;
874 	up_write(&block->cb_lock);
875 	return;
876 
877 no_offload_dev_dec:
878 	WARN_ON(block->nooffloaddevcnt-- == 0);
879 	up_write(&block->cb_lock);
880 }
881 
882 static int
883 tcf_chain0_head_change_cb_add(struct tcf_block *block,
884 			      struct tcf_block_ext_info *ei,
885 			      struct netlink_ext_ack *extack)
886 {
887 	struct tcf_filter_chain_list_item *item;
888 	struct tcf_chain *chain0;
889 
890 	item = kmalloc(sizeof(*item), GFP_KERNEL);
891 	if (!item) {
892 		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
893 		return -ENOMEM;
894 	}
895 	item->chain_head_change = ei->chain_head_change;
896 	item->chain_head_change_priv = ei->chain_head_change_priv;
897 
898 	mutex_lock(&block->lock);
899 	chain0 = block->chain0.chain;
900 	if (chain0)
901 		tcf_chain_hold(chain0);
902 	else
903 		list_add(&item->list, &block->chain0.filter_chain_list);
904 	mutex_unlock(&block->lock);
905 
906 	if (chain0) {
907 		struct tcf_proto *tp_head;
908 
909 		mutex_lock(&chain0->filter_chain_lock);
910 
911 		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
912 		if (tp_head)
913 			tcf_chain_head_change_item(item, tp_head);
914 
915 		mutex_lock(&block->lock);
916 		list_add(&item->list, &block->chain0.filter_chain_list);
917 		mutex_unlock(&block->lock);
918 
919 		mutex_unlock(&chain0->filter_chain_lock);
920 		tcf_chain_put(chain0);
921 	}
922 
923 	return 0;
924 }
925 
926 static void
927 tcf_chain0_head_change_cb_del(struct tcf_block *block,
928 			      struct tcf_block_ext_info *ei)
929 {
930 	struct tcf_filter_chain_list_item *item;
931 
932 	mutex_lock(&block->lock);
933 	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
934 		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
935 		    (item->chain_head_change == ei->chain_head_change &&
936 		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
937 			if (block->chain0.chain)
938 				tcf_chain_head_change_item(item, NULL);
939 			list_del(&item->list);
940 			mutex_unlock(&block->lock);
941 
942 			kfree(item);
943 			return;
944 		}
945 	}
946 	mutex_unlock(&block->lock);
947 	WARN_ON(1);
948 }
949 
950 struct tcf_net {
951 	spinlock_t idr_lock; /* Protects idr */
952 	struct idr idr;
953 };
954 
955 static unsigned int tcf_net_id;
956 
957 static int tcf_block_insert(struct tcf_block *block, struct net *net,
958 			    struct netlink_ext_ack *extack)
959 {
960 	struct tcf_net *tn = net_generic(net, tcf_net_id);
961 	int err;
962 
963 	idr_preload(GFP_KERNEL);
964 	spin_lock(&tn->idr_lock);
965 	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
966 			    GFP_NOWAIT);
967 	spin_unlock(&tn->idr_lock);
968 	idr_preload_end();
969 
970 	return err;
971 }
972 
973 static void tcf_block_remove(struct tcf_block *block, struct net *net)
974 {
975 	struct tcf_net *tn = net_generic(net, tcf_net_id);
976 
977 	spin_lock(&tn->idr_lock);
978 	idr_remove(&tn->idr, block->index);
979 	spin_unlock(&tn->idr_lock);
980 }
981 
982 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
983 					  u32 block_index,
984 					  struct netlink_ext_ack *extack)
985 {
986 	struct tcf_block *block;
987 
988 	block = kzalloc(sizeof(*block), GFP_KERNEL);
989 	if (!block) {
990 		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
991 		return ERR_PTR(-ENOMEM);
992 	}
993 	mutex_init(&block->lock);
994 	mutex_init(&block->proto_destroy_lock);
995 	init_rwsem(&block->cb_lock);
996 	flow_block_init(&block->flow_block);
997 	INIT_LIST_HEAD(&block->chain_list);
998 	INIT_LIST_HEAD(&block->owner_list);
999 	INIT_LIST_HEAD(&block->chain0.filter_chain_list);
1000 
1001 	refcount_set(&block->refcnt, 1);
1002 	block->net = net;
1003 	block->index = block_index;
1004 
1005 	/* Don't store q pointer for blocks which are shared */
1006 	if (!tcf_block_shared(block))
1007 		block->q = q;
1008 	return block;
1009 }
1010 
1011 static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
1012 {
1013 	struct tcf_net *tn = net_generic(net, tcf_net_id);
1014 
1015 	return idr_find(&tn->idr, block_index);
1016 }
1017 
1018 static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
1019 {
1020 	struct tcf_block *block;
1021 
1022 	rcu_read_lock();
1023 	block = tcf_block_lookup(net, block_index);
1024 	if (block && !refcount_inc_not_zero(&block->refcnt))
1025 		block = NULL;
1026 	rcu_read_unlock();
1027 
1028 	return block;
1029 }
1030 
1031 static struct tcf_chain *
1032 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
1033 {
1034 	mutex_lock(&block->lock);
1035 	if (chain)
1036 		chain = list_is_last(&chain->list, &block->chain_list) ?
1037 			NULL : list_next_entry(chain, list);
1038 	else
1039 		chain = list_first_entry_or_null(&block->chain_list,
1040 						 struct tcf_chain, list);
1041 
1042 	/* skip all action-only chains */
1043 	while (chain && tcf_chain_held_by_acts_only(chain))
1044 		chain = list_is_last(&chain->list, &block->chain_list) ?
1045 			NULL : list_next_entry(chain, list);
1046 
1047 	if (chain)
1048 		tcf_chain_hold(chain);
1049 	mutex_unlock(&block->lock);
1050 
1051 	return chain;
1052 }
1053 
1054 /* Function to be used by all clients that want to iterate over all chains on
1055  * a block. It properly obtains block->lock and takes a reference to the chain
1056  * before returning it. Users of this function must be tolerant to concurrent
1057  * chain insertion/deletion or ensure that no concurrent chain modification is
1058  * possible. Note that netlink dump callbacks cannot guarantee a consistent
1059  * dump because the rtnl lock is released each time the skb is filled with
1060  * data and sent to user-space.
1061  */
1062 
1063 struct tcf_chain *
1064 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
1065 {
1066 	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);
1067 
1068 	if (chain)
1069 		tcf_chain_put(chain);
1070 
1071 	return chain_next;
1072 }
1073 EXPORT_SYMBOL(tcf_get_next_chain);
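
/* Iteration sketch (illustrative): callers start with a NULL cursor and
 * let the helper drop the previous chain's reference; breaking out early
 * therefore requires an explicit tcf_chain_put() on the current chain:
 *
 *	struct tcf_chain *chain;
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		...	inspect chain
 *	}
 */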
1074 
1075 static struct tcf_proto *
1076 __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
1077 {
1078 	u32 prio = 0;
1079 
1080 	ASSERT_RTNL();
1081 	mutex_lock(&chain->filter_chain_lock);
1082 
1083 	if (!tp) {
1084 		tp = tcf_chain_dereference(chain->filter_chain, chain);
1085 	} else if (tcf_proto_is_deleting(tp)) {
1086		/* The 'deleting' flag is set and chain->filter_chain_lock was
1087		 * unlocked, which means the next pointer could be invalid.
1088		 * Restart the search.
1089 		 */
1090 		prio = tp->prio + 1;
1091 		tp = tcf_chain_dereference(chain->filter_chain, chain);
1092 
1093 		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
1094 			if (!tp->deleting && tp->prio >= prio)
1095 				break;
1096 	} else {
1097 		tp = tcf_chain_dereference(tp->next, chain);
1098 	}
1099 
1100 	if (tp)
1101 		tcf_proto_get(tp);
1102 
1103 	mutex_unlock(&chain->filter_chain_lock);
1104 
1105 	return tp;
1106 }
1107 
1108 /* Function to be used by all clients that want to iterate over all tp's on
1109  * a chain. Users of this function must be tolerant to concurrent tp
1110  * insertion/deletion or ensure that no concurrent chain modification is
1111  * possible. Note that netlink dump callbacks cannot guarantee a consistent
1112  * dump because the rtnl lock is released each time the skb is filled with
1113  * data and sent to user-space.
1114  */
1115 
1116 struct tcf_proto *
1117 tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
1118 {
1119 	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
1120 
1121 	if (tp)
1122 		tcf_proto_put(tp, true, NULL);
1123 
1124 	return tp_next;
1125 }
1126 EXPORT_SYMBOL(tcf_get_next_proto);
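
/* Iteration sketch (illustrative): combined with tcf_get_next_chain()
 * this walks every classifier on a block; rtnl must be held, since
 * __tcf_get_next_proto() asserts it:
 *
 *	for (chain = tcf_get_next_chain(block, NULL); chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		for (tp = tcf_get_next_proto(chain, NULL); tp;
 *		     tp = tcf_get_next_proto(chain, tp)) {
 *			...	inspect tp
 *		}
 *	}
 */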
1127 
1128 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
1129 {
1130 	struct tcf_chain *chain;
1131 
1132 	/* Last reference to block. At this point chains cannot be added or
1133 	 * removed concurrently.
1134 	 */
1135 	for (chain = tcf_get_next_chain(block, NULL);
1136 	     chain;
1137 	     chain = tcf_get_next_chain(block, chain)) {
1138 		tcf_chain_put_explicitly_created(chain);
1139 		tcf_chain_flush(chain, rtnl_held);
1140 	}
1141 }
1142 
1143 /* Look up the Qdisc and increment its reference counter.
1144  * Set parent, if necessary.
1145  */
1146 
1147 static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
1148 			    u32 *parent, int ifindex, bool rtnl_held,
1149 			    struct netlink_ext_ack *extack)
1150 {
1151 	const struct Qdisc_class_ops *cops;
1152 	struct net_device *dev;
1153 	int err = 0;
1154 
1155 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1156 		return 0;
1157 
1158 	rcu_read_lock();
1159 
1160 	/* Find link */
1161 	dev = dev_get_by_index_rcu(net, ifindex);
1162 	if (!dev) {
1163 		rcu_read_unlock();
1164 		return -ENODEV;
1165 	}
1166 
1167 	/* Find qdisc */
1168 	if (!*parent) {
1169 		*q = rcu_dereference(dev->qdisc);
1170 		*parent = (*q)->handle;
1171 	} else {
1172 		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
1173 		if (!*q) {
1174			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1175 			err = -EINVAL;
1176 			goto errout_rcu;
1177 		}
1178 	}
1179 
1180 	*q = qdisc_refcount_inc_nz(*q);
1181 	if (!*q) {
1182		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1183 		err = -EINVAL;
1184 		goto errout_rcu;
1185 	}
1186 
1187 	/* Is it classful? */
1188 	cops = (*q)->ops->cl_ops;
1189 	if (!cops) {
1190 		NL_SET_ERR_MSG(extack, "Qdisc not classful");
1191 		err = -EINVAL;
1192 		goto errout_qdisc;
1193 	}
1194 
1195 	if (!cops->tcf_block) {
1196 		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
1197 		err = -EOPNOTSUPP;
1198 		goto errout_qdisc;
1199 	}
1200 
1201 errout_rcu:
1202	/* At this point we know that the qdisc is not noop_qdisc,
1203	 * which means that the qdisc holds a reference to the net_device
1204	 * and we hold a reference to the qdisc, so it is safe to release
1205	 * the rcu read lock.
1206 	 */
1207 	rcu_read_unlock();
1208 	return err;
1209 
1210 errout_qdisc:
1211 	rcu_read_unlock();
1212 
1213 	if (rtnl_held)
1214 		qdisc_put(*q);
1215 	else
1216 		qdisc_put_unlocked(*q);
1217 	*q = NULL;
1218 
1219 	return err;
1220 }
1221 
1222 static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
1223 			       int ifindex, struct netlink_ext_ack *extack)
1224 {
1225 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1226 		return 0;
1227 
1228	/* Are we searching for a filter attached to a class? */
1229 	if (TC_H_MIN(parent)) {
1230 		const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1231 
1232 		*cl = cops->find(q, parent);
1233 		if (*cl == 0) {
1234 			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
1235 			return -ENOENT;
1236 		}
1237 	}
1238 
1239 	return 0;
1240 }
1241 
1242 static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
1243 					  unsigned long cl, int ifindex,
1244 					  u32 block_index,
1245 					  struct netlink_ext_ack *extack)
1246 {
1247 	struct tcf_block *block;
1248 
1249 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
1250 		block = tcf_block_refcnt_get(net, block_index);
1251 		if (!block) {
1252 			NL_SET_ERR_MSG(extack, "Block of given index was not found");
1253 			return ERR_PTR(-EINVAL);
1254 		}
1255 	} else {
1256 		const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1257 
1258 		block = cops->tcf_block(q, cl, extack);
1259 		if (!block)
1260 			return ERR_PTR(-EINVAL);
1261 
1262 		if (tcf_block_shared(block)) {
1263 			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
1264 			return ERR_PTR(-EOPNOTSUPP);
1265 		}
1266 
1267		/* Always take a reference to the block in order to support
1268		 * execution of the cls API rules update path without the rtnl
1269		 * lock. The caller must release the block when finished using
1270		 * it. The 'if' branch of this conditional obtains its
1271		 * reference by calling tcf_block_refcnt_get().
1272 		 */
1273 		refcount_inc(&block->refcnt);
1274 	}
1275 
1276 	return block;
1277 }
1278 
1279 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
1280 			    struct tcf_block_ext_info *ei, bool rtnl_held)
1281 {
1282 	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
1283		/* Flushing/putting all chains will cause the block to be
1284		 * deallocated when the last chain is freed. However, if
1285		 * chain_list is empty, the block has to be deallocated by
1286		 * hand. Once the block refcount has reached 0, it is no
1287		 * longer possible to increment it or add new chains to it.
1288 		 */
1289 		bool free_block = list_empty(&block->chain_list);
1290 
1291 		mutex_unlock(&block->lock);
1292 		if (tcf_block_shared(block))
1293 			tcf_block_remove(block, block->net);
1294 
1295 		if (q)
1296 			tcf_block_offload_unbind(block, q, ei);
1297 
1298 		if (free_block)
1299 			tcf_block_destroy(block);
1300 		else
1301 			tcf_block_flush_all_chains(block, rtnl_held);
1302 	} else if (q) {
1303 		tcf_block_offload_unbind(block, q, ei);
1304 	}
1305 }
1306 
1307 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
1308 {
1309 	__tcf_block_put(block, NULL, NULL, rtnl_held);
1310 }
1311 
1312 /* Find tcf block.
1313  * Set q, parent, cl when appropriate.
1314  */
1315 
1316 static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
1317 					u32 *parent, unsigned long *cl,
1318 					int ifindex, u32 block_index,
1319 					struct netlink_ext_ack *extack)
1320 {
1321 	struct tcf_block *block;
1322 	int err = 0;
1323 
1324 	ASSERT_RTNL();
1325 
1326 	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
1327 	if (err)
1328 		goto errout;
1329 
1330 	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
1331 	if (err)
1332 		goto errout_qdisc;
1333 
1334 	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
1335 	if (IS_ERR(block)) {
1336 		err = PTR_ERR(block);
1337 		goto errout_qdisc;
1338 	}
1339 
1340 	return block;
1341 
1342 errout_qdisc:
1343 	if (*q)
1344 		qdisc_put(*q);
1345 errout:
1346 	*q = NULL;
1347 	return ERR_PTR(err);
1348 }
1349 
1350 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
1351 			      bool rtnl_held)
1352 {
1353 	if (!IS_ERR_OR_NULL(block))
1354 		tcf_block_refcnt_put(block, rtnl_held);
1355 
1356 	if (q) {
1357 		if (rtnl_held)
1358 			qdisc_put(q);
1359 		else
1360 			qdisc_put_unlocked(q);
1361 	}
1362 }
1363 
1364 struct tcf_block_owner_item {
1365 	struct list_head list;
1366 	struct Qdisc *q;
1367 	enum flow_block_binder_type binder_type;
1368 };
1369 
1370 static void
1371 tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1372 			       struct Qdisc *q,
1373 			       enum flow_block_binder_type binder_type)
1374 {
1375 	if (block->keep_dst &&
1376 	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1377 	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1378 		netif_keep_dst(qdisc_dev(q));
1379 }
1380 
1381 void tcf_block_netif_keep_dst(struct tcf_block *block)
1382 {
1383 	struct tcf_block_owner_item *item;
1384 
1385 	block->keep_dst = true;
1386 	list_for_each_entry(item, &block->owner_list, list)
1387 		tcf_block_owner_netif_keep_dst(block, item->q,
1388 					       item->binder_type);
1389 }
1390 EXPORT_SYMBOL(tcf_block_netif_keep_dst);
1391 
1392 static int tcf_block_owner_add(struct tcf_block *block,
1393 			       struct Qdisc *q,
1394 			       enum flow_block_binder_type binder_type)
1395 {
1396 	struct tcf_block_owner_item *item;
1397 
1398 	item = kmalloc(sizeof(*item), GFP_KERNEL);
1399 	if (!item)
1400 		return -ENOMEM;
1401 	item->q = q;
1402 	item->binder_type = binder_type;
1403 	list_add(&item->list, &block->owner_list);
1404 	return 0;
1405 }
1406 
1407 static void tcf_block_owner_del(struct tcf_block *block,
1408 				struct Qdisc *q,
1409 				enum flow_block_binder_type binder_type)
1410 {
1411 	struct tcf_block_owner_item *item;
1412 
1413 	list_for_each_entry(item, &block->owner_list, list) {
1414 		if (item->q == q && item->binder_type == binder_type) {
1415 			list_del(&item->list);
1416 			kfree(item);
1417 			return;
1418 		}
1419 	}
1420 	WARN_ON(1);
1421 }
1422 
1423 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1424 		      struct tcf_block_ext_info *ei,
1425 		      struct netlink_ext_ack *extack)
1426 {
1427 	struct net *net = qdisc_net(q);
1428 	struct tcf_block *block = NULL;
1429 	int err;
1430 
1431 	if (ei->block_index)
1432		/* A non-zero block_index means a shared block is requested */
1433 		block = tcf_block_refcnt_get(net, ei->block_index);
1434 
1435 	if (!block) {
1436 		block = tcf_block_create(net, q, ei->block_index, extack);
1437 		if (IS_ERR(block))
1438 			return PTR_ERR(block);
1439 		if (tcf_block_shared(block)) {
1440 			err = tcf_block_insert(block, net, extack);
1441 			if (err)
1442 				goto err_block_insert;
1443 		}
1444 	}
1445 
1446 	err = tcf_block_owner_add(block, q, ei->binder_type);
1447 	if (err)
1448 		goto err_block_owner_add;
1449 
1450 	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1451 
1452 	err = tcf_chain0_head_change_cb_add(block, ei, extack);
1453 	if (err)
1454 		goto err_chain0_head_change_cb_add;
1455 
1456 	err = tcf_block_offload_bind(block, q, ei, extack);
1457 	if (err)
1458 		goto err_block_offload_bind;
1459 
1460 	*p_block = block;
1461 	return 0;
1462 
1463 err_block_offload_bind:
1464 	tcf_chain0_head_change_cb_del(block, ei);
1465 err_chain0_head_change_cb_add:
1466 	tcf_block_owner_del(block, q, ei->binder_type);
1467 err_block_owner_add:
1468 err_block_insert:
1469 	tcf_block_refcnt_put(block, true);
1470 	return err;
1471 }
1472 EXPORT_SYMBOL(tcf_block_get_ext);
1473 
1474 static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1475 {
1476 	struct tcf_proto __rcu **p_filter_chain = priv;
1477 
1478 	rcu_assign_pointer(*p_filter_chain, tp_head);
1479 }
1480 
1481 int tcf_block_get(struct tcf_block **p_block,
1482 		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1483 		  struct netlink_ext_ack *extack)
1484 {
1485 	struct tcf_block_ext_info ei = {
1486 		.chain_head_change = tcf_chain_head_change_dflt,
1487 		.chain_head_change_priv = p_filter_chain,
1488 	};
1489 
1490 	WARN_ON(!p_filter_chain);
1491 	return tcf_block_get_ext(p_block, q, &ei, extack);
1492 }
1493 EXPORT_SYMBOL(tcf_block_get);
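
/* Usage sketch (illustrative, in the style of simple classful qdiscs):
 * a qdisc obtains its block in ->init() and releases it in ->destroy();
 * q->filter_list is kept current by the default head change callback
 * installed above. "example_sched_data" is a placeholder:
 *
 *	static int example_init(struct Qdisc *sch, struct nlattr *opt,
 *				struct netlink_ext_ack *extack)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void example_destroy(struct Qdisc *sch)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */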
1494 
1495 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
1496  * actions should all be removed after flushing.
1497  */
1498 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1499 		       struct tcf_block_ext_info *ei)
1500 {
1501 	if (!block)
1502 		return;
1503 	tcf_chain0_head_change_cb_del(block, ei);
1504 	tcf_block_owner_del(block, q, ei->binder_type);
1505 
1506 	__tcf_block_put(block, q, ei, true);
1507 }
1508 EXPORT_SYMBOL(tcf_block_put_ext);
1509 
1510 void tcf_block_put(struct tcf_block *block)
1511 {
1512 	struct tcf_block_ext_info ei = {0, };
1513 
1514 	if (!block)
1515 		return;
1516 	tcf_block_put_ext(block, block->q, &ei);
1517 }
1518 
1519 EXPORT_SYMBOL(tcf_block_put);
1520 
1521 static int
1522 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1523 			    void *cb_priv, bool add, bool offload_in_use,
1524 			    struct netlink_ext_ack *extack)
1525 {
1526 	struct tcf_chain *chain, *chain_prev;
1527 	struct tcf_proto *tp, *tp_prev;
1528 	int err;
1529 
1530 	lockdep_assert_held(&block->cb_lock);
1531 
1532 	for (chain = __tcf_get_next_chain(block, NULL);
1533 	     chain;
1534 	     chain_prev = chain,
1535 		     chain = __tcf_get_next_chain(block, chain),
1536 		     tcf_chain_put(chain_prev)) {
1537 		for (tp = __tcf_get_next_proto(chain, NULL); tp;
1538 		     tp_prev = tp,
1539 			     tp = __tcf_get_next_proto(chain, tp),
1540 			     tcf_proto_put(tp_prev, true, NULL)) {
1541 			if (tp->ops->reoffload) {
1542 				err = tp->ops->reoffload(tp, add, cb, cb_priv,
1543 							 extack);
1544 				if (err && add)
1545 					goto err_playback_remove;
1546 			} else if (add && offload_in_use) {
1547 				err = -EOPNOTSUPP;
1548 				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1549 				goto err_playback_remove;
1550 			}
1551 		}
1552 	}
1553 
1554 	return 0;
1555 
1556 err_playback_remove:
1557 	tcf_proto_put(tp, true, NULL);
1558 	tcf_chain_put(chain);
1559 	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1560 				    extack);
1561 	return err;
1562 }
1563 
1564 static int tcf_block_bind(struct tcf_block *block,
1565 			  struct flow_block_offload *bo)
1566 {
1567 	struct flow_block_cb *block_cb, *next;
1568 	int err, i = 0;
1569 
1570 	lockdep_assert_held(&block->cb_lock);
1571 
1572 	list_for_each_entry(block_cb, &bo->cb_list, list) {
1573 		err = tcf_block_playback_offloads(block, block_cb->cb,
1574 						  block_cb->cb_priv, true,
1575 						  tcf_block_offload_in_use(block),
1576 						  bo->extack);
1577 		if (err)
1578 			goto err_unroll;
1579 		if (!bo->unlocked_driver_cb)
1580 			block->lockeddevcnt++;
1581 
1582 		i++;
1583 	}
1584 	list_splice(&bo->cb_list, &block->flow_block.cb_list);
1585 
1586 	return 0;
1587 
1588 err_unroll:
1589 	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1590 		list_del(&block_cb->driver_list);
1591 		if (i-- > 0) {
1592 			list_del(&block_cb->list);
1593 			tcf_block_playback_offloads(block, block_cb->cb,
1594 						    block_cb->cb_priv, false,
1595 						    tcf_block_offload_in_use(block),
1596 						    NULL);
1597 			if (!bo->unlocked_driver_cb)
1598 				block->lockeddevcnt--;
1599 		}
1600 		flow_block_cb_free(block_cb);
1601 	}
1602 
1603 	return err;
1604 }
1605 
1606 static void tcf_block_unbind(struct tcf_block *block,
1607 			     struct flow_block_offload *bo)
1608 {
1609 	struct flow_block_cb *block_cb, *next;
1610 
1611 	lockdep_assert_held(&block->cb_lock);
1612 
1613 	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1614 		tcf_block_playback_offloads(block, block_cb->cb,
1615 					    block_cb->cb_priv, false,
1616 					    tcf_block_offload_in_use(block),
1617 					    NULL);
1618 		list_del(&block_cb->list);
1619 		flow_block_cb_free(block_cb);
1620 		if (!bo->unlocked_driver_cb)
1621 			block->lockeddevcnt--;
1622 	}
1623 }
1624 
1625 static int tcf_block_setup(struct tcf_block *block,
1626 			   struct flow_block_offload *bo)
1627 {
1628 	int err;
1629 
1630 	switch (bo->command) {
1631 	case FLOW_BLOCK_BIND:
1632 		err = tcf_block_bind(block, bo);
1633 		break;
1634 	case FLOW_BLOCK_UNBIND:
1635 		err = 0;
1636 		tcf_block_unbind(block, bo);
1637 		break;
1638 	default:
1639 		WARN_ON_ONCE(1);
1640 		err = -EOPNOTSUPP;
1641 	}
1642 
1643 	return err;
1644 }
1645 
1646 /* Main classifier routine: scans the classifier chain attached
1647  * to this qdisc, (optionally) tests for the protocol and asks
1648  * the specific classifiers.
1649  */
1650 static inline int __tcf_classify(struct sk_buff *skb,
1651 				 const struct tcf_proto *tp,
1652 				 const struct tcf_proto *orig_tp,
1653 				 struct tcf_result *res,
1654 				 bool compat_mode,
1655 				 struct tcf_exts_miss_cookie_node *n,
1656 				 int act_index,
1657 				 u32 *last_executed_chain)
1658 {
1659 #ifdef CONFIG_NET_CLS_ACT
1660 	const int max_reclassify_loop = 16;
1661 	const struct tcf_proto *first_tp;
1662 	int limit = 0;
1663 
1664 reclassify:
1665 #endif
1666 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
1667 		__be16 protocol = skb_protocol(skb, false);
1668 		int err = 0;
1669 
1670 		if (n) {
1671 			struct tcf_exts *exts;
1672 
1673 			if (n->tp_prio != tp->prio)
1674 				continue;
1675 
1676			/* We re-look up the tp and chain by index instead of
1677			 * holding hard refs and locks on them, so sanity-check
1678			 * whether any of tp, chain or exts was replaced by the
1679			 * time we got here with a cookie from hardware.
1680 			 */
1681 			if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
1682 				     !tp->ops->get_exts))
1683 				return TC_ACT_SHOT;
1684 
1685 			exts = tp->ops->get_exts(tp, n->handle);
1686 			if (unlikely(!exts || n->exts != exts))
1687 				return TC_ACT_SHOT;
1688 
1689 			n = NULL;
1690 			err = tcf_exts_exec_ex(skb, exts, act_index, res);
1691 		} else {
1692 			if (tp->protocol != protocol &&
1693 			    tp->protocol != htons(ETH_P_ALL))
1694 				continue;
1695 
1696 			err = tc_classify(skb, tp, res);
1697 		}
1698 #ifdef CONFIG_NET_CLS_ACT
1699 		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1700 			first_tp = orig_tp;
1701 			*last_executed_chain = first_tp->chain->index;
1702 			goto reset;
1703 		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1704 			first_tp = res->goto_tp;
1705 			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1706 			goto reset;
1707 		}
1708 #endif
1709 		if (err >= 0)
1710 			return err;
1711 	}
1712 
1713 	if (unlikely(n))
1714 		return TC_ACT_SHOT;
1715 
1716 	return TC_ACT_UNSPEC; /* signal: continue lookup */
1717 #ifdef CONFIG_NET_CLS_ACT
1718 reset:
1719 	if (unlikely(limit++ >= max_reclassify_loop)) {
1720 		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1721 				       tp->chain->block->index,
1722 				       tp->prio & 0xffff,
1723 				       ntohs(tp->protocol));
1724 		return TC_ACT_SHOT;
1725 	}
1726 
1727 	tp = first_tp;
1728 	goto reclassify;
1729 #endif
1730 }
1731 
1732 int tcf_classify(struct sk_buff *skb,
1733 		 const struct tcf_block *block,
1734 		 const struct tcf_proto *tp,
1735 		 struct tcf_result *res, bool compat_mode)
1736 {
1737 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1738 	u32 last_executed_chain = 0;
1739 
1740 	return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
1741 			      &last_executed_chain);
1742 #else
1743 	u32 last_executed_chain = tp ? tp->chain->index : 0;
1744 	struct tcf_exts_miss_cookie_node *n = NULL;
1745 	const struct tcf_proto *orig_tp = tp;
1746 	struct tc_skb_ext *ext;
1747 	int act_index = 0;
1748 	int ret;
1749 
1750 	if (block) {
1751 		ext = skb_ext_find(skb, TC_SKB_EXT);
1752 
1753 		if (ext && (ext->chain || ext->act_miss)) {
1754 			struct tcf_chain *fchain;
1755 			u32 chain;
1756 
1757 			if (ext->act_miss) {
1758 				n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
1759 								&act_index);
1760 				if (!n)
1761 					return TC_ACT_SHOT;
1762 
1763 				chain = n->chain_index;
1764 			} else {
1765 				chain = ext->chain;
1766 			}
1767 
1768 			fchain = tcf_chain_lookup_rcu(block, chain);
1769 			if (!fchain)
1770 				return TC_ACT_SHOT;
1771 
1772 			/* Consume, so cloned/redirect skbs won't inherit ext */
1773 			skb_ext_del(skb, TC_SKB_EXT);
1774 
1775 			tp = rcu_dereference_bh(fchain->filter_chain);
1776 			last_executed_chain = fchain->index;
1777 		}
1778 	}
1779 
1780 	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
1781 			     &last_executed_chain);
1782 
1783 	if (tc_skb_ext_tc_enabled()) {
1784 		/* If we missed on some chain */
1785 		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1786 			struct tc_skb_cb *cb = tc_skb_cb(skb);
1787 
1788 			ext = tc_skb_ext_alloc(skb);
1789 			if (WARN_ON_ONCE(!ext))
1790 				return TC_ACT_SHOT;
1791 			ext->chain = last_executed_chain;
1792 			ext->mru = cb->mru;
1793 			ext->post_ct = cb->post_ct;
1794 			ext->post_ct_snat = cb->post_ct_snat;
1795 			ext->post_ct_dnat = cb->post_ct_dnat;
1796 			ext->zone = cb->zone;
1797 		}
1798 	}
1799 
1800 	return ret;
1801 #endif
1802 }
1803 EXPORT_SYMBOL(tcf_classify);
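
/* Caller sketch (illustrative): a qdisc enqueue path classifies the skb
 * and acts on the verdict, broadly as in-tree qdiscs do:
 *
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	struct tcf_result res;
 *	int err;
 *
 *	err = tcf_classify(skb, NULL, fl, &res, false);
 *	switch (err) {
 *	case TC_ACT_SHOT:
 *	case TC_ACT_STOLEN:
 *		...	drop or consume the skb
 *	default:
 *		...	use res.classid to select a class/band
 *	}
 */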
1804 
1805 struct tcf_chain_info {
1806 	struct tcf_proto __rcu **pprev;
1807 	struct tcf_proto __rcu *next;
1808 };
1809 
1810 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1811 					   struct tcf_chain_info *chain_info)
1812 {
1813 	return tcf_chain_dereference(*chain_info->pprev, chain);
1814 }
1815 
1816 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1817 			       struct tcf_chain_info *chain_info,
1818 			       struct tcf_proto *tp)
1819 {
1820 	if (chain->flushing)
1821 		return -EAGAIN;
1822 
1823 	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1824 	if (*chain_info->pprev == chain->filter_chain)
1825 		tcf_chain0_head_change(chain, tp);
1826 	tcf_proto_get(tp);
1827 	rcu_assign_pointer(*chain_info->pprev, tp);
1828 
1829 	return 0;
1830 }
1831 
1832 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1833 				struct tcf_chain_info *chain_info,
1834 				struct tcf_proto *tp)
1835 {
1836 	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1837 
1838 	tcf_proto_mark_delete(tp);
1839 	if (tp == chain->filter_chain)
1840 		tcf_chain0_head_change(chain, next);
1841 	RCU_INIT_POINTER(*chain_info->pprev, next);
1842 }
1843 
1844 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1845 					   struct tcf_chain_info *chain_info,
1846 					   u32 protocol, u32 prio,
1847 					   bool prio_allocate);
1848 
1849 /* Try to insert a new proto.
1850  * If a proto with the specified priority already exists, free the new
1851  * proto and return the existing one.
1852  */
1853 
1854 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1855 						    struct tcf_proto *tp_new,
1856 						    u32 protocol, u32 prio,
1857 						    bool rtnl_held)
1858 {
1859 	struct tcf_chain_info chain_info;
1860 	struct tcf_proto *tp;
1861 	int err = 0;
1862 
1863 	mutex_lock(&chain->filter_chain_lock);
1864 
1865 	if (tcf_proto_exists_destroying(chain, tp_new)) {
1866 		mutex_unlock(&chain->filter_chain_lock);
1867 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1868 		return ERR_PTR(-EAGAIN);
1869 	}
1870 
1871 	tp = tcf_chain_tp_find(chain, &chain_info,
1872 			       protocol, prio, false);
1873 	if (!tp)
1874 		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1875 	mutex_unlock(&chain->filter_chain_lock);
1876 
1877 	if (tp) {
1878 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1879 		tp_new = tp;
1880 	} else if (err) {
1881 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1882 		tp_new = ERR_PTR(err);
1883 	}
1884 
1885 	return tp_new;
1886 }
1887 
1888 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1889 				      struct tcf_proto *tp, bool rtnl_held,
1890 				      struct netlink_ext_ack *extack)
1891 {
1892 	struct tcf_chain_info chain_info;
1893 	struct tcf_proto *tp_iter;
1894 	struct tcf_proto **pprev;
1895 	struct tcf_proto *next;
1896 
1897 	mutex_lock(&chain->filter_chain_lock);
1898 
1899 	/* Atomically find and remove tp from chain. */
1900 	for (pprev = &chain->filter_chain;
1901 	     (tp_iter = tcf_chain_dereference(*pprev, chain));
1902 	     pprev = &tp_iter->next) {
1903 		if (tp_iter == tp) {
1904 			chain_info.pprev = pprev;
1905 			chain_info.next = tp_iter->next;
1906 			WARN_ON(tp_iter->deleting);
1907 			break;
1908 		}
1909 	}
1910 	/* Verify that tp still exists and no new filters were inserted
1911 	 * concurrently.
1912 	 * Mark tp for deletion if it is empty.
1913 	 */
1914 	if (!tp_iter || !tcf_proto_check_delete(tp)) {
1915 		mutex_unlock(&chain->filter_chain_lock);
1916 		return;
1917 	}
1918 
1919 	tcf_proto_signal_destroying(chain, tp);
1920 	next = tcf_chain_dereference(chain_info.next, chain);
1921 	if (tp == chain->filter_chain)
1922 		tcf_chain0_head_change(chain, next);
1923 	RCU_INIT_POINTER(*chain_info.pprev, next);
1924 	mutex_unlock(&chain->filter_chain_lock);
1925 
1926 	tcf_proto_put(tp, rtnl_held, extack);
1927 }
1928 
1929 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1930 					   struct tcf_chain_info *chain_info,
1931 					   u32 protocol, u32 prio,
1932 					   bool prio_allocate)
1933 {
1934 	struct tcf_proto **pprev;
1935 	struct tcf_proto *tp;
1936 
1937	/* Check the chain for the existence of a tcf_proto with this priority */
1938 	for (pprev = &chain->filter_chain;
1939 	     (tp = tcf_chain_dereference(*pprev, chain));
1940 	     pprev = &tp->next) {
1941 		if (tp->prio >= prio) {
1942 			if (tp->prio == prio) {
1943 				if (prio_allocate ||
1944 				    (tp->protocol != protocol && protocol))
1945 					return ERR_PTR(-EINVAL);
1946 			} else {
1947 				tp = NULL;
1948 			}
1949 			break;
1950 		}
1951 	}
1952 	chain_info->pprev = pprev;
1953 	if (tp) {
1954 		chain_info->next = tp->next;
1955 		tcf_proto_get(tp);
1956 	} else {
1957 		chain_info->next = NULL;
1958 	}
1959 	return tp;
1960 }
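
/* Usage sketch for the lookup above (editorial; mirrors the pattern in
 * tc_get_tfilter() below). The returned proto is referenced via
 * tcf_proto_get(), so the caller must balance it with tcf_proto_put():
 *
 *	mutex_lock(&chain->filter_chain_lock);
 *	tp = tcf_chain_tp_find(chain, &chain_info, protocol, prio, false);
 *	mutex_unlock(&chain->filter_chain_lock);
 *	if (tp && !IS_ERR(tp)) {
 *		... use tp ...
 *		tcf_proto_put(tp, rtnl_held, NULL);
 *	}
 */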
1961 
1962 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1963 			 struct tcf_proto *tp, struct tcf_block *block,
1964 			 struct Qdisc *q, u32 parent, void *fh,
1965 			 u32 portid, u32 seq, u16 flags, int event,
1966 			 bool terse_dump, bool rtnl_held,
1967 			 struct netlink_ext_ack *extack)
1968 {
1969 	struct tcmsg *tcm;
1970 	struct nlmsghdr  *nlh;
1971 	unsigned char *b = skb_tail_pointer(skb);
1972 
1973 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1974 	if (!nlh)
1975 		goto out_nlmsg_trim;
1976 	tcm = nlmsg_data(nlh);
1977 	tcm->tcm_family = AF_UNSPEC;
1978 	tcm->tcm__pad1 = 0;
1979 	tcm->tcm__pad2 = 0;
1980 	if (q) {
1981 		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1982 		tcm->tcm_parent = parent;
1983 	} else {
1984 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1985 		tcm->tcm_block_index = block->index;
1986 	}
1987 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1988 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1989 		goto nla_put_failure;
1990 	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1991 		goto nla_put_failure;
1992 	if (!fh) {
1993 		tcm->tcm_handle = 0;
1994 	} else if (terse_dump) {
1995 		if (tp->ops->terse_dump) {
1996 			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
1997 						rtnl_held) < 0)
1998 				goto nla_put_failure;
1999 		} else {
2000 			goto cls_op_not_supp;
2001 		}
2002 	} else {
2003 		if (tp->ops->dump &&
2004 		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
2005 			goto nla_put_failure;
2006 	}
2007 
2008 	if (extack && extack->_msg &&
2009 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2010 		goto nla_put_failure;
2011 
2012 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2013 
2014 	return skb->len;
2015 
2016 out_nlmsg_trim:
2017 nla_put_failure:
2018 cls_op_not_supp:
2019 	nlmsg_trim(skb, b);
2020 	return -1;
2021 }
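
/* Layout of the message built above (editorial summary):
 *
 *	struct nlmsghdr
 *	struct tcmsg		(tcm_info = TC_H_MAKE(prio, protocol))
 *	TCA_KIND		(classifier name, e.g. "flower")
 *	TCA_CHAIN		(chain index)
 *	classifier attributes from ->dump() or ->terse_dump()
 *	TCA_EXT_WARN_MSG	(optional extack message)
 */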
2022 
2023 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
2024 			  struct nlmsghdr *n, struct tcf_proto *tp,
2025 			  struct tcf_block *block, struct Qdisc *q,
2026 			  u32 parent, void *fh, int event, bool unicast,
2027 			  bool rtnl_held, struct netlink_ext_ack *extack)
2028 {
2029 	struct sk_buff *skb;
2030 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2031 	int err = 0;
2032 
2033 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2034 	if (!skb)
2035 		return -ENOBUFS;
2036 
2037 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2038 			  n->nlmsg_seq, n->nlmsg_flags, event,
2039 			  false, rtnl_held, extack) <= 0) {
2040 		kfree_skb(skb);
2041 		return -EINVAL;
2042 	}
2043 
2044 	if (unicast)
2045 		err = rtnl_unicast(skb, net, portid);
2046 	else
2047 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2048 				     n->nlmsg_flags & NLM_F_ECHO);
2049 	return err;
2050 }
2051 
2052 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
2053 			      struct nlmsghdr *n, struct tcf_proto *tp,
2054 			      struct tcf_block *block, struct Qdisc *q,
2055 			      u32 parent, void *fh, bool unicast, bool *last,
2056 			      bool rtnl_held, struct netlink_ext_ack *extack)
2057 {
2058 	struct sk_buff *skb;
2059 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2060 	int err;
2061 
2062 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2063 	if (!skb)
2064 		return -ENOBUFS;
2065 
2066 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2067 			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
2068 			  false, rtnl_held, extack) <= 0) {
2069 		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
2070 		kfree_skb(skb);
2071 		return -EINVAL;
2072 	}
2073 
2074 	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
2075 	if (err) {
2076 		kfree_skb(skb);
2077 		return err;
2078 	}
2079 
2080 	if (unicast)
2081 		err = rtnl_unicast(skb, net, portid);
2082 	else
2083 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2084 				     n->nlmsg_flags & NLM_F_ECHO);
2085 	if (err < 0)
2086 		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
2087 
2088 	return err;
2089 }
2090 
2091 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
2092 				 struct tcf_block *block, struct Qdisc *q,
2093 				 u32 parent, struct nlmsghdr *n,
2094 				 struct tcf_chain *chain, int event,
2095 				 struct netlink_ext_ack *extack)
2096 {
2097 	struct tcf_proto *tp;
2098 
2099 	for (tp = tcf_get_next_proto(chain, NULL);
2100 	     tp; tp = tcf_get_next_proto(chain, tp))
2101 		tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
2102 			       event, false, true, extack);
2103 }
2104 
2105 static void tfilter_put(struct tcf_proto *tp, void *fh)
2106 {
2107 	if (tp->ops->put && fh)
2108 		tp->ops->put(tp, fh);
2109 }
2110 
2111 static bool is_qdisc_ingress(__u32 classid)
2112 {
2113 	return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
2114 }
2115 
2116 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2117 			  struct netlink_ext_ack *extack)
2118 {
2119 	struct net *net = sock_net(skb->sk);
2120 	struct nlattr *tca[TCA_MAX + 1];
2121 	char name[IFNAMSIZ];
2122 	struct tcmsg *t;
2123 	u32 protocol;
2124 	u32 prio;
2125 	bool prio_allocate;
2126 	u32 parent;
2127 	u32 chain_index;
2128 	struct Qdisc *q;
2129 	struct tcf_chain_info chain_info;
2130 	struct tcf_chain *chain;
2131 	struct tcf_block *block;
2132 	struct tcf_proto *tp;
2133 	unsigned long cl;
2134 	void *fh;
2135 	int err;
2136 	int tp_created;
2137 	bool rtnl_held = false;
2138 	u32 flags;
2139 
2140 replay:
2141 	tp_created = 0;
2142 
2143 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2144 				     rtm_tca_policy, extack);
2145 	if (err < 0)
2146 		return err;
2147 
2148 	t = nlmsg_data(n);
2149 	protocol = TC_H_MIN(t->tcm_info);
2150 	prio = TC_H_MAJ(t->tcm_info);
2151 	prio_allocate = false;
2152 	parent = t->tcm_parent;
2153 	tp = NULL;
2154 	cl = 0;
2155 	block = NULL;
2156 	q = NULL;
2157 	chain = NULL;
2158 	flags = 0;
2159 
2160 	if (prio == 0) {
2161 		/* If no priority is provided by the user,
2162 		 * we allocate one.
2163 		 */
2164 		if (n->nlmsg_flags & NLM_F_CREATE) {
2165 			prio = TC_H_MAKE(0x80000000U, 0U);
2166 			prio_allocate = true;
2167 		} else {
2168 			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2169 			return -ENOENT;
2170 		}
2171 	}
2172 
2173 	/* Find head of filter chain. */
2174 
2175 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2176 	if (err)
2177 		return err;
2178 
2179 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2180 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2181 		err = -EINVAL;
2182 		goto errout;
2183 	}
2184 
2185 	/* Take the rtnl mutex if rtnl_held was set on a previous iteration,
2186 	 * the block is shared (no qdisc found), the qdisc is not unlocked, the
2187 	 * classifier type is not specified, or the classifier is not unlocked.
2188 	 */
2189 	if (rtnl_held ||
2190 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2191 	    !tcf_proto_is_unlocked(name)) {
2192 		rtnl_held = true;
2193 		rtnl_lock();
2194 	}
2195 
2196 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2197 	if (err)
2198 		goto errout;
2199 
2200 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2201 				 extack);
2202 	if (IS_ERR(block)) {
2203 		err = PTR_ERR(block);
2204 		goto errout;
2205 	}
2206 	block->classid = parent;
2207 
2208 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2209 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2210 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2211 		err = -EINVAL;
2212 		goto errout;
2213 	}
2214 	chain = tcf_chain_get(block, chain_index, true);
2215 	if (!chain) {
2216 		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2217 		err = -ENOMEM;
2218 		goto errout;
2219 	}
2220 
2221 	mutex_lock(&chain->filter_chain_lock);
2222 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2223 			       prio, prio_allocate);
2224 	if (IS_ERR(tp)) {
2225 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2226 		err = PTR_ERR(tp);
2227 		goto errout_locked;
2228 	}
2229 
2230 	if (tp == NULL) {
2231 		struct tcf_proto *tp_new = NULL;
2232 
2233 		if (chain->flushing) {
2234 			err = -EAGAIN;
2235 			goto errout_locked;
2236 		}
2237 
2238 		/* Proto-tcf does not exist, create a new one */
2239 
2240 		if (tca[TCA_KIND] == NULL || !protocol) {
2241 			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2242 			err = -EINVAL;
2243 			goto errout_locked;
2244 		}
2245 
2246 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2247 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2248 			err = -ENOENT;
2249 			goto errout_locked;
2250 		}
2251 
2252 		if (prio_allocate)
2253 			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2254 							       &chain_info));
2255 
2256 		mutex_unlock(&chain->filter_chain_lock);
2257 		tp_new = tcf_proto_create(name, protocol, prio, chain,
2258 					  rtnl_held, extack);
2259 		if (IS_ERR(tp_new)) {
2260 			err = PTR_ERR(tp_new);
2261 			goto errout_tp;
2262 		}
2263 
2264 		tp_created = 1;
2265 		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2266 						rtnl_held);
2267 		if (IS_ERR(tp)) {
2268 			err = PTR_ERR(tp);
2269 			goto errout_tp;
2270 		}
2271 	} else {
2272 		mutex_unlock(&chain->filter_chain_lock);
2273 	}
2274 
2275 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2276 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2277 		err = -EINVAL;
2278 		goto errout;
2279 	}
2280 
2281 	fh = tp->ops->get(tp, t->tcm_handle);
2282 
2283 	if (!fh) {
2284 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2285 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2286 			err = -ENOENT;
2287 			goto errout;
2288 		}
2289 	} else if (n->nlmsg_flags & NLM_F_EXCL) {
2290 		tfilter_put(tp, fh);
2291 		NL_SET_ERR_MSG(extack, "Filter already exists");
2292 		err = -EEXIST;
2293 		goto errout;
2294 	}
2295 
2296 	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2297 		tfilter_put(tp, fh);
2298 		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2299 		err = -EINVAL;
2300 		goto errout;
2301 	}
2302 
2303 	if (!(n->nlmsg_flags & NLM_F_CREATE))
2304 		flags |= TCA_ACT_FLAGS_REPLACE;
2305 	if (!rtnl_held)
2306 		flags |= TCA_ACT_FLAGS_NO_RTNL;
2307 	if (is_qdisc_ingress(parent))
2308 		flags |= TCA_ACT_FLAGS_AT_INGRESS;
2309 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2310 			      flags, extack);
2311 	if (err == 0) {
2312 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2313 			       RTM_NEWTFILTER, false, rtnl_held, extack);
2314 		tfilter_put(tp, fh);
2315 		/* q pointer is NULL for shared blocks */
2316 		if (q)
2317 			q->flags &= ~TCQ_F_CAN_BYPASS;
2318 	}
2319 
2320 errout:
2321 	if (err && tp_created)
2322 		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2323 errout_tp:
2324 	if (chain) {
2325 		if (tp && !IS_ERR(tp))
2326 			tcf_proto_put(tp, rtnl_held, NULL);
2327 		if (!tp_created)
2328 			tcf_chain_put(chain);
2329 	}
2330 	tcf_block_release(q, block, rtnl_held);
2331 
2332 	if (rtnl_held)
2333 		rtnl_unlock();
2334 
2335 	if (err == -EAGAIN) {
2336 		/* Take the rtnl lock in case EAGAIN is caused by a concurrent
2337 		 * flush of the target chain.
2338 		 */
2339 		rtnl_held = true;
2340 		/* Replay the request. */
2341 		goto replay;
2342 	}
2343 	return err;
2344 
2345 errout_locked:
2346 	mutex_unlock(&chain->filter_chain_lock);
2347 	goto errout;
2348 }
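
/* For reference, a request handled by tc_new_tfilter() corresponds to an
 * iproute2 command such as (illustrative example):
 *
 *	tc filter add dev eth0 ingress protocol ip prio 10 \
 *		flower dst_ip 192.0.2.1 action drop
 *
 * Omitting "prio" while passing NLM_F_CREATE takes the prio_allocate
 * path above.
 */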
2349 
2350 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2351 			  struct netlink_ext_ack *extack)
2352 {
2353 	struct net *net = sock_net(skb->sk);
2354 	struct nlattr *tca[TCA_MAX + 1];
2355 	char name[IFNAMSIZ];
2356 	struct tcmsg *t;
2357 	u32 protocol;
2358 	u32 prio;
2359 	u32 parent;
2360 	u32 chain_index;
2361 	struct Qdisc *q = NULL;
2362 	struct tcf_chain_info chain_info;
2363 	struct tcf_chain *chain = NULL;
2364 	struct tcf_block *block = NULL;
2365 	struct tcf_proto *tp = NULL;
2366 	unsigned long cl = 0;
2367 	void *fh = NULL;
2368 	int err;
2369 	bool rtnl_held = false;
2370 
2371 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2372 				     rtm_tca_policy, extack);
2373 	if (err < 0)
2374 		return err;
2375 
2376 	t = nlmsg_data(n);
2377 	protocol = TC_H_MIN(t->tcm_info);
2378 	prio = TC_H_MAJ(t->tcm_info);
2379 	parent = t->tcm_parent;
2380 
2381 	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2382 		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2383 		return -ENOENT;
2384 	}
2385 
2386 	/* Find head of filter chain. */
2387 
2388 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2389 	if (err)
2390 		return err;
2391 
2392 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2393 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2394 		err = -EINVAL;
2395 		goto errout;
2396 	}
2397 	/* Take the rtnl mutex if flushing the whole chain, the block is shared
2398 	 * (no qdisc found), the qdisc is not unlocked, the classifier type is
2399 	 * not specified, or the classifier is not unlocked.
2400 	 */
2401 	if (!prio ||
2402 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2403 	    !tcf_proto_is_unlocked(name)) {
2404 		rtnl_held = true;
2405 		rtnl_lock();
2406 	}
2407 
2408 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2409 	if (err)
2410 		goto errout;
2411 
2412 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2413 				 extack);
2414 	if (IS_ERR(block)) {
2415 		err = PTR_ERR(block);
2416 		goto errout;
2417 	}
2418 
2419 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2420 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2421 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2422 		err = -EINVAL;
2423 		goto errout;
2424 	}
2425 	chain = tcf_chain_get(block, chain_index, false);
2426 	if (!chain) {
2427 		/* User requested flush on non-existent chain. Nothing to do,
2428 		 * so just return success.
2429 		 */
2430 		if (prio == 0) {
2431 			err = 0;
2432 			goto errout;
2433 		}
2434 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2435 		err = -ENOENT;
2436 		goto errout;
2437 	}
2438 
2439 	if (prio == 0) {
2440 		tfilter_notify_chain(net, skb, block, q, parent, n,
2441 				     chain, RTM_DELTFILTER, extack);
2442 		tcf_chain_flush(chain, rtnl_held);
2443 		err = 0;
2444 		goto errout;
2445 	}
2446 
2447 	mutex_lock(&chain->filter_chain_lock);
2448 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2449 			       prio, false);
2450 	if (!tp || IS_ERR(tp)) {
2451 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2452 		err = tp ? PTR_ERR(tp) : -ENOENT;
2453 		goto errout_locked;
2454 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2455 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2456 		err = -EINVAL;
2457 		goto errout_locked;
2458 	} else if (t->tcm_handle == 0) {
2459 		tcf_proto_signal_destroying(chain, tp);
2460 		tcf_chain_tp_remove(chain, &chain_info, tp);
2461 		mutex_unlock(&chain->filter_chain_lock);
2462 
2463 		tcf_proto_put(tp, rtnl_held, NULL);
2464 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2465 			       RTM_DELTFILTER, false, rtnl_held, extack);
2466 		err = 0;
2467 		goto errout;
2468 	}
2469 	mutex_unlock(&chain->filter_chain_lock);
2470 
2471 	fh = tp->ops->get(tp, t->tcm_handle);
2472 
2473 	if (!fh) {
2474 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2475 		err = -ENOENT;
2476 	} else {
2477 		bool last;
2478 
2479 		err = tfilter_del_notify(net, skb, n, tp, block,
2480 					 q, parent, fh, false, &last,
2481 					 rtnl_held, extack);
2482 
2483 		if (err)
2484 			goto errout;
2485 		if (last)
2486 			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2487 	}
2488 
2489 errout:
2490 	if (chain) {
2491 		if (tp && !IS_ERR(tp))
2492 			tcf_proto_put(tp, rtnl_held, NULL);
2493 		tcf_chain_put(chain);
2494 	}
2495 	tcf_block_release(q, block, rtnl_held);
2496 
2497 	if (rtnl_held)
2498 		rtnl_unlock();
2499 
2500 	return err;
2501 
2502 errout_locked:
2503 	mutex_unlock(&chain->filter_chain_lock);
2504 	goto errout;
2505 }
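
/* For reference (illustrative iproute2 examples): prio == 0 requests a
 * flush of the whole chain, while a fully specified filter deletes a
 * single instance:
 *
 *	tc filter del dev eth0 ingress
 *	tc filter del dev eth0 ingress protocol ip prio 10 handle 0x1 flower
 */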
2506 
2507 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2508 			  struct netlink_ext_ack *extack)
2509 {
2510 	struct net *net = sock_net(skb->sk);
2511 	struct nlattr *tca[TCA_MAX + 1];
2512 	char name[IFNAMSIZ];
2513 	struct tcmsg *t;
2514 	u32 protocol;
2515 	u32 prio;
2516 	u32 parent;
2517 	u32 chain_index;
2518 	struct Qdisc *q = NULL;
2519 	struct tcf_chain_info chain_info;
2520 	struct tcf_chain *chain = NULL;
2521 	struct tcf_block *block = NULL;
2522 	struct tcf_proto *tp = NULL;
2523 	unsigned long cl = 0;
2524 	void *fh = NULL;
2525 	int err;
2526 	bool rtnl_held = false;
2527 
2528 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2529 				     rtm_tca_policy, extack);
2530 	if (err < 0)
2531 		return err;
2532 
2533 	t = nlmsg_data(n);
2534 	protocol = TC_H_MIN(t->tcm_info);
2535 	prio = TC_H_MAJ(t->tcm_info);
2536 	parent = t->tcm_parent;
2537 
2538 	if (prio == 0) {
2539 		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2540 		return -ENOENT;
2541 	}
2542 
2543 	/* Find head of filter chain. */
2544 
2545 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2546 	if (err)
2547 		return err;
2548 
2549 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2550 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2551 		err = -EINVAL;
2552 		goto errout;
2553 	}
2554 	/* Take the rtnl mutex if the block is shared (no qdisc found), the
2555 	 * qdisc is not unlocked, the classifier type is not specified, or the
2556 	 * classifier is not unlocked.
2557 	 */
2558 	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2559 	    !tcf_proto_is_unlocked(name)) {
2560 		rtnl_held = true;
2561 		rtnl_lock();
2562 	}
2563 
2564 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2565 	if (err)
2566 		goto errout;
2567 
2568 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2569 				 extack);
2570 	if (IS_ERR(block)) {
2571 		err = PTR_ERR(block);
2572 		goto errout;
2573 	}
2574 
2575 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2576 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2577 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2578 		err = -EINVAL;
2579 		goto errout;
2580 	}
2581 	chain = tcf_chain_get(block, chain_index, false);
2582 	if (!chain) {
2583 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2584 		err = -EINVAL;
2585 		goto errout;
2586 	}
2587 
2588 	mutex_lock(&chain->filter_chain_lock);
2589 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2590 			       prio, false);
2591 	mutex_unlock(&chain->filter_chain_lock);
2592 	if (!tp || IS_ERR(tp)) {
2593 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2594 		err = tp ? PTR_ERR(tp) : -ENOENT;
2595 		goto errout;
2596 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2597 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2598 		err = -EINVAL;
2599 		goto errout;
2600 	}
2601 
2602 	fh = tp->ops->get(tp, t->tcm_handle);
2603 
2604 	if (!fh) {
2605 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2606 		err = -ENOENT;
2607 	} else {
2608 		err = tfilter_notify(net, skb, n, tp, block, q, parent,
2609 				     fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
2610 		if (err < 0)
2611 			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2612 	}
2613 
2614 	tfilter_put(tp, fh);
2615 errout:
2616 	if (chain) {
2617 		if (tp && !IS_ERR(tp))
2618 			tcf_proto_put(tp, rtnl_held, NULL);
2619 		tcf_chain_put(chain);
2620 	}
2621 	tcf_block_release(q, block, rtnl_held);
2622 
2623 	if (rtnl_held)
2624 		rtnl_unlock();
2625 
2626 	return err;
2627 }
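
/* For reference, this handles RTM_GETTFILTER without NLM_F_DUMP, e.g.
 * (illustrative):
 *
 *	tc filter get dev eth0 ingress protocol ip prio 10 handle 0x1 flower
 *
 * The reply is unicast to the requester (tfilter_notify() is called with
 * unicast == true above).
 */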
2628 
2629 struct tcf_dump_args {
2630 	struct tcf_walker w;
2631 	struct sk_buff *skb;
2632 	struct netlink_callback *cb;
2633 	struct tcf_block *block;
2634 	struct Qdisc *q;
2635 	u32 parent;
2636 	bool terse_dump;
2637 };
2638 
2639 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2640 {
2641 	struct tcf_dump_args *a = (void *)arg;
2642 	struct net *net = sock_net(a->skb->sk);
2643 
2644 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2645 			     n, NETLINK_CB(a->cb->skb).portid,
2646 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2647 			     RTM_NEWTFILTER, a->terse_dump, true, NULL);
2648 }
2649 
2650 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2651 			   struct sk_buff *skb, struct netlink_callback *cb,
2652 			   long index_start, long *p_index, bool terse)
2653 {
2654 	struct net *net = sock_net(skb->sk);
2655 	struct tcf_block *block = chain->block;
2656 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2657 	struct tcf_proto *tp, *tp_prev;
2658 	struct tcf_dump_args arg;
2659 
2660 	for (tp = __tcf_get_next_proto(chain, NULL);
2661 	     tp;
2662 	     tp_prev = tp,
2663 		     tp = __tcf_get_next_proto(chain, tp),
2664 		     tcf_proto_put(tp_prev, true, NULL),
2665 		     (*p_index)++) {
2666 		if (*p_index < index_start)
2667 			continue;
2668 		if (TC_H_MAJ(tcm->tcm_info) &&
2669 		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
2670 			continue;
2671 		if (TC_H_MIN(tcm->tcm_info) &&
2672 		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
2673 			continue;
2674 		if (*p_index > index_start)
2675 			memset(&cb->args[1], 0,
2676 			       sizeof(cb->args) - sizeof(cb->args[0]));
2677 		if (cb->args[1] == 0) {
2678 			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2679 					  NETLINK_CB(cb->skb).portid,
2680 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
2681 					  RTM_NEWTFILTER, false, true, NULL) <= 0)
2682 				goto errout;
2683 			cb->args[1] = 1;
2684 		}
2685 		if (!tp->ops->walk)
2686 			continue;
2687 		arg.w.fn = tcf_node_dump;
2688 		arg.skb = skb;
2689 		arg.cb = cb;
2690 		arg.block = block;
2691 		arg.q = q;
2692 		arg.parent = parent;
2693 		arg.w.stop = 0;
2694 		arg.w.skip = cb->args[1] - 1;
2695 		arg.w.count = 0;
2696 		arg.w.cookie = cb->args[2];
2697 		arg.terse_dump = terse;
2698 		tp->ops->walk(tp, &arg.w, true);
2699 		cb->args[2] = arg.w.cookie;
2700 		cb->args[1] = arg.w.count + 1;
2701 		if (arg.w.stop)
2702 			goto errout;
2703 	}
2704 	return true;
2705 
2706 errout:
2707 	tcf_proto_put(tp, true, NULL);
2708 	return false;
2709 }
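
/* Dump resumption state (editorial summary): cb->args[0] holds the flat
 * proto index already dumped across all chains, cb->args[1] is the
 * per-proto filter cursor (0 means the tp node itself still has to be
 * dumped), and cb->args[2] is an opaque cookie owned by the classifier's
 * ->walk() implementation.
 */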
2710 
2711 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2712 	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2713 };
2714 
2715 /* called with RTNL */
2716 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2717 {
2718 	struct tcf_chain *chain, *chain_prev;
2719 	struct net *net = sock_net(skb->sk);
2720 	struct nlattr *tca[TCA_MAX + 1];
2721 	struct Qdisc *q = NULL;
2722 	struct tcf_block *block;
2723 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2724 	bool terse_dump = false;
2725 	long index_start;
2726 	long index;
2727 	u32 parent;
2728 	int err;
2729 
2730 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2731 		return skb->len;
2732 
2733 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2734 				     tcf_tfilter_dump_policy, cb->extack);
2735 	if (err)
2736 		return err;
2737 
2738 	if (tca[TCA_DUMP_FLAGS]) {
2739 		struct nla_bitfield32 flags =
2740 			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2741 
2742 		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2743 	}
2744 
2745 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2746 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2747 		if (!block)
2748 			goto out;
2749 		/* If we work with a block index, q is NULL and the parent
2750 		 * value will never be used in the following code. The check
2751 		 * in tcf_fill_node prevents it. However, the compiler does
2752 		 * not see that far, so set parent to zero to silence the
2753 		 * warning about parent being uninitialized.
2754 		 */
2755 		parent = 0;
2756 	} else {
2757 		const struct Qdisc_class_ops *cops;
2758 		struct net_device *dev;
2759 		unsigned long cl = 0;
2760 
2761 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2762 		if (!dev)
2763 			return skb->len;
2764 
2765 		parent = tcm->tcm_parent;
2766 		if (!parent)
2767 			q = rtnl_dereference(dev->qdisc);
2768 		else
2769 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2770 		if (!q)
2771 			goto out;
2772 		cops = q->ops->cl_ops;
2773 		if (!cops)
2774 			goto out;
2775 		if (!cops->tcf_block)
2776 			goto out;
2777 		if (TC_H_MIN(tcm->tcm_parent)) {
2778 			cl = cops->find(q, tcm->tcm_parent);
2779 			if (cl == 0)
2780 				goto out;
2781 		}
2782 		block = cops->tcf_block(q, cl, NULL);
2783 		if (!block)
2784 			goto out;
2785 		parent = block->classid;
2786 		if (tcf_block_shared(block))
2787 			q = NULL;
2788 	}
2789 
2790 	index_start = cb->args[0];
2791 	index = 0;
2792 
2793 	for (chain = __tcf_get_next_chain(block, NULL);
2794 	     chain;
2795 	     chain_prev = chain,
2796 		     chain = __tcf_get_next_chain(block, chain),
2797 		     tcf_chain_put(chain_prev)) {
2798 		if (tca[TCA_CHAIN] &&
2799 		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2800 			continue;
2801 		if (!tcf_chain_dump(chain, q, parent, skb, cb,
2802 				    index_start, &index, terse_dump)) {
2803 			tcf_chain_put(chain);
2804 			err = -EMSGSIZE;
2805 			break;
2806 		}
2807 	}
2808 
2809 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2810 		tcf_block_refcnt_put(block, true);
2811 	cb->args[0] = index;
2812 
2813 out:
2814 	/* If we made no progress, the error (EMSGSIZE) is real */
2815 	if (skb->len == 0 && err)
2816 		return err;
2817 	return skb->len;
2818 }
2819 
2820 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2821 			      void *tmplt_priv, u32 chain_index,
2822 			      struct net *net, struct sk_buff *skb,
2823 			      struct tcf_block *block,
2824 			      u32 portid, u32 seq, u16 flags, int event,
2825 			      struct netlink_ext_ack *extack)
2826 {
2827 	unsigned char *b = skb_tail_pointer(skb);
2828 	const struct tcf_proto_ops *ops;
2829 	struct nlmsghdr *nlh;
2830 	struct tcmsg *tcm;
2831 	void *priv;
2832 
2833 	ops = tmplt_ops;
2834 	priv = tmplt_priv;
2835 
2836 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2837 	if (!nlh)
2838 		goto out_nlmsg_trim;
2839 	tcm = nlmsg_data(nlh);
2840 	tcm->tcm_family = AF_UNSPEC;
2841 	tcm->tcm__pad1 = 0;
2842 	tcm->tcm__pad2 = 0;
2843 	tcm->tcm_handle = 0;
2844 	if (block->q) {
2845 		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2846 		tcm->tcm_parent = block->q->handle;
2847 	} else {
2848 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2849 		tcm->tcm_block_index = block->index;
2850 	}
2851 
2852 	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2853 		goto nla_put_failure;
2854 
2855 	if (ops) {
2856 		if (nla_put_string(skb, TCA_KIND, ops->kind))
2857 			goto nla_put_failure;
2858 		if (ops->tmplt_dump(skb, net, priv) < 0)
2859 			goto nla_put_failure;
2860 	}
2861 
2862 	if (extack && extack->_msg &&
2863 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2864 		goto out_nlmsg_trim;
2865 
2866 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2867 
2868 	return skb->len;
2869 
2870 out_nlmsg_trim:
2871 nla_put_failure:
2872 	nlmsg_trim(skb, b);
2873 	return -EMSGSIZE;
2874 }
2875 
2876 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2877 			   u32 seq, u16 flags, int event, bool unicast,
2878 			   struct netlink_ext_ack *extack)
2879 {
2880 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2881 	struct tcf_block *block = chain->block;
2882 	struct net *net = block->net;
2883 	struct sk_buff *skb;
2884 	int err = 0;
2885 
2886 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2887 	if (!skb)
2888 		return -ENOBUFS;
2889 
2890 	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2891 			       chain->index, net, skb, block, portid,
2892 			       seq, flags, event, extack) <= 0) {
2893 		kfree_skb(skb);
2894 		return -EINVAL;
2895 	}
2896 
2897 	if (unicast)
2898 		err = rtnl_unicast(skb, net, portid);
2899 	else
2900 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2901 				     flags & NLM_F_ECHO);
2902 
2903 	return err;
2904 }
2905 
2906 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2907 				  void *tmplt_priv, u32 chain_index,
2908 				  struct tcf_block *block, struct sk_buff *oskb,
2909 				  u32 seq, u16 flags, bool unicast)
2910 {
2911 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2912 	struct net *net = block->net;
2913 	struct sk_buff *skb;
2914 
2915 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2916 	if (!skb)
2917 		return -ENOBUFS;
2918 
2919 	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2920 			       block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
2921 		kfree_skb(skb);
2922 		return -EINVAL;
2923 	}
2924 
2925 	if (unicast)
2926 		return rtnl_unicast(skb, net, portid);
2927 
2928 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2929 }
2930 
2931 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2932 			      struct nlattr **tca,
2933 			      struct netlink_ext_ack *extack)
2934 {
2935 	const struct tcf_proto_ops *ops;
2936 	char name[IFNAMSIZ];
2937 	void *tmplt_priv;
2938 
2939 	/* If kind is not set, the user did not specify a template. */
2940 	if (!tca[TCA_KIND])
2941 		return 0;
2942 
2943 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2944 		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2945 		return -EINVAL;
2946 	}
2947 
2948 	ops = tcf_proto_lookup_ops(name, true, extack);
2949 	if (IS_ERR(ops))
2950 		return PTR_ERR(ops);
2951 	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2952 		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2953 		module_put(ops->owner);
2954 		return -EOPNOTSUPP;
2955 	}
2956 
2957 	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2958 	if (IS_ERR(tmplt_priv)) {
2959 		module_put(ops->owner);
2960 		return PTR_ERR(tmplt_priv);
2961 	}
2962 	chain->tmplt_ops = ops;
2963 	chain->tmplt_priv = tmplt_priv;
2964 	return 0;
2965 }
2966 
2967 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2968 			       void *tmplt_priv)
2969 {
2970 	/* If template ops are not set, there is no work to do for us. */
2971 	if (!tmplt_ops)
2972 		return;
2973 
2974 	tmplt_ops->tmplt_destroy(tmplt_priv);
2975 	module_put(tmplt_ops->owner);
2976 }
2977 
2978 /* Add/delete/get a chain */
2979 
2980 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2981 			struct netlink_ext_ack *extack)
2982 {
2983 	struct net *net = sock_net(skb->sk);
2984 	struct nlattr *tca[TCA_MAX + 1];
2985 	struct tcmsg *t;
2986 	u32 parent;
2987 	u32 chain_index;
2988 	struct Qdisc *q;
2989 	struct tcf_chain *chain;
2990 	struct tcf_block *block;
2991 	unsigned long cl;
2992 	int err;
2993 
2994 replay:
2995 	q = NULL;
2996 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2997 				     rtm_tca_policy, extack);
2998 	if (err < 0)
2999 		return err;
3000 
3001 	t = nlmsg_data(n);
3002 	parent = t->tcm_parent;
3003 	cl = 0;
3004 
3005 	block = tcf_block_find(net, &q, &parent, &cl,
3006 			       t->tcm_ifindex, t->tcm_block_index, extack);
3007 	if (IS_ERR(block))
3008 		return PTR_ERR(block);
3009 
3010 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
3011 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
3012 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
3013 		err = -EINVAL;
3014 		goto errout_block;
3015 	}
3016 
3017 	mutex_lock(&block->lock);
3018 	chain = tcf_chain_lookup(block, chain_index);
3019 	if (n->nlmsg_type == RTM_NEWCHAIN) {
3020 		if (chain) {
3021 			if (tcf_chain_held_by_acts_only(chain)) {
3022 				/* The chain exists only because there is
3023 				 * some action referencing it.
3024 				 */
3025 				tcf_chain_hold(chain);
3026 			} else {
3027 				NL_SET_ERR_MSG(extack, "Filter chain already exists");
3028 				err = -EEXIST;
3029 				goto errout_block_locked;
3030 			}
3031 		} else {
3032 			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
3033 				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
3034 				err = -ENOENT;
3035 				goto errout_block_locked;
3036 			}
3037 			chain = tcf_chain_create(block, chain_index);
3038 			if (!chain) {
3039 				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
3040 				err = -ENOMEM;
3041 				goto errout_block_locked;
3042 			}
3043 		}
3044 	} else {
3045 		if (!chain || tcf_chain_held_by_acts_only(chain)) {
3046 			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
3047 			err = -EINVAL;
3048 			goto errout_block_locked;
3049 		}
3050 		tcf_chain_hold(chain);
3051 	}
3052 
3053 	if (n->nlmsg_type == RTM_NEWCHAIN) {
3054 		/* Modifying a chain requires holding the parent block lock. In
3055 		 * case the chain was successfully added, take a reference to
3056 		 * the chain. This ensures that an empty chain does not
3057 		 * disappear at the end of this function.
3058 		 */
3059 		tcf_chain_hold(chain);
3060 		chain->explicitly_created = true;
3061 	}
3062 	mutex_unlock(&block->lock);
3063 
3064 	switch (n->nlmsg_type) {
3065 	case RTM_NEWCHAIN:
3066 		err = tc_chain_tmplt_add(chain, net, tca, extack);
3067 		if (err) {
3068 			tcf_chain_put_explicitly_created(chain);
3069 			goto errout;
3070 		}
3071 
3072 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
3073 				RTM_NEWCHAIN, false, extack);
3074 		break;
3075 	case RTM_DELCHAIN:
3076 		tfilter_notify_chain(net, skb, block, q, parent, n,
3077 				     chain, RTM_DELTFILTER, extack);
3078 		/* Flush the chain first as the user requested chain removal. */
3079 		tcf_chain_flush(chain, true);
3080 		/* In case the chain was successfully deleted, put a reference
3081 		 * to the chain previously taken during addition.
3082 		 */
3083 		tcf_chain_put_explicitly_created(chain);
3084 		break;
3085 	case RTM_GETCHAIN:
3086 		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
3087 				      n->nlmsg_flags, n->nlmsg_type, true, extack);
3088 		if (err < 0)
3089 			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
3090 		break;
3091 	default:
3092 		err = -EOPNOTSUPP;
3093 		NL_SET_ERR_MSG(extack, "Unsupported message type");
3094 		goto errout;
3095 	}
3096 
3097 errout:
3098 	tcf_chain_put(chain);
3099 errout_block:
3100 	tcf_block_release(q, block, true);
3101 	if (err == -EAGAIN)
3102 		/* Replay the request. */
3103 		goto replay;
3104 	return err;
3105 
3106 errout_block_locked:
3107 	mutex_unlock(&block->lock);
3108 	goto errout_block;
3109 }
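
/* For reference, chain objects managed here map to iproute2 commands
 * such as (illustrative):
 *
 *	tc chain add dev eth0 ingress chain 100 \
 *		protocol ip flower dst_ip 192.0.2.0/24
 *
 * where the optional classifier part installs a chain template via
 * tc_chain_tmplt_add() above.
 */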
3110 
3111 /* called with RTNL */
3112 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
3113 {
3114 	struct net *net = sock_net(skb->sk);
3115 	struct nlattr *tca[TCA_MAX + 1];
3116 	struct Qdisc *q = NULL;
3117 	struct tcf_block *block;
3118 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
3119 	struct tcf_chain *chain;
3120 	long index_start;
3121 	long index;
3122 	int err;
3123 
3124 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
3125 		return skb->len;
3126 
3127 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
3128 				     rtm_tca_policy, cb->extack);
3129 	if (err)
3130 		return err;
3131 
3132 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
3133 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
3134 		if (!block)
3135 			goto out;
3136 	} else {
3137 		const struct Qdisc_class_ops *cops;
3138 		struct net_device *dev;
3139 		unsigned long cl = 0;
3140 
3141 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
3142 		if (!dev)
3143 			return skb->len;
3144 
3145 		if (!tcm->tcm_parent)
3146 			q = rtnl_dereference(dev->qdisc);
3147 		else
3148 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3149 
3150 		if (!q)
3151 			goto out;
3152 		cops = q->ops->cl_ops;
3153 		if (!cops)
3154 			goto out;
3155 		if (!cops->tcf_block)
3156 			goto out;
3157 		if (TC_H_MIN(tcm->tcm_parent)) {
3158 			cl = cops->find(q, tcm->tcm_parent);
3159 			if (cl == 0)
3160 				goto out;
3161 		}
3162 		block = cops->tcf_block(q, cl, NULL);
3163 		if (!block)
3164 			goto out;
3165 		if (tcf_block_shared(block))
3166 			q = NULL;
3167 	}
3168 
3169 	index_start = cb->args[0];
3170 	index = 0;
3171 
3172 	mutex_lock(&block->lock);
3173 	list_for_each_entry(chain, &block->chain_list, list) {
3174 		if ((tca[TCA_CHAIN] &&
3175 		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3176 			continue;
3177 		if (index < index_start) {
3178 			index++;
3179 			continue;
3180 		}
3181 		if (tcf_chain_held_by_acts_only(chain))
3182 			continue;
3183 		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3184 					 chain->index, net, skb, block,
3185 					 NETLINK_CB(cb->skb).portid,
3186 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3187 					 RTM_NEWCHAIN, NULL);
3188 		if (err <= 0)
3189 			break;
3190 		index++;
3191 	}
3192 	mutex_unlock(&block->lock);
3193 
3194 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3195 		tcf_block_refcnt_put(block, true);
3196 	cb->args[0] = index;
3197 
3198 out:
3199 	/* If we made no progress, the error (EMSGSIZE) is real */
3200 	if (skb->len == 0 && err)
3201 		return err;
3202 	return skb->len;
3203 }
3204 
3205 int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
3206 		     int police, struct tcf_proto *tp, u32 handle,
3207 		     bool use_action_miss)
3208 {
3209 	int err = 0;
3210 
3211 #ifdef CONFIG_NET_CLS_ACT
3212 	exts->type = 0;
3213 	exts->nr_actions = 0;
3214 	exts->miss_cookie_node = NULL;
3215 	/* Note: we do not yet own a reference on net.
3216 	 * This reference might be taken later from tcf_exts_get_net().
3217 	 */
3218 	exts->net = net;
3219 	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
3220 				GFP_KERNEL);
3221 	if (!exts->actions)
3222 		return -ENOMEM;
3223 #endif
3224 
3225 	exts->action = action;
3226 	exts->police = police;
3227 
3228 	if (!use_action_miss)
3229 		return 0;
3230 
3231 	err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
3232 	if (err)
3233 		goto err_miss_alloc;
3234 
3235 	return 0;
3236 
3237 err_miss_alloc:
3238 	tcf_exts_destroy(exts);
3239 #ifdef CONFIG_NET_CLS_ACT
3240 	exts->actions = NULL;
3241 #endif
3242 	return err;
3243 }
3244 EXPORT_SYMBOL(tcf_exts_init_ex);
3245 
3246 void tcf_exts_destroy(struct tcf_exts *exts)
3247 {
3248 	tcf_exts_miss_cookie_base_destroy(exts);
3249 
3250 #ifdef CONFIG_NET_CLS_ACT
3251 	if (exts->actions) {
3252 		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3253 		kfree(exts->actions);
3254 	}
3255 	exts->nr_actions = 0;
3256 #endif
3257 }
3258 EXPORT_SYMBOL(tcf_exts_destroy);
3259 
3260 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3261 			 struct nlattr *rate_tlv, struct tcf_exts *exts,
3262 			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3263 {
3264 #ifdef CONFIG_NET_CLS_ACT
3265 	{
3266 		int init_res[TCA_ACT_MAX_PRIO] = {};
3267 		struct tc_action *act;
3268 		size_t attr_size = 0;
3269 
3270 		if (exts->police && tb[exts->police]) {
3271 			struct tc_action_ops *a_o;
3272 
3273 			a_o = tc_action_load_ops(tb[exts->police], true,
3274 						 !(flags & TCA_ACT_FLAGS_NO_RTNL),
3275 						 extack);
3276 			if (IS_ERR(a_o))
3277 				return PTR_ERR(a_o);
3278 			flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3279 			act = tcf_action_init_1(net, tp, tb[exts->police],
3280 						rate_tlv, a_o, init_res, flags,
3281 						extack);
3282 			module_put(a_o->owner);
3283 			if (IS_ERR(act))
3284 				return PTR_ERR(act);
3285 
3286 			act->type = exts->type = TCA_OLD_COMPAT;
3287 			exts->actions[0] = act;
3288 			exts->nr_actions = 1;
3289 			tcf_idr_insert_many(exts->actions);
3290 		} else if (exts->action && tb[exts->action]) {
3291 			int err;
3292 
3293 			flags |= TCA_ACT_FLAGS_BIND;
3294 			err = tcf_action_init(net, tp, tb[exts->action],
3295 					      rate_tlv, exts->actions, init_res,
3296 					      &attr_size, flags, fl_flags,
3297 					      extack);
3298 			if (err < 0)
3299 				return err;
3300 			exts->nr_actions = err;
3301 		}
3302 	}
3303 #else
3304 	if ((exts->action && tb[exts->action]) ||
3305 	    (exts->police && tb[exts->police])) {
3306 		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3307 		return -EOPNOTSUPP;
3308 	}
3309 #endif
3310 
3311 	return 0;
3312 }
3313 EXPORT_SYMBOL(tcf_exts_validate_ex);
3314 
3315 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3316 		      struct nlattr *rate_tlv, struct tcf_exts *exts,
3317 		      u32 flags, struct netlink_ext_ack *extack)
3318 {
3319 	return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3320 				    flags, 0, extack);
3321 }
3322 EXPORT_SYMBOL(tcf_exts_validate);
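
/* Typical classifier-side lifecycle of a tcf_exts (editorial sketch;
 * the attribute names are borrowed from cls_flower for illustration,
 * error handling elided):
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_FLOWER_ACT, 0);
 *	...
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
 *	...
 *	tcf_exts_destroy(&f->exts);
 */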
3323 
3324 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3325 {
3326 #ifdef CONFIG_NET_CLS_ACT
3327 	struct tcf_exts old = *dst;
3328 
3329 	*dst = *src;
3330 	tcf_exts_destroy(&old);
3331 #endif
3332 }
3333 EXPORT_SYMBOL(tcf_exts_change);
3334 
3335 #ifdef CONFIG_NET_CLS_ACT
3336 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3337 {
3338 	if (exts->nr_actions == 0)
3339 		return NULL;
3340 	else
3341 		return exts->actions[0];
3342 }
3343 #endif
3344 
3345 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3346 {
3347 #ifdef CONFIG_NET_CLS_ACT
3348 	struct nlattr *nest;
3349 
3350 	if (exts->action && tcf_exts_has_actions(exts)) {
3351 		/*
3352 		 * Again for backward-compatible mode: we want to
3353 		 * work with both old and new modes of entering
3354 		 * tc data even if iproute2 is newer - jhs
3355 		 */
3356 		if (exts->type != TCA_OLD_COMPAT) {
3357 			nest = nla_nest_start_noflag(skb, exts->action);
3358 			if (nest == NULL)
3359 				goto nla_put_failure;
3360 
3361 			if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3362 			    < 0)
3363 				goto nla_put_failure;
3364 			nla_nest_end(skb, nest);
3365 		} else if (exts->police) {
3366 			struct tc_action *act = tcf_exts_first_act(exts);
3367 			nest = nla_nest_start_noflag(skb, exts->police);
3368 			if (nest == NULL || !act)
3369 				goto nla_put_failure;
3370 			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3371 				goto nla_put_failure;
3372 			nla_nest_end(skb, nest);
3373 		}
3374 	}
3375 	return 0;
3376 
3377 nla_put_failure:
3378 	nla_nest_cancel(skb, nest);
3379 	return -1;
3380 #else
3381 	return 0;
3382 #endif
3383 }
3384 EXPORT_SYMBOL(tcf_exts_dump);
3385 
3386 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3387 {
3388 #ifdef CONFIG_NET_CLS_ACT
3389 	struct nlattr *nest;
3390 
3391 	if (!exts->action || !tcf_exts_has_actions(exts))
3392 		return 0;
3393 
3394 	nest = nla_nest_start_noflag(skb, exts->action);
3395 	if (!nest)
3396 		goto nla_put_failure;
3397 
3398 	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3399 		goto nla_put_failure;
3400 	nla_nest_end(skb, nest);
3401 	return 0;
3402 
3403 nla_put_failure:
3404 	nla_nest_cancel(skb, nest);
3405 	return -1;
3406 #else
3407 	return 0;
3408 #endif
3409 }
3410 EXPORT_SYMBOL(tcf_exts_terse_dump);
3411 
3412 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3413 {
3414 #ifdef CONFIG_NET_CLS_ACT
3415 	struct tc_action *a = tcf_exts_first_act(exts);
3416 	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3417 		return -1;
3418 #endif
3419 	return 0;
3420 }
3421 EXPORT_SYMBOL(tcf_exts_dump_stats);
3422 
3423 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3424 {
3425 	if (*flags & TCA_CLS_FLAGS_IN_HW)
3426 		return;
3427 	*flags |= TCA_CLS_FLAGS_IN_HW;
3428 	atomic_inc(&block->offloadcnt);
3429 }
3430 
3431 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3432 {
3433 	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3434 		return;
3435 	*flags &= ~TCA_CLS_FLAGS_IN_HW;
3436 	atomic_dec(&block->offloadcnt);
3437 }
3438 
3439 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3440 				      struct tcf_proto *tp, u32 *cnt,
3441 				      u32 *flags, u32 diff, bool add)
3442 {
3443 	lockdep_assert_held(&block->cb_lock);
3444 
3445 	spin_lock(&tp->lock);
3446 	if (add) {
3447 		if (!*cnt)
3448 			tcf_block_offload_inc(block, flags);
3449 		*cnt += diff;
3450 	} else {
3451 		*cnt -= diff;
3452 		if (!*cnt)
3453 			tcf_block_offload_dec(block, flags);
3454 	}
3455 	spin_unlock(&tp->lock);
3456 }
3457 
3458 static void
3459 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3460 			 u32 *cnt, u32 *flags)
3461 {
3462 	lockdep_assert_held(&block->cb_lock);
3463 
3464 	spin_lock(&tp->lock);
3465 	tcf_block_offload_dec(block, flags);
3466 	*cnt = 0;
3467 	spin_unlock(&tp->lock);
3468 }
3469 
3470 static int
3471 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3472 		   void *type_data, bool err_stop)
3473 {
3474 	struct flow_block_cb *block_cb;
3475 	int ok_count = 0;
3476 	int err;
3477 
3478 	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3479 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
3480 		if (err) {
3481 			if (err_stop)
3482 				return err;
3483 		} else {
3484 			ok_count++;
3485 		}
3486 	}
3487 	return ok_count;
3488 }
3489 
3490 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3491 		     void *type_data, bool err_stop, bool rtnl_held)
3492 {
3493 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3494 	int ok_count;
3495 
3496 retry:
3497 	if (take_rtnl)
3498 		rtnl_lock();
3499 	down_read(&block->cb_lock);
3500 	/* Need to obtain the rtnl lock if the block is bound to devs that
3501 	 * require it. In block bind code cb_lock is obtained while holding
3502 	 * rtnl, so we must obtain the locks in the same order here.
3503 	 */
3504 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3505 		up_read(&block->cb_lock);
3506 		take_rtnl = true;
3507 		goto retry;
3508 	}
3509 
3510 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3511 
3512 	up_read(&block->cb_lock);
3513 	if (take_rtnl)
3514 		rtnl_unlock();
3515 	return ok_count;
3516 }
3517 EXPORT_SYMBOL(tc_setup_cb_call);
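
/* Note on the return value (editorial): __tc_setup_cb_call() returns the
 * number of callbacks that succeeded, or the first error when err_stop
 * is set, so callers of tc_setup_cb_call() can distinguish "offloaded by
 * at least one device" (> 0) from "no device took it" (0).
 */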
3518 
3519 /* Non-destructive filter add. If a filter that wasn't already in hardware is
3520  * successfully offloaded, increment the block offload counter. On failure, a
3521  * previously offloaded filter is considered to be intact and the offload
3522  * counter is not decremented.
3523  */
3524 
3525 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3526 		    enum tc_setup_type type, void *type_data, bool err_stop,
3527 		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3528 {
3529 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3530 	int ok_count;
3531 
3532 retry:
3533 	if (take_rtnl)
3534 		rtnl_lock();
3535 	down_read(&block->cb_lock);
3536 	/* Need to obtain the rtnl lock if the block is bound to devs that
3537 	 * require it. In block bind code cb_lock is obtained while holding
3538 	 * rtnl, so we must obtain the locks in the same order here.
3539 	 */
3540 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3541 		up_read(&block->cb_lock);
3542 		take_rtnl = true;
3543 		goto retry;
3544 	}
3545 
3546 	/* Make sure all netdevs sharing this block are offload-capable. */
3547 	if (block->nooffloaddevcnt && err_stop) {
3548 		ok_count = -EOPNOTSUPP;
3549 		goto err_unlock;
3550 	}
3551 
3552 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3553 	if (ok_count < 0)
3554 		goto err_unlock;
3555 
3556 	if (tp->ops->hw_add)
3557 		tp->ops->hw_add(tp, type_data);
3558 	if (ok_count > 0)
3559 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3560 					  ok_count, true);
3561 err_unlock:
3562 	up_read(&block->cb_lock);
3563 	if (take_rtnl)
3564 		rtnl_unlock();
3565 	return min(ok_count, 0);
3566 }
3567 EXPORT_SYMBOL(tc_setup_cb_add);
3568 
3569 /* Destructive filter replace. If a filter that wasn't already in hardware is
3570  * successfully offloaded, increment the block offload counter. On failure, a
3571  * previously offloaded filter is considered to be destroyed and the offload
3572  * counter is decremented.
3573  */
3574 
3575 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3576 			enum tc_setup_type type, void *type_data, bool err_stop,
3577 			u32 *old_flags, unsigned int *old_in_hw_count,
3578 			u32 *new_flags, unsigned int *new_in_hw_count,
3579 			bool rtnl_held)
3580 {
3581 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3582 	int ok_count;
3583 
3584 retry:
3585 	if (take_rtnl)
3586 		rtnl_lock();
3587 	down_read(&block->cb_lock);
3588 	/* Need to obtain the rtnl lock if the block is bound to devs that
3589 	 * require it. In block bind code cb_lock is obtained while holding
3590 	 * rtnl, so we must obtain the locks in the same order here.
3591 	 */
3592 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3593 		up_read(&block->cb_lock);
3594 		take_rtnl = true;
3595 		goto retry;
3596 	}
3597 
3598 	/* Make sure all netdevs sharing this block are offload-capable. */
3599 	if (block->nooffloaddevcnt && err_stop) {
3600 		ok_count = -EOPNOTSUPP;
3601 		goto err_unlock;
3602 	}
3603 
3604 	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3605 	if (tp->ops->hw_del)
3606 		tp->ops->hw_del(tp, type_data);
3607 
3608 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3609 	if (ok_count < 0)
3610 		goto err_unlock;
3611 
3612 	if (tp->ops->hw_add)
3613 		tp->ops->hw_add(tp, type_data);
3614 	if (ok_count > 0)
3615 		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3616 					  new_flags, ok_count, true);
3617 err_unlock:
3618 	up_read(&block->cb_lock);
3619 	if (take_rtnl)
3620 		rtnl_unlock();
3621 	return min(ok_count, 0);
3622 }
3623 EXPORT_SYMBOL(tc_setup_cb_replace);
3624 
3625 /* Destroy the filter and decrement the block offload counter, if the filter
3626  * was previously offloaded.
3627  */
3628 
3629 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3630 			enum tc_setup_type type, void *type_data, bool err_stop,
3631 			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3632 {
3633 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3634 	int ok_count;
3635 
3636 retry:
3637 	if (take_rtnl)
3638 		rtnl_lock();
3639 	down_read(&block->cb_lock);
3640 	/* Need to obtain the rtnl lock if the block is bound to devs that
3641 	 * require it. In block bind code cb_lock is obtained while holding
3642 	 * rtnl, so we must obtain the locks in the same order here.
3643 	 */
3644 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3645 		up_read(&block->cb_lock);
3646 		take_rtnl = true;
3647 		goto retry;
3648 	}
3649 
3650 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3651 
3652 	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3653 	if (tp->ops->hw_del)
3654 		tp->ops->hw_del(tp, type_data);
3655 
3656 	up_read(&block->cb_lock);
3657 	if (take_rtnl)
3658 		rtnl_unlock();
3659 	return min(ok_count, 0);
3660 }
3661 EXPORT_SYMBOL(tc_setup_cb_destroy);
3662 
3663 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3664 			  bool add, flow_setup_cb_t *cb,
3665 			  enum tc_setup_type type, void *type_data,
3666 			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3667 {
3668 	int err = cb(type, type_data, cb_priv);
3669 
3670 	if (err) {
3671 		if (add && tc_skip_sw(*flags))
3672 			return err;
3673 	} else {
3674 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3675 					  add);
3676 	}
3677 
3678 	return 0;
3679 }
3680 EXPORT_SYMBOL(tc_setup_cb_reoffload);
3681 
3682 static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
3683 				   const struct tc_action *act)
3684 {
3685 	struct tc_cookie *user_cookie;
3686 	int err = 0;
3687 
3688 	rcu_read_lock();
3689 	user_cookie = rcu_dereference(act->user_cookie);
3690 	if (user_cookie) {
3691 		entry->user_cookie = flow_action_cookie_create(user_cookie->data,
3692 							       user_cookie->len,
3693 							       GFP_ATOMIC);
3694 		if (!entry->user_cookie)
3695 			err = -ENOMEM;
3696 	}
3697 	rcu_read_unlock();
3698 	return err;
3699 }
3700 
3701 static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
3702 {
3703 	flow_action_cookie_destroy(entry->user_cookie);
3704 }
3705 
3706 void tc_cleanup_offload_action(struct flow_action *flow_action)
3707 {
3708 	struct flow_action_entry *entry;
3709 	int i;
3710 
3711 	flow_action_for_each(i, entry, flow_action) {
3712 		tcf_act_put_user_cookie(entry);
3713 		if (entry->destructor)
3714 			entry->destructor(entry->destructor_priv);
3715 	}
3716 }
3717 EXPORT_SYMBOL(tc_cleanup_offload_action);
3718 
3719 static int tc_setup_offload_act(struct tc_action *act,
3720 				struct flow_action_entry *entry,
3721 				u32 *index_inc,
3722 				struct netlink_ext_ack *extack)
3723 {
3724 #ifdef CONFIG_NET_CLS_ACT
3725 	if (act->ops->offload_act_setup) {
3726 		return act->ops->offload_act_setup(act, entry, index_inc, true,
3727 						   extack);
3728 	} else {
3729 		NL_SET_ERR_MSG(extack, "Action does not support offload");
3730 		return -EOPNOTSUPP;
3731 	}
3732 #else
3733 	return 0;
3734 #endif
3735 }
3736 
3737 int tc_setup_action(struct flow_action *flow_action,
3738 		    struct tc_action *actions[],
3739 		    u32 miss_cookie_base,
3740 		    struct netlink_ext_ack *extack)
3741 {
3742 	int i, j, k, index, err = 0;
3743 	struct tc_action *act;
3744 
3745 	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3746 	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3747 	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3748 
3749 	if (!actions)
3750 		return 0;
3751 
3752 	j = 0;
3753 	tcf_act_for_each_action(i, act, actions) {
3754 		struct flow_action_entry *entry;
3755 
3756 		entry = &flow_action->entries[j];
3757 		spin_lock_bh(&act->tcfa_lock);
3758 		err = tcf_act_get_user_cookie(entry, act);
3759 		if (err)
3760 			goto err_out_locked;
3761 
3762 		index = 0;
3763 		err = tc_setup_offload_act(act, entry, &index, extack);
3764 		if (err)
3765 			goto err_out_locked;
3766 
3767 		for (k = 0; k < index ; k++) {
3768 			entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3769 			entry[k].hw_index = act->tcfa_index;
3770 			entry[k].cookie = (unsigned long)act;
3771 			entry[k].miss_cookie =
3772 				tcf_exts_miss_cookie_get(miss_cookie_base, i);
3773 		}
3774 
3775 		j += index;
3776 
3777 		spin_unlock_bh(&act->tcfa_lock);
3778 	}
3779 
3780 err_out:
3781 	if (err)
3782 		tc_cleanup_offload_action(flow_action);
3783 
3784 	return err;
3785 err_out_locked:
3786 	spin_unlock_bh(&act->tcfa_lock);
3787 	goto err_out;
3788 }
3789 
3790 int tc_setup_offload_action(struct flow_action *flow_action,
3791 			    const struct tcf_exts *exts,
3792 			    struct netlink_ext_ack *extack)
3793 {
3794 #ifdef CONFIG_NET_CLS_ACT
3795 	u32 miss_cookie_base;
3796 
3797 	if (!exts)
3798 		return 0;
3799 
3800 	miss_cookie_base = exts->miss_cookie_node ?
3801 			   exts->miss_cookie_node->miss_cookie_base : 0;
3802 	return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
3803 			       extack);
3804 #else
3805 	return 0;
3806 #endif
3807 }
3808 EXPORT_SYMBOL(tc_setup_offload_action);
3809 
3810 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3811 {
3812 	unsigned int num_acts = 0;
3813 	struct tc_action *act;
3814 	int i;
3815 
3816 	tcf_exts_for_each_action(i, act, exts) {
3817 		if (is_tcf_pedit(act))
3818 			num_acts += tcf_pedit_nkeys(act);
3819 		else
3820 			num_acts++;
3821 	}
3822 	return num_acts;
3823 }
3824 EXPORT_SYMBOL(tcf_exts_num_actions);
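
/* Expected caller pattern for the helper above (editorial sketch,
 * mirrors cls_flower): the count sizes the flow_action entries array
 * that tc_setup_action() later fills, expanding a pedit action into one
 * entry per key:
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(exts));
 *	...
 *	err = tc_setup_offload_action(&rule->action, exts, extack);
 */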
3825 
3826 #ifdef CONFIG_NET_CLS_ACT
3827 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3828 					u32 *p_block_index,
3829 					struct netlink_ext_ack *extack)
3830 {
3831 	*p_block_index = nla_get_u32(block_index_attr);
3832 	if (!*p_block_index) {
3833 		NL_SET_ERR_MSG(extack, "Block number may not be zero");
3834 		return -EINVAL;
3835 	}
3836 
3837 	return 0;
3838 }
3839 
3840 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3841 		    enum flow_block_binder_type binder_type,
3842 		    struct nlattr *block_index_attr,
3843 		    struct netlink_ext_ack *extack)
3844 {
3845 	u32 block_index;
3846 	int err;
3847 
3848 	if (!block_index_attr)
3849 		return 0;
3850 
3851 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3852 	if (err)
3853 		return err;
3854 
3855 	qe->info.binder_type = binder_type;
3856 	qe->info.chain_head_change = tcf_chain_head_change_dflt;
3857 	qe->info.chain_head_change_priv = &qe->filter_chain;
3858 	qe->info.block_index = block_index;
3859 
3860 	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3861 }
3862 EXPORT_SYMBOL(tcf_qevent_init);
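
/* A minimal sketch of a qdisc binding a qevent at init time, modeled on how
 * sch_red wires up its early_drop block; "q" is the qdisc private data and
 * "tb" the parsed attribute table, both assumptions here:
 */
#if 0	/* illustrative sketch, not compiled */
	err = tcf_qevent_init(&q->qe_early_drop, sch,
			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;
#endif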
3863 
3864 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3865 {
3866 	if (qe->info.block_index)
3867 		tcf_block_put_ext(qe->block, sch, &qe->info);
3868 }
3869 EXPORT_SYMBOL(tcf_qevent_destroy);
3870 
3871 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3872 			       struct netlink_ext_ack *extack)
3873 {
3874 	u32 block_index;
3875 	int err;
3876 
3877 	if (!block_index_attr)
3878 		return 0;
3879 
3880 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3881 	if (err)
3882 		return err;
3883 
3884 	/* Reject both a newly added block and a change to a different block. */
3885 	if (block_index != qe->info.block_index) {
3886 		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3887 		return -EINVAL;
3888 	}
3889 
3890 	return 0;
3891 }
3892 EXPORT_SYMBOL(tcf_qevent_validate_change);
3893 
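/* Run the qevent's filter chain on skb.  Returns skb if it should continue
 * down the regular path, or NULL if a verdict consumed it, in which case
 * *ret carries the code the caller folds into its own return value.
 */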
3894 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3895 				  struct sk_buff **to_free, int *ret)
3896 {
3897 	struct tcf_result cl_res;
3898 	struct tcf_proto *fl;
3899 
3900 	if (!qe->info.block_index)
3901 		return skb;
3902 
3903 	fl = rcu_dereference_bh(qe->filter_chain);
3904 
3905 	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
3906 	case TC_ACT_SHOT:
3907 		qdisc_qstats_drop(sch);
3908 		__qdisc_drop(skb, to_free);
3909 		*ret = __NET_XMIT_BYPASS;
3910 		return NULL;
3911 	case TC_ACT_STOLEN:
3912 	case TC_ACT_QUEUED:
3913 	case TC_ACT_TRAP:
3914 		__qdisc_drop(skb, to_free);
3915 		*ret = __NET_XMIT_STOLEN;
3916 		return NULL;
3917 	case TC_ACT_REDIRECT:
3918 		skb_do_redirect(skb);
3919 		*ret = __NET_XMIT_STOLEN;
3920 		return NULL;
3921 	}
3922 
3923 	return skb;
3924 }
3925 EXPORT_SYMBOL(tcf_qevent_handle);
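
/* A minimal sketch of invoking a qevent from an enqueue path, again modeled
 * on sch_red:
 */
#if 0	/* illustrative sketch, not compiled */
	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
	if (!skb)
		return NET_XMIT_CN | ret;
#endif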
3926 
3927 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3928 {
3929 	if (!qe->info.block_index)
3930 		return 0;
3931 	return nla_put_u32(skb, attr_name, qe->info.block_index);
3932 }
3933 EXPORT_SYMBOL(tcf_qevent_dump);
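
/* The remaining qevent helpers slot into the other qdisc ops; a minimal
 * sketch with the same hypothetical "q" and "tb" as above:
 */
#if 0	/* illustrative sketch, not compiled */
	/* In ->change(): refuse rebinding to a different block. */
	err = tcf_qevent_validate_change(&q->qe_early_drop,
					 tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	/* In ->dump(): report the bound block index, if any. */
	if (tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop))
		goto nla_put_failure;

	/* In ->destroy(): release the block binding. */
	tcf_qevent_destroy(&q->qe_early_drop, sch);
#endif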
3934 #endif
3935 
3936 static __net_init int tcf_net_init(struct net *net)
3937 {
3938 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3939 
3940 	spin_lock_init(&tn->idr_lock);
3941 	idr_init(&tn->idr);
3942 	return 0;
3943 }
3944 
3945 static void __net_exit tcf_net_exit(struct net *net)
3946 {
3947 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3948 
3949 	idr_destroy(&tn->idr);
3950 }
3951 
3952 static struct pernet_operations tcf_net_ops = {
3953 	.init = tcf_net_init,
3954 	.exit = tcf_net_exit,
3955 	.id   = &tcf_net_id,
3956 	.size = sizeof(struct tcf_net),
3957 };
3958 
3959 static int __init tc_filter_init(void)
3960 {
3961 	int err;
3962 
3963 	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3964 	if (!tc_filter_wq)
3965 		return -ENOMEM;
3966 
3967 	err = register_pernet_subsys(&tcf_net_ops);
3968 	if (err)
3969 		goto err_register_pernet_subsys;
3970 
3971 	xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);
3972 
3973 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3974 		      RTNL_FLAG_DOIT_UNLOCKED);
3975 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3976 		      RTNL_FLAG_DOIT_UNLOCKED);
3977 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3978 		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3979 	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3980 	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3981 	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3982 		      tc_dump_chain, 0);
3983 
3984 	return 0;
3985 
3986 err_register_pernet_subsys:
3987 	destroy_workqueue(tc_filter_wq);
3988 	return err;
3989 }
3990 
3991 subsys_initcall(tc_filter_init);
3992