// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif

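/*
 * Usage note (illustrative, not a call site in this file): code that
 * starts producing tc skb extensions is expected to bracket that usage
 * with the helpers above, so the static branch in the datapath stays
 * patched out while nobody needs it:
 *
 *	tc_skb_ext_tc_enable();
 *	... install state that makes use of TC_SKB_EXT ...
 *	tc_skb_ext_tc_disable();
 *
 * The helpers nest via static_branch_inc()/static_branch_dec(), so each
 * enable must be paired with exactly one disable.
 */
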
static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

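/*
 * Caller note: -EAGAIN from tcf_proto_lookup_ops() does not mean failure.
 * It signals that RTNL may have been dropped for the module load, so the
 * whole netlink request must be replayed from the top, roughly (sketch,
 * mirroring the replay label in tc_new_tfilter() below):
 *
 *	if (PTR_ERR(ops) == -EAGAIN)
 *		goto replay;
 */
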
/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

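/*
 * A minimal sketch of how a classifier module registers itself; the
 * "foo" names are illustrative only (see cls_basic.c and friends for
 * real instances):
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init init_foo(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 */
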
196 
197 void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
198 {
199 	struct tcf_proto_ops *t;
200 	int rc = -ENOENT;
201 
202 	/* Wait for outstanding call_rcu()s, if any, from a
203 	 * tcf_proto_ops's destroy() handler.
204 	 */
205 	rcu_barrier();
206 	flush_workqueue(tc_filter_wq);
207 
208 	write_lock(&cls_mod_lock);
209 	list_for_each_entry(t, &tcf_proto_base, head) {
210 		if (t == ops) {
211 			list_del(&t->head);
212 			rc = 0;
213 			break;
214 		}
215 	}
216 	write_unlock(&cls_mod_lock);
217 
218 	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
219 }
220 EXPORT_SYMBOL(unregister_tcf_proto_ops);
221 
222 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
223 {
224 	INIT_RCU_WORK(rwork, func);
225 	return queue_rcu_work(tc_filter_wq, rwork);
226 }
227 EXPORT_SYMBOL(tcf_queue_work);
228 
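/*
 * Typical usage (sketch): a classifier embeds a struct rcu_work in its
 * filter structure and defers freeing until an RCU grace period has
 * elapsed, e.g.:
 *
 *	tcf_queue_work(&f->rwork, foo_delete_filter_work);
 *
 * where foo_delete_filter_work() is a hypothetical work function doing
 * the actual kfree(). Because unregister_tcf_proto_ops() flushes
 * tc_filter_wq, such work is guaranteed to complete before the
 * classifier module can be unloaded.
 */
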
/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

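/*
 * Worked example of the arithmetic: for tp->prio == 0xC0000000,
 * first == 0xBFFFFFFF and TC_H_MAJ(first) == 0xBFFF0000, i.e. the next
 * lower 16-bit major value. With no tp at all the default 0xC0000000
 * is used, so automatically allocated priorities count down from there.
 */
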
static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send a notification only when we get the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it, and the user
	 * need not know about it.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

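/*
 * tcf_chain_get_by_act()/tcf_chain_put_by_act() (the latter is defined
 * below) are the entry points used by actions, e.g. "gact goto chain X",
 * to pin the chain that is their jump target. Such action-only
 * references keep the chain alive but, per tcf_chain_held_by_acts_only()
 * above, do not make the chain visible to the user until a regular
 * (non-action) reference appears.
 */
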
static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse the bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion, or must ensure that no concurrent chain
 * modification is possible. Note that netlink dump callbacks cannot guarantee
 * a consistent dump, because the rtnl lock is released each time the skb is
 * filled with data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

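/*
 * Canonical iteration pattern (this is how tcf_block_flush_all_chains()
 * below uses it): feeding the previous chain back in releases its
 * reference, so a complete walk needs no explicit puts:
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		...;
 */
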
static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion, or must ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump, because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

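/*
 * Same reference discipline as tcf_get_next_chain(): the previous tp is
 * put on every step, so only a caller that breaks out of the walk early
 * still owns a reference and must drop it itself, e.g. (sketch; callers
 * outside this file cannot use the static tcf_proto_put() directly):
 *
 *	for (tp = tcf_get_next_proto(chain, NULL); tp;
 *	     tp = tcf_get_next_proto(chain, tp)) {
 *		if (done)	// hypothetical stop condition
 *			break;	// tp reference is still held here
 *	}
 */
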
static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */
1042 
1043 static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
1044 			    u32 *parent, int ifindex, bool rtnl_held,
1045 			    struct netlink_ext_ack *extack)
1046 {
1047 	const struct Qdisc_class_ops *cops;
1048 	struct net_device *dev;
1049 	int err = 0;
1050 
1051 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1052 		return 0;
1053 
1054 	rcu_read_lock();
1055 
1056 	/* Find link */
1057 	dev = dev_get_by_index_rcu(net, ifindex);
1058 	if (!dev) {
1059 		rcu_read_unlock();
1060 		return -ENODEV;
1061 	}
1062 
1063 	/* Find qdisc */
1064 	if (!*parent) {
1065 		*q = rcu_dereference(dev->qdisc);
1066 		*parent = (*q)->handle;
1067 	} else {
1068 		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
1069 		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block in order to support
		 * execution of the cls API rules update path without the rtnl
		 * lock. The caller must release the block when finished with
		 * it. The 'if' branch of this conditional obtains its
		 * reference by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when the last chain is freed. However, if
		 * chain_list is empty, the block has to be deallocated
		 * manually. Once the block reference counter has reached 0,
		 * it is no longer possible to increment it or to add new
		 * chains to the block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);

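/*
 * A minimal sketch of the qdisc side (illustrative field names): a
 * classful qdisc typically calls tcf_block_get() from its ->init() and
 * tcf_block_put() from its ->destroy():
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_put(q->block);
 *
 * Here q->filter_list is the tcf_proto __rcu * head that the chain0
 * head-change callback installed above keeps up to date.
 */
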
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int ret;

	if (block) {
		ext = skb_ext_find(skb, TC_SKB_EXT);

		if (ext && ext->chain) {
			struct tcf_chain *fchain;

			fchain = tcf_chain_lookup_rcu(block, ext->chain);
			if (!fchain)
				return TC_ACT_SHOT;

			/* Consume, so cloned/redirect skbs won't inherit ext */
			skb_ext_del(skb, TC_SKB_EXT);

			tp = rcu_dereference_bh(fchain->filter_chain);
			last_executed_chain = fchain->index;
		}
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
			     &last_executed_chain);

	if (tc_skb_ext_tc_enabled()) {
		/* If we missed on some chain */
		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
			struct tc_skb_cb *cb = tc_skb_cb(skb);

			ext = tc_skb_ext_alloc(skb);
			if (WARN_ON_ONCE(!ext))
				return TC_ACT_SHOT;
			ext->chain = last_executed_chain;
			ext->mru = cb->mru;
			ext->post_ct = cb->post_ct;
			ext->post_ct_snat = cb->post_ct_snat;
			ext->post_ct_dnat = cb->post_ct_dnat;
			ext->zone = cb->zone;
		}
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify);

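/*
 * Datapath note: tcf_classify() is called from qdisc/clsact hot paths
 * under RCU bh protection, roughly (sketch with illustrative fields):
 *
 *	struct tcf_result res;
 *	int ret;
 *
 *	ret = tcf_classify(skb, NULL, rcu_dereference_bh(q->filter_list),
 *			   &res, false);
 *	if (ret == TC_ACT_SHOT)
 *		... drop the skb ...
 *
 * TC_ACT_UNSPEC means "no match, continue lookup". With
 * CONFIG_NET_TC_SKB_EXT, passing a non-NULL block additionally lets a
 * chain index recorded in the skb extension resume classification where
 * hardware left off.
 */
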
1658 struct tcf_chain_info {
1659 	struct tcf_proto __rcu **pprev;
1660 	struct tcf_proto __rcu *next;
1661 };
1662 
1663 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1664 					   struct tcf_chain_info *chain_info)
1665 {
1666 	return tcf_chain_dereference(*chain_info->pprev, chain);
1667 }
1668 
1669 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1670 			       struct tcf_chain_info *chain_info,
1671 			       struct tcf_proto *tp)
1672 {
1673 	if (chain->flushing)
1674 		return -EAGAIN;
1675 
1676 	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1677 	if (*chain_info->pprev == chain->filter_chain)
1678 		tcf_chain0_head_change(chain, tp);
1679 	tcf_proto_get(tp);
1680 	rcu_assign_pointer(*chain_info->pprev, tp);
1681 
1682 	return 0;
1683 }
1684 
1685 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1686 				struct tcf_chain_info *chain_info,
1687 				struct tcf_proto *tp)
1688 {
1689 	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1690 
1691 	tcf_proto_mark_delete(tp);
1692 	if (tp == chain->filter_chain)
1693 		tcf_chain0_head_change(chain, next);
1694 	RCU_INIT_POINTER(*chain_info->pprev, next);
1695 }
1696 
1697 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1698 					   struct tcf_chain_info *chain_info,
1699 					   u32 protocol, u32 prio,
1700 					   bool prio_allocate);
1701 
1702 /* Try to insert new proto.
1703  * If proto with specified priority already exists, free new proto
1704  * and return existing one.
1705  */
1706 
1707 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1708 						    struct tcf_proto *tp_new,
1709 						    u32 protocol, u32 prio,
1710 						    bool rtnl_held)
1711 {
1712 	struct tcf_chain_info chain_info;
1713 	struct tcf_proto *tp;
1714 	int err = 0;
1715 
1716 	mutex_lock(&chain->filter_chain_lock);
1717 
1718 	if (tcf_proto_exists_destroying(chain, tp_new)) {
1719 		mutex_unlock(&chain->filter_chain_lock);
1720 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1721 		return ERR_PTR(-EAGAIN);
1722 	}
1723 
1724 	tp = tcf_chain_tp_find(chain, &chain_info,
1725 			       protocol, prio, false);
1726 	if (!tp)
1727 		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1728 	mutex_unlock(&chain->filter_chain_lock);
1729 
1730 	if (tp) {
1731 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1732 		tp_new = tp;
1733 	} else if (err) {
1734 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1735 		tp_new = ERR_PTR(err);
1736 	}
1737 
1738 	return tp_new;
1739 }
1740 
1741 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1742 				      struct tcf_proto *tp, bool rtnl_held,
1743 				      struct netlink_ext_ack *extack)
1744 {
1745 	struct tcf_chain_info chain_info;
1746 	struct tcf_proto *tp_iter;
1747 	struct tcf_proto **pprev;
1748 	struct tcf_proto *next;
1749 
1750 	mutex_lock(&chain->filter_chain_lock);
1751 
1752 	/* Atomically find and remove tp from chain. */
1753 	for (pprev = &chain->filter_chain;
1754 	     (tp_iter = tcf_chain_dereference(*pprev, chain));
1755 	     pprev = &tp_iter->next) {
1756 		if (tp_iter == tp) {
1757 			chain_info.pprev = pprev;
1758 			chain_info.next = tp_iter->next;
1759 			WARN_ON(tp_iter->deleting);
1760 			break;
1761 		}
1762 	}
1763 	/* Verify that tp still exists and no new filters were inserted
1764 	 * concurrently.
1765 	 * Mark tp for deletion if it is empty.
1766 	 */
1767 	if (!tp_iter || !tcf_proto_check_delete(tp)) {
1768 		mutex_unlock(&chain->filter_chain_lock);
1769 		return;
1770 	}
1771 
1772 	tcf_proto_signal_destroying(chain, tp);
1773 	next = tcf_chain_dereference(chain_info.next, chain);
1774 	if (tp == chain->filter_chain)
1775 		tcf_chain0_head_change(chain, next);
1776 	RCU_INIT_POINTER(*chain_info.pprev, next);
1777 	mutex_unlock(&chain->filter_chain_lock);
1778 
1779 	tcf_proto_put(tp, rtnl_held, extack);
1780 }
1781 
1782 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1783 					   struct tcf_chain_info *chain_info,
1784 					   u32 protocol, u32 prio,
1785 					   bool prio_allocate)
1786 {
1787 	struct tcf_proto **pprev;
1788 	struct tcf_proto *tp;
1789 
1790 	/* Check the chain for existence of proto-tcf with this priority */
1791 	for (pprev = &chain->filter_chain;
1792 	     (tp = tcf_chain_dereference(*pprev, chain));
1793 	     pprev = &tp->next) {
1794 		if (tp->prio >= prio) {
1795 			if (tp->prio == prio) {
1796 				if (prio_allocate ||
1797 				    (tp->protocol != protocol && protocol))
1798 					return ERR_PTR(-EINVAL);
1799 			} else {
1800 				tp = NULL;
1801 			}
1802 			break;
1803 		}
1804 	}
1805 	chain_info->pprev = pprev;
1806 	if (tp) {
1807 		chain_info->next = tp->next;
1808 		tcf_proto_get(tp);
1809 	} else {
1810 		chain_info->next = NULL;
1811 	}
1812 	return tp;
1813 }
1814 
1815 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1816 			 struct tcf_proto *tp, struct tcf_block *block,
1817 			 struct Qdisc *q, u32 parent, void *fh,
1818 			 u32 portid, u32 seq, u16 flags, int event,
1819 			 bool terse_dump, bool rtnl_held)
1820 {
1821 	struct tcmsg *tcm;
1822 	struct nlmsghdr  *nlh;
1823 	unsigned char *b = skb_tail_pointer(skb);
1824 
1825 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1826 	if (!nlh)
1827 		goto out_nlmsg_trim;
1828 	tcm = nlmsg_data(nlh);
1829 	tcm->tcm_family = AF_UNSPEC;
1830 	tcm->tcm__pad1 = 0;
1831 	tcm->tcm__pad2 = 0;
1832 	if (q) {
1833 		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1834 		tcm->tcm_parent = parent;
1835 	} else {
1836 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1837 		tcm->tcm_block_index = block->index;
1838 	}
1839 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1840 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1841 		goto nla_put_failure;
1842 	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1843 		goto nla_put_failure;
1844 	if (!fh) {
1845 		tcm->tcm_handle = 0;
1846 	} else if (terse_dump) {
1847 		if (tp->ops->terse_dump) {
1848 			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
1849 						rtnl_held) < 0)
1850 				goto nla_put_failure;
1851 		} else {
1852 			goto cls_op_not_supp;
1853 		}
1854 	} else {
1855 		if (tp->ops->dump &&
1856 		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1857 			goto nla_put_failure;
1858 	}
1859 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1860 	return skb->len;
1861 
1862 out_nlmsg_trim:
1863 nla_put_failure:
1864 cls_op_not_supp:
1865 	nlmsg_trim(skb, b);
1866 	return -1;
1867 }
1868 
1869 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1870 			  struct nlmsghdr *n, struct tcf_proto *tp,
1871 			  struct tcf_block *block, struct Qdisc *q,
1872 			  u32 parent, void *fh, int event, bool unicast,
1873 			  bool rtnl_held)
1874 {
1875 	struct sk_buff *skb;
1876 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1877 	int err = 0;
1878 
1879 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1880 	if (!skb)
1881 		return -ENOBUFS;
1882 
1883 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1884 			  n->nlmsg_seq, n->nlmsg_flags, event,
1885 			  false, rtnl_held) <= 0) {
1886 		kfree_skb(skb);
1887 		return -EINVAL;
1888 	}
1889 
1890 	if (unicast)
1891 		err = rtnl_unicast(skb, net, portid);
1892 	else
1893 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1894 				     n->nlmsg_flags & NLM_F_ECHO);
1895 	return err;
1896 }
1897 
1898 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1899 			      struct nlmsghdr *n, struct tcf_proto *tp,
1900 			      struct tcf_block *block, struct Qdisc *q,
1901 			      u32 parent, void *fh, bool unicast, bool *last,
1902 			      bool rtnl_held, struct netlink_ext_ack *extack)
1903 {
1904 	struct sk_buff *skb;
1905 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1906 	int err;
1907 
1908 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1909 	if (!skb)
1910 		return -ENOBUFS;
1911 
1912 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1913 			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1914 			  false, rtnl_held) <= 0) {
1915 		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1916 		kfree_skb(skb);
1917 		return -EINVAL;
1918 	}
1919 
1920 	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1921 	if (err) {
1922 		kfree_skb(skb);
1923 		return err;
1924 	}
1925 
1926 	if (unicast)
1927 		err = rtnl_unicast(skb, net, portid);
1928 	else
1929 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1930 				     n->nlmsg_flags & NLM_F_ECHO);
1931 	if (err < 0)
1932 		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1933 
1934 	return err;
1935 }
1936 
1937 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1938 				 struct tcf_block *block, struct Qdisc *q,
1939 				 u32 parent, struct nlmsghdr *n,
1940 				 struct tcf_chain *chain, int event)
1941 {
1942 	struct tcf_proto *tp;
1943 
1944 	for (tp = tcf_get_next_proto(chain, NULL);
1945 	     tp; tp = tcf_get_next_proto(chain, tp))
1946 		tfilter_notify(net, oskb, n, tp, block,
1947 			       q, parent, NULL, event, false, true);
1948 }
1949 
1950 static void tfilter_put(struct tcf_proto *tp, void *fh)
1951 {
1952 	if (tp->ops->put && fh)
1953 		tp->ops->put(tp, fh);
1954 }
1955 
1956 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1957 			  struct netlink_ext_ack *extack)
1958 {
1959 	struct net *net = sock_net(skb->sk);
1960 	struct nlattr *tca[TCA_MAX + 1];
1961 	char name[IFNAMSIZ];
1962 	struct tcmsg *t;
1963 	u32 protocol;
1964 	u32 prio;
1965 	bool prio_allocate;
1966 	u32 parent;
1967 	u32 chain_index;
1968 	struct Qdisc *q;
1969 	struct tcf_chain_info chain_info;
1970 	struct tcf_chain *chain;
1971 	struct tcf_block *block;
1972 	struct tcf_proto *tp;
1973 	unsigned long cl;
1974 	void *fh;
1975 	int err;
1976 	int tp_created;
1977 	bool rtnl_held = false;
1978 	u32 flags;
1979 
1980 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1981 		return -EPERM;
1982 
1983 replay:
1984 	tp_created = 0;
1985 
1986 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1987 				     rtm_tca_policy, extack);
1988 	if (err < 0)
1989 		return err;
1990 
1991 	t = nlmsg_data(n);
1992 	protocol = TC_H_MIN(t->tcm_info);
1993 	prio = TC_H_MAJ(t->tcm_info);
1994 	prio_allocate = false;
1995 	parent = t->tcm_parent;
1996 	tp = NULL;
1997 	cl = 0;
1998 	block = NULL;
1999 	q = NULL;
2000 	chain = NULL;
2001 	flags = 0;
2002 
2003 	if (prio == 0) {
2004 		/* If no priority is provided by the user,
2005 		 * we allocate one.
2006 		 */
2007 		if (n->nlmsg_flags & NLM_F_CREATE) {
2008 			prio = TC_H_MAKE(0x80000000U, 0U);
2009 			prio_allocate = true;
2010 		} else {
2011 			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2012 			return -ENOENT;
2013 		}
2014 	}
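
	/* For illustration (not normative): the priority is carried in the
	 * upper 16 bits of tcm_info (TC_H_MAJ), so the placeholder above
	 * sits in the middle of the range; the value actually used is picked
	 * later via tcf_auto_prio() once the insertion point in the chain is
	 * known.
	 */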
2015 
2016 	/* Find head of filter chain. */
2017 
2018 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2019 	if (err)
2020 		return err;
2021 
2022 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2023 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2024 		err = -EINVAL;
2025 		goto errout;
2026 	}
2027 
2028 	/* Take the rtnl mutex if rtnl_held was set on a previous iteration,
2029 	 * the block is shared (no qdisc found), the qdisc is not unlocked, the
2030 	 * classifier type is not specified, or the classifier is not unlocked.
2031 	 */
2032 	if (rtnl_held ||
2033 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2034 	    !tcf_proto_is_unlocked(name)) {
2035 		rtnl_held = true;
2036 		rtnl_lock();
2037 	}
2038 
2039 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2040 	if (err)
2041 		goto errout;
2042 
2043 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2044 				 extack);
2045 	if (IS_ERR(block)) {
2046 		err = PTR_ERR(block);
2047 		goto errout;
2048 	}
2049 	block->classid = parent;
2050 
2051 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2052 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2053 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2054 		err = -EINVAL;
2055 		goto errout;
2056 	}
2057 	chain = tcf_chain_get(block, chain_index, true);
2058 	if (!chain) {
2059 		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2060 		err = -ENOMEM;
2061 		goto errout;
2062 	}
2063 
2064 	mutex_lock(&chain->filter_chain_lock);
2065 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2066 			       prio, prio_allocate);
2067 	if (IS_ERR(tp)) {
2068 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2069 		err = PTR_ERR(tp);
2070 		goto errout_locked;
2071 	}
2072 
2073 	if (tp == NULL) {
2074 		struct tcf_proto *tp_new = NULL;
2075 
2076 		if (chain->flushing) {
2077 			err = -EAGAIN;
2078 			goto errout_locked;
2079 		}
2080 
2081 		/* Proto-tcf does not exist, create new one */
2082 
2083 		if (tca[TCA_KIND] == NULL || !protocol) {
2084 			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2085 			err = -EINVAL;
2086 			goto errout_locked;
2087 		}
2088 
2089 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2090 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2091 			err = -ENOENT;
2092 			goto errout_locked;
2093 		}
2094 
2095 		if (prio_allocate)
2096 			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2097 							       &chain_info));
2098 
2099 		mutex_unlock(&chain->filter_chain_lock);
2100 		tp_new = tcf_proto_create(name, protocol, prio, chain,
2101 					  rtnl_held, extack);
2102 		if (IS_ERR(tp_new)) {
2103 			err = PTR_ERR(tp_new);
2104 			goto errout_tp;
2105 		}
2106 
2107 		tp_created = 1;
2108 		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2109 						rtnl_held);
2110 		if (IS_ERR(tp)) {
2111 			err = PTR_ERR(tp);
2112 			goto errout_tp;
2113 		}
2114 	} else {
2115 		mutex_unlock(&chain->filter_chain_lock);
2116 	}
2117 
2118 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2119 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2120 		err = -EINVAL;
2121 		goto errout;
2122 	}
2123 
2124 	fh = tp->ops->get(tp, t->tcm_handle);
2125 
2126 	if (!fh) {
2127 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2128 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2129 			err = -ENOENT;
2130 			goto errout;
2131 		}
2132 	} else if (n->nlmsg_flags & NLM_F_EXCL) {
2133 		tfilter_put(tp, fh);
2134 		NL_SET_ERR_MSG(extack, "Filter already exists");
2135 		err = -EEXIST;
2136 		goto errout;
2137 	}
2138 
2139 	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2140 		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2141 		err = -EINVAL;
2142 		goto errout;
2143 	}
2144 
2145 	if (!(n->nlmsg_flags & NLM_F_CREATE))
2146 		flags |= TCA_ACT_FLAGS_REPLACE;
2147 	if (!rtnl_held)
2148 		flags |= TCA_ACT_FLAGS_NO_RTNL;
2149 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2150 			      flags, extack);
2151 	if (err == 0) {
2152 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2153 			       RTM_NEWTFILTER, false, rtnl_held);
2154 		tfilter_put(tp, fh);
2155 		/* q pointer is NULL for shared blocks */
2156 		if (q)
2157 			q->flags &= ~TCQ_F_CAN_BYPASS;
2158 	}
2159 
2160 errout:
2161 	if (err && tp_created)
2162 		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2163 errout_tp:
2164 	if (chain) {
2165 		if (tp && !IS_ERR(tp))
2166 			tcf_proto_put(tp, rtnl_held, NULL);
2167 		if (!tp_created)
2168 			tcf_chain_put(chain);
2169 	}
2170 	tcf_block_release(q, block, rtnl_held);
2171 
2172 	if (rtnl_held)
2173 		rtnl_unlock();
2174 
2175 	if (err == -EAGAIN) {
2176 		/* Take the rtnl lock in case EAGAIN was caused by a concurrent
2177 		 * flush of the target chain.
2178 		 */
2179 		rtnl_held = true;
2180 		/* Replay the request. */
2181 		goto replay;
2182 	}
2183 	return err;
2184 
2185 errout_locked:
2186 	mutex_unlock(&chain->filter_chain_lock);
2187 	goto errout;
2188 }
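
/* Userspace view, as a hedged example (iproute2 syntax; not part of this
 * file): "tc filter add dev eth0 ingress protocol ip prio 10 matchall"
 * arrives here as RTM_NEWTFILTER with NLM_F_CREATE | NLM_F_EXCL,
 * tcm_info = TC_H_MAKE(10 << 16, htons(ETH_P_IP)) and TCA_KIND "matchall".
 * Omitting "prio" sends a priority of zero, which takes the prio_allocate
 * path above.
 */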
2189 
2190 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2191 			  struct netlink_ext_ack *extack)
2192 {
2193 	struct net *net = sock_net(skb->sk);
2194 	struct nlattr *tca[TCA_MAX + 1];
2195 	char name[IFNAMSIZ];
2196 	struct tcmsg *t;
2197 	u32 protocol;
2198 	u32 prio;
2199 	u32 parent;
2200 	u32 chain_index;
2201 	struct Qdisc *q = NULL;
2202 	struct tcf_chain_info chain_info;
2203 	struct tcf_chain *chain = NULL;
2204 	struct tcf_block *block = NULL;
2205 	struct tcf_proto *tp = NULL;
2206 	unsigned long cl = 0;
2207 	void *fh = NULL;
2208 	int err;
2209 	bool rtnl_held = false;
2210 
2211 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2212 		return -EPERM;
2213 
2214 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2215 				     rtm_tca_policy, extack);
2216 	if (err < 0)
2217 		return err;
2218 
2219 	t = nlmsg_data(n);
2220 	protocol = TC_H_MIN(t->tcm_info);
2221 	prio = TC_H_MAJ(t->tcm_info);
2222 	parent = t->tcm_parent;
2223 
2224 	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2225 		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2226 		return -ENOENT;
2227 	}
2228 
2229 	/* Find head of filter chain. */
2230 
2231 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2232 	if (err)
2233 		return err;
2234 
2235 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2236 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2237 		err = -EINVAL;
2238 		goto errout;
2239 	}
2240 	/* Take the rtnl mutex if flushing the whole chain, the block is shared
2241 	 * (no qdisc found), the qdisc is not unlocked, the classifier type is
2242 	 * not specified, or the classifier is not unlocked.
2243 	 */
2244 	if (!prio ||
2245 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2246 	    !tcf_proto_is_unlocked(name)) {
2247 		rtnl_held = true;
2248 		rtnl_lock();
2249 	}
2250 
2251 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2252 	if (err)
2253 		goto errout;
2254 
2255 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2256 				 extack);
2257 	if (IS_ERR(block)) {
2258 		err = PTR_ERR(block);
2259 		goto errout;
2260 	}
2261 
2262 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2263 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2264 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2265 		err = -EINVAL;
2266 		goto errout;
2267 	}
2268 	chain = tcf_chain_get(block, chain_index, false);
2269 	if (!chain) {
2270 		/* User requested flush on non-existent chain. Nothing to do,
2271 		 * so just return success.
2272 		 */
2273 		if (prio == 0) {
2274 			err = 0;
2275 			goto errout;
2276 		}
2277 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2278 		err = -ENOENT;
2279 		goto errout;
2280 	}
2281 
2282 	if (prio == 0) {
2283 		tfilter_notify_chain(net, skb, block, q, parent, n,
2284 				     chain, RTM_DELTFILTER);
2285 		tcf_chain_flush(chain, rtnl_held);
2286 		err = 0;
2287 		goto errout;
2288 	}
2289 
2290 	mutex_lock(&chain->filter_chain_lock);
2291 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2292 			       prio, false);
2293 	if (!tp || IS_ERR(tp)) {
2294 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2295 		err = tp ? PTR_ERR(tp) : -ENOENT;
2296 		goto errout_locked;
2297 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2298 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2299 		err = -EINVAL;
2300 		goto errout_locked;
2301 	} else if (t->tcm_handle == 0) {
2302 		tcf_proto_signal_destroying(chain, tp);
2303 		tcf_chain_tp_remove(chain, &chain_info, tp);
2304 		mutex_unlock(&chain->filter_chain_lock);
2305 
2306 		tcf_proto_put(tp, rtnl_held, NULL);
2307 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2308 			       RTM_DELTFILTER, false, rtnl_held);
2309 		err = 0;
2310 		goto errout;
2311 	}
2312 	mutex_unlock(&chain->filter_chain_lock);
2313 
2314 	fh = tp->ops->get(tp, t->tcm_handle);
2315 
2316 	if (!fh) {
2317 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2318 		err = -ENOENT;
2319 	} else {
2320 		bool last;
2321 
2322 		err = tfilter_del_notify(net, skb, n, tp, block,
2323 					 q, parent, fh, false, &last,
2324 					 rtnl_held, extack);
2325 
2326 		if (err)
2327 			goto errout;
2328 		if (last)
2329 			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2330 	}
2331 
2332 errout:
2333 	if (chain) {
2334 		if (tp && !IS_ERR(tp))
2335 			tcf_proto_put(tp, rtnl_held, NULL);
2336 		tcf_chain_put(chain);
2337 	}
2338 	tcf_block_release(q, block, rtnl_held);
2339 
2340 	if (rtnl_held)
2341 		rtnl_unlock();
2342 
2343 	return err;
2344 
2345 errout_locked:
2346 	mutex_unlock(&chain->filter_chain_lock);
2347 	goto errout;
2348 }
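
/* Deletion granularity in the function above, summarized as a sketch:
 * prio == 0 with no protocol/handle/kind flushes the whole chain; a
 * matching prio/protocol with tcm_handle == 0 removes one tcf_proto
 * instance; a non-zero handle deletes a single filter via
 * tp->ops->delete(), and the proto itself is reaped only when ->delete()
 * reports through "last" that it became empty.
 */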
2349 
2350 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2351 			  struct netlink_ext_ack *extack)
2352 {
2353 	struct net *net = sock_net(skb->sk);
2354 	struct nlattr *tca[TCA_MAX + 1];
2355 	char name[IFNAMSIZ];
2356 	struct tcmsg *t;
2357 	u32 protocol;
2358 	u32 prio;
2359 	u32 parent;
2360 	u32 chain_index;
2361 	struct Qdisc *q = NULL;
2362 	struct tcf_chain_info chain_info;
2363 	struct tcf_chain *chain = NULL;
2364 	struct tcf_block *block = NULL;
2365 	struct tcf_proto *tp = NULL;
2366 	unsigned long cl = 0;
2367 	void *fh = NULL;
2368 	int err;
2369 	bool rtnl_held = false;
2370 
2371 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2372 				     rtm_tca_policy, extack);
2373 	if (err < 0)
2374 		return err;
2375 
2376 	t = nlmsg_data(n);
2377 	protocol = TC_H_MIN(t->tcm_info);
2378 	prio = TC_H_MAJ(t->tcm_info);
2379 	parent = t->tcm_parent;
2380 
2381 	if (prio == 0) {
2382 		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2383 		return -ENOENT;
2384 	}
2385 
2386 	/* Find head of filter chain. */
2387 
2388 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2389 	if (err)
2390 		return err;
2391 
2392 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2393 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2394 		err = -EINVAL;
2395 		goto errout;
2396 	}
2397 	/* Take the rtnl mutex if the block is shared (no qdisc found), the
2398 	 * qdisc is not unlocked, the classifier type is not specified, or the
2399 	 * classifier is not unlocked.
2400 	 */
2401 	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2402 	    !tcf_proto_is_unlocked(name)) {
2403 		rtnl_held = true;
2404 		rtnl_lock();
2405 	}
2406 
2407 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2408 	if (err)
2409 		goto errout;
2410 
2411 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2412 				 extack);
2413 	if (IS_ERR(block)) {
2414 		err = PTR_ERR(block);
2415 		goto errout;
2416 	}
2417 
2418 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2419 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2420 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2421 		err = -EINVAL;
2422 		goto errout;
2423 	}
2424 	chain = tcf_chain_get(block, chain_index, false);
2425 	if (!chain) {
2426 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2427 		err = -EINVAL;
2428 		goto errout;
2429 	}
2430 
2431 	mutex_lock(&chain->filter_chain_lock);
2432 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2433 			       prio, false);
2434 	mutex_unlock(&chain->filter_chain_lock);
2435 	if (!tp || IS_ERR(tp)) {
2436 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2437 		err = tp ? PTR_ERR(tp) : -ENOENT;
2438 		goto errout;
2439 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2440 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2441 		err = -EINVAL;
2442 		goto errout;
2443 	}
2444 
2445 	fh = tp->ops->get(tp, t->tcm_handle);
2446 
2447 	if (!fh) {
2448 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2449 		err = -ENOENT;
2450 	} else {
2451 		err = tfilter_notify(net, skb, n, tp, block, q, parent,
2452 				     fh, RTM_NEWTFILTER, true, rtnl_held);
2453 		if (err < 0)
2454 			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2455 	}
2456 
2457 	tfilter_put(tp, fh);
2458 errout:
2459 	if (chain) {
2460 		if (tp && !IS_ERR(tp))
2461 			tcf_proto_put(tp, rtnl_held, NULL);
2462 		tcf_chain_put(chain);
2463 	}
2464 	tcf_block_release(q, block, rtnl_held);
2465 
2466 	if (rtnl_held)
2467 		rtnl_unlock();
2468 
2469 	return err;
2470 }
2471 
2472 struct tcf_dump_args {
2473 	struct tcf_walker w;
2474 	struct sk_buff *skb;
2475 	struct netlink_callback *cb;
2476 	struct tcf_block *block;
2477 	struct Qdisc *q;
2478 	u32 parent;
2479 	bool terse_dump;
2480 };
2481 
2482 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2483 {
2484 	struct tcf_dump_args *a = (void *)arg;
2485 	struct net *net = sock_net(a->skb->sk);
2486 
2487 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2488 			     n, NETLINK_CB(a->cb->skb).portid,
2489 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2490 			     RTM_NEWTFILTER, a->terse_dump, true);
2491 }
2492 
2493 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2494 			   struct sk_buff *skb, struct netlink_callback *cb,
2495 			   long index_start, long *p_index, bool terse)
2496 {
2497 	struct net *net = sock_net(skb->sk);
2498 	struct tcf_block *block = chain->block;
2499 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2500 	struct tcf_proto *tp, *tp_prev;
2501 	struct tcf_dump_args arg;
2502 
2503 	for (tp = __tcf_get_next_proto(chain, NULL);
2504 	     tp;
2505 	     tp_prev = tp,
2506 		     tp = __tcf_get_next_proto(chain, tp),
2507 		     tcf_proto_put(tp_prev, true, NULL),
2508 		     (*p_index)++) {
2509 		if (*p_index < index_start)
2510 			continue;
2511 		if (TC_H_MAJ(tcm->tcm_info) &&
2512 		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
2513 			continue;
2514 		if (TC_H_MIN(tcm->tcm_info) &&
2515 		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
2516 			continue;
2517 		if (*p_index > index_start)
2518 			memset(&cb->args[1], 0,
2519 			       sizeof(cb->args) - sizeof(cb->args[0]));
2520 		if (cb->args[1] == 0) {
2521 			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2522 					  NETLINK_CB(cb->skb).portid,
2523 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
2524 					  RTM_NEWTFILTER, false, true) <= 0)
2525 				goto errout;
2526 			cb->args[1] = 1;
2527 		}
2528 		if (!tp->ops->walk)
2529 			continue;
2530 		arg.w.fn = tcf_node_dump;
2531 		arg.skb = skb;
2532 		arg.cb = cb;
2533 		arg.block = block;
2534 		arg.q = q;
2535 		arg.parent = parent;
2536 		arg.w.stop = 0;
2537 		arg.w.skip = cb->args[1] - 1;
2538 		arg.w.count = 0;
2539 		arg.w.cookie = cb->args[2];
2540 		arg.terse_dump = terse;
2541 		tp->ops->walk(tp, &arg.w, true);
2542 		cb->args[2] = arg.w.cookie;
2543 		cb->args[1] = arg.w.count + 1;
2544 		if (arg.w.stop)
2545 			goto errout;
2546 	}
2547 	return true;
2548 
2549 errout:
2550 	tcf_proto_put(tp, true, NULL);
2551 	return false;
2552 }
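
/* The dump cursor kept in cb->args[], as read from the code above (not a
 * documented ABI): args[0] is the flat tcf_proto index already walked
 * across all chains, args[1] is 1 + the number of filters dumped from the
 * current proto (0 means the proto header itself has not been sent yet),
 * and args[2] carries the classifier walker cookie between callbacks.
 */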
2553 
2554 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2555 	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2556 };
2557 
2558 /* called with RTNL */
2559 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2560 {
2561 	struct tcf_chain *chain, *chain_prev;
2562 	struct net *net = sock_net(skb->sk);
2563 	struct nlattr *tca[TCA_MAX + 1];
2564 	struct Qdisc *q = NULL;
2565 	struct tcf_block *block;
2566 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2567 	bool terse_dump = false;
2568 	long index_start;
2569 	long index;
2570 	u32 parent;
2571 	int err;
2572 
2573 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2574 		return skb->len;
2575 
2576 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2577 				     tcf_tfilter_dump_policy, cb->extack);
2578 	if (err)
2579 		return err;
2580 
2581 	if (tca[TCA_DUMP_FLAGS]) {
2582 		struct nla_bitfield32 flags =
2583 			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2584 
2585 		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2586 	}
2587 
2588 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2589 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2590 		if (!block)
2591 			goto out;
2592 		/* If we work with a block index, q is NULL and the parent value
2593 		 * will never be used in the following code. The check in
2594 		 * tcf_fill_node() prevents it. However, the compiler does not
2595 		 * see that far, so set parent to zero to silence the warning
2596 		 * about parent being uninitialized.
2597 		 */
2598 		parent = 0;
2599 	} else {
2600 		const struct Qdisc_class_ops *cops;
2601 		struct net_device *dev;
2602 		unsigned long cl = 0;
2603 
2604 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2605 		if (!dev)
2606 			return skb->len;
2607 
2608 		parent = tcm->tcm_parent;
2609 		if (!parent)
2610 			q = rtnl_dereference(dev->qdisc);
2611 		else
2612 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2613 		if (!q)
2614 			goto out;
2615 		cops = q->ops->cl_ops;
2616 		if (!cops)
2617 			goto out;
2618 		if (!cops->tcf_block)
2619 			goto out;
2620 		if (TC_H_MIN(tcm->tcm_parent)) {
2621 			cl = cops->find(q, tcm->tcm_parent);
2622 			if (cl == 0)
2623 				goto out;
2624 		}
2625 		block = cops->tcf_block(q, cl, NULL);
2626 		if (!block)
2627 			goto out;
2628 		parent = block->classid;
2629 		if (tcf_block_shared(block))
2630 			q = NULL;
2631 	}
2632 
2633 	index_start = cb->args[0];
2634 	index = 0;
2635 
2636 	for (chain = __tcf_get_next_chain(block, NULL);
2637 	     chain;
2638 	     chain_prev = chain,
2639 		     chain = __tcf_get_next_chain(block, chain),
2640 		     tcf_chain_put(chain_prev)) {
2641 		if (tca[TCA_CHAIN] &&
2642 		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2643 			continue;
2644 		if (!tcf_chain_dump(chain, q, parent, skb, cb,
2645 				    index_start, &index, terse_dump)) {
2646 			tcf_chain_put(chain);
2647 			err = -EMSGSIZE;
2648 			break;
2649 		}
2650 	}
2651 
2652 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2653 		tcf_block_refcnt_put(block, true);
2654 	cb->args[0] = index;
2655 
2656 out:
2657 	/* If we made no progress, the error (EMSGSIZE) is real */
2658 	if (skb->len == 0 && err)
2659 		return err;
2660 	return skb->len;
2661 }
2662 
2663 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2664 			      void *tmplt_priv, u32 chain_index,
2665 			      struct net *net, struct sk_buff *skb,
2666 			      struct tcf_block *block,
2667 			      u32 portid, u32 seq, u16 flags, int event)
2668 {
2669 	unsigned char *b = skb_tail_pointer(skb);
2670 	const struct tcf_proto_ops *ops;
2671 	struct nlmsghdr *nlh;
2672 	struct tcmsg *tcm;
2673 	void *priv;
2674 
2675 	ops = tmplt_ops;
2676 	priv = tmplt_priv;
2677 
2678 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2679 	if (!nlh)
2680 		goto out_nlmsg_trim;
2681 	tcm = nlmsg_data(nlh);
2682 	tcm->tcm_family = AF_UNSPEC;
2683 	tcm->tcm__pad1 = 0;
2684 	tcm->tcm__pad2 = 0;
2685 	tcm->tcm_handle = 0;
2686 	if (block->q) {
2687 		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2688 		tcm->tcm_parent = block->q->handle;
2689 	} else {
2690 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2691 		tcm->tcm_block_index = block->index;
2692 	}
2693 
2694 	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2695 		goto nla_put_failure;
2696 
2697 	if (ops) {
2698 		if (nla_put_string(skb, TCA_KIND, ops->kind))
2699 			goto nla_put_failure;
2700 		if (ops->tmplt_dump(skb, net, priv) < 0)
2701 			goto nla_put_failure;
2702 	}
2703 
2704 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2705 	return skb->len;
2706 
2707 out_nlmsg_trim:
2708 nla_put_failure:
2709 	nlmsg_trim(skb, b);
2710 	return -EMSGSIZE;
2711 }
2712 
2713 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2714 			   u32 seq, u16 flags, int event, bool unicast)
2715 {
2716 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2717 	struct tcf_block *block = chain->block;
2718 	struct net *net = block->net;
2719 	struct sk_buff *skb;
2720 	int err = 0;
2721 
2722 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2723 	if (!skb)
2724 		return -ENOBUFS;
2725 
2726 	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2727 			       chain->index, net, skb, block, portid,
2728 			       seq, flags, event) <= 0) {
2729 		kfree_skb(skb);
2730 		return -EINVAL;
2731 	}
2732 
2733 	if (unicast)
2734 		err = rtnl_unicast(skb, net, portid);
2735 	else
2736 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2737 				     flags & NLM_F_ECHO);
2738 
2739 	return err;
2740 }
2741 
2742 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2743 				  void *tmplt_priv, u32 chain_index,
2744 				  struct tcf_block *block, struct sk_buff *oskb,
2745 				  u32 seq, u16 flags, bool unicast)
2746 {
2747 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2748 	struct net *net = block->net;
2749 	struct sk_buff *skb;
2750 
2751 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2752 	if (!skb)
2753 		return -ENOBUFS;
2754 
2755 	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2756 			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2757 		kfree_skb(skb);
2758 		return -EINVAL;
2759 	}
2760 
2761 	if (unicast)
2762 		return rtnl_unicast(skb, net, portid);
2763 
2764 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2765 }
2766 
2767 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2768 			      struct nlattr **tca,
2769 			      struct netlink_ext_ack *extack)
2770 {
2771 	const struct tcf_proto_ops *ops;
2772 	char name[IFNAMSIZ];
2773 	void *tmplt_priv;
2774 
2775 	/* If kind is not set, the user did not specify a template. */
2776 	if (!tca[TCA_KIND])
2777 		return 0;
2778 
2779 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2780 		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2781 		return -EINVAL;
2782 	}
2783 
2784 	ops = tcf_proto_lookup_ops(name, true, extack);
2785 	if (IS_ERR(ops))
2786 		return PTR_ERR(ops);
2787 	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2788 		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2789 		return -EOPNOTSUPP;
2790 	}
2791 
2792 	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2793 	if (IS_ERR(tmplt_priv)) {
2794 		module_put(ops->owner);
2795 		return PTR_ERR(tmplt_priv);
2796 	}
2797 	chain->tmplt_ops = ops;
2798 	chain->tmplt_priv = tmplt_priv;
2799 	return 0;
2800 }
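
/* Example (assuming the flower classifier, which implements the tmplt_*
 * ops): "tc chain add dev eth0 ingress chain 1 flower dst_mac ..." ends up
 * here with TCA_KIND = "flower"; tmplt_create() pre-parses the keys and
 * masks so that filters later added to chain 1 can be rejected early when
 * they do not fit the template.
 */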
2801 
2802 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2803 			       void *tmplt_priv)
2804 {
2805 	/* If template ops are not set, there is no work to do for us. */
2806 	if (!tmplt_ops)
2807 		return;
2808 
2809 	tmplt_ops->tmplt_destroy(tmplt_priv);
2810 	module_put(tmplt_ops->owner);
2811 }
2812 
2813 /* Add/delete/get a chain */
2814 
2815 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2816 			struct netlink_ext_ack *extack)
2817 {
2818 	struct net *net = sock_net(skb->sk);
2819 	struct nlattr *tca[TCA_MAX + 1];
2820 	struct tcmsg *t;
2821 	u32 parent;
2822 	u32 chain_index;
2823 	struct Qdisc *q;
2824 	struct tcf_chain *chain;
2825 	struct tcf_block *block;
2826 	unsigned long cl;
2827 	int err;
2828 
2829 	if (n->nlmsg_type != RTM_GETCHAIN &&
2830 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2831 		return -EPERM;
2832 
2833 replay:
2834 	q = NULL;
2835 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2836 				     rtm_tca_policy, extack);
2837 	if (err < 0)
2838 		return err;
2839 
2840 	t = nlmsg_data(n);
2841 	parent = t->tcm_parent;
2842 	cl = 0;
2843 
2844 	block = tcf_block_find(net, &q, &parent, &cl,
2845 			       t->tcm_ifindex, t->tcm_block_index, extack);
2846 	if (IS_ERR(block))
2847 		return PTR_ERR(block);
2848 
2849 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2850 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2851 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2852 		err = -EINVAL;
2853 		goto errout_block;
2854 	}
2855 
2856 	mutex_lock(&block->lock);
2857 	chain = tcf_chain_lookup(block, chain_index);
2858 	if (n->nlmsg_type == RTM_NEWCHAIN) {
2859 		if (chain) {
2860 			if (tcf_chain_held_by_acts_only(chain)) {
2861 				/* The chain exists only because there is
2862 				 * some action referencing it.
2863 				 */
2864 				tcf_chain_hold(chain);
2865 			} else {
2866 				NL_SET_ERR_MSG(extack, "Filter chain already exists");
2867 				err = -EEXIST;
2868 				goto errout_block_locked;
2869 			}
2870 		} else {
2871 			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2872 				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2873 				err = -ENOENT;
2874 				goto errout_block_locked;
2875 			}
2876 			chain = tcf_chain_create(block, chain_index);
2877 			if (!chain) {
2878 				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2879 				err = -ENOMEM;
2880 				goto errout_block_locked;
2881 			}
2882 		}
2883 	} else {
2884 		if (!chain || tcf_chain_held_by_acts_only(chain)) {
2885 			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2886 			err = -EINVAL;
2887 			goto errout_block_locked;
2888 		}
2889 		tcf_chain_hold(chain);
2890 	}
2891 
2892 	if (n->nlmsg_type == RTM_NEWCHAIN) {
2893 		/* Modifying the chain requires holding the parent block lock. In
2894 		 * case the chain was successfully added, take a reference to the
2895 		 * chain. This ensures that an empty chain does not disappear at
2896 		 * the end of this function.
2897 		 */
2898 		tcf_chain_hold(chain);
2899 		chain->explicitly_created = true;
2900 	}
2901 	mutex_unlock(&block->lock);
2902 
2903 	switch (n->nlmsg_type) {
2904 	case RTM_NEWCHAIN:
2905 		err = tc_chain_tmplt_add(chain, net, tca, extack);
2906 		if (err) {
2907 			tcf_chain_put_explicitly_created(chain);
2908 			goto errout;
2909 		}
2910 
2911 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2912 				RTM_NEWCHAIN, false);
2913 		break;
2914 	case RTM_DELCHAIN:
2915 		tfilter_notify_chain(net, skb, block, q, parent, n,
2916 				     chain, RTM_DELTFILTER);
2917 		/* Flush the chain first as the user requested chain removal. */
2918 		tcf_chain_flush(chain, true);
2919 		/* In case the chain was successfully deleted, put a reference
2920 		 * to the chain previously taken during addition.
2921 		 */
2922 		tcf_chain_put_explicitly_created(chain);
2923 		break;
2924 	case RTM_GETCHAIN:
2925 		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2926 				      n->nlmsg_flags, n->nlmsg_type, true);
2927 		if (err < 0)
2928 			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2929 		break;
2930 	default:
2931 		err = -EOPNOTSUPP;
2932 		NL_SET_ERR_MSG(extack, "Unsupported message type");
2933 		goto errout;
2934 	}
2935 
2936 errout:
2937 	tcf_chain_put(chain);
2938 errout_block:
2939 	tcf_block_release(q, block, true);
2940 	if (err == -EAGAIN)
2941 		/* Replay the request. */
2942 		goto replay;
2943 	return err;
2944 
2945 errout_block_locked:
2946 	mutex_unlock(&block->lock);
2947 	goto errout_block;
2948 }
2949 
2950 /* called with RTNL */
2951 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2952 {
2953 	struct net *net = sock_net(skb->sk);
2954 	struct nlattr *tca[TCA_MAX + 1];
2955 	struct Qdisc *q = NULL;
2956 	struct tcf_block *block;
2957 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2958 	struct tcf_chain *chain;
2959 	long index_start;
2960 	long index;
2961 	int err;
2962 
2963 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2964 		return skb->len;
2965 
2966 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2967 				     rtm_tca_policy, cb->extack);
2968 	if (err)
2969 		return err;
2970 
2971 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2972 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2973 		if (!block)
2974 			goto out;
2975 	} else {
2976 		const struct Qdisc_class_ops *cops;
2977 		struct net_device *dev;
2978 		unsigned long cl = 0;
2979 
2980 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2981 		if (!dev)
2982 			return skb->len;
2983 
2984 		if (!tcm->tcm_parent)
2985 			q = rtnl_dereference(dev->qdisc);
2986 		else
2987 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2988 
2989 		if (!q)
2990 			goto out;
2991 		cops = q->ops->cl_ops;
2992 		if (!cops)
2993 			goto out;
2994 		if (!cops->tcf_block)
2995 			goto out;
2996 		if (TC_H_MIN(tcm->tcm_parent)) {
2997 			cl = cops->find(q, tcm->tcm_parent);
2998 			if (cl == 0)
2999 				goto out;
3000 		}
3001 		block = cops->tcf_block(q, cl, NULL);
3002 		if (!block)
3003 			goto out;
3004 		if (tcf_block_shared(block))
3005 			q = NULL;
3006 	}
3007 
3008 	index_start = cb->args[0];
3009 	index = 0;
3010 
3011 	mutex_lock(&block->lock);
3012 	list_for_each_entry(chain, &block->chain_list, list) {
3013 		if ((tca[TCA_CHAIN] &&
3014 		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3015 			continue;
3016 		if (index < index_start) {
3017 			index++;
3018 			continue;
3019 		}
3020 		if (tcf_chain_held_by_acts_only(chain))
3021 			continue;
3022 		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3023 					 chain->index, net, skb, block,
3024 					 NETLINK_CB(cb->skb).portid,
3025 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3026 					 RTM_NEWCHAIN);
3027 		if (err <= 0)
3028 			break;
3029 		index++;
3030 	}
3031 	mutex_unlock(&block->lock);
3032 
3033 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3034 		tcf_block_refcnt_put(block, true);
3035 	cb->args[0] = index;
3036 
3037 out:
3038 	/* If we made no progress, the error (EMSGSIZE) is real */
3039 	if (skb->len == 0 && err)
3040 		return err;
3041 	return skb->len;
3042 }
3043 
3044 void tcf_exts_destroy(struct tcf_exts *exts)
3045 {
3046 #ifdef CONFIG_NET_CLS_ACT
3047 	if (exts->actions) {
3048 		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3049 		kfree(exts->actions);
3050 	}
3051 	exts->nr_actions = 0;
3052 #endif
3053 }
3054 EXPORT_SYMBOL(tcf_exts_destroy);
3055 
3056 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3057 			 struct nlattr *rate_tlv, struct tcf_exts *exts,
3058 			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3059 {
3060 #ifdef CONFIG_NET_CLS_ACT
3061 	{
3062 		int init_res[TCA_ACT_MAX_PRIO] = {};
3063 		struct tc_action *act;
3064 		size_t attr_size = 0;
3065 
3066 		if (exts->police && tb[exts->police]) {
3067 			struct tc_action_ops *a_o;
3068 
3069 			a_o = tc_action_load_ops(tb[exts->police], true,
3070 						 !(flags & TCA_ACT_FLAGS_NO_RTNL),
3071 						 extack);
3072 			if (IS_ERR(a_o))
3073 				return PTR_ERR(a_o);
3074 			flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3075 			act = tcf_action_init_1(net, tp, tb[exts->police],
3076 						rate_tlv, a_o, init_res, flags,
3077 						extack);
3078 			module_put(a_o->owner);
3079 			if (IS_ERR(act))
3080 				return PTR_ERR(act);
3081 
3082 			act->type = exts->type = TCA_OLD_COMPAT;
3083 			exts->actions[0] = act;
3084 			exts->nr_actions = 1;
3085 			tcf_idr_insert_many(exts->actions);
3086 		} else if (exts->action && tb[exts->action]) {
3087 			int err;
3088 
3089 			flags |= TCA_ACT_FLAGS_BIND;
3090 			err = tcf_action_init(net, tp, tb[exts->action],
3091 					      rate_tlv, exts->actions, init_res,
3092 					      &attr_size, flags, fl_flags,
3093 					      extack);
3094 			if (err < 0)
3095 				return err;
3096 			exts->nr_actions = err;
3097 		}
3098 	}
3099 #else
3100 	if ((exts->action && tb[exts->action]) ||
3101 	    (exts->police && tb[exts->police])) {
3102 		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3103 		return -EOPNOTSUPP;
3104 	}
3105 #endif
3106 
3107 	return 0;
3108 }
3109 EXPORT_SYMBOL(tcf_exts_validate_ex);
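
/* Typical caller, sketched from how in-tree classifiers use this API (the
 * TCA_FOO_* names are hypothetical; est is the rate estimator attribute
 * passed down from the classifier's ->change()):
 *
 *	tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
 *	if (err < 0)
 *		goto errout;
 *
 * i.e. the exts->action and exts->police attribute slots consulted above
 * are the ones the classifier registered in tcf_exts_init().
 */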
3110 
3111 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3112 		      struct nlattr *rate_tlv, struct tcf_exts *exts,
3113 		      u32 flags, struct netlink_ext_ack *extack)
3114 {
3115 	return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3116 				    flags, 0, extack);
3117 }
3118 EXPORT_SYMBOL(tcf_exts_validate);
3119 
3120 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3121 {
3122 #ifdef CONFIG_NET_CLS_ACT
3123 	struct tcf_exts old = *dst;
3124 
3125 	*dst = *src;
3126 	tcf_exts_destroy(&old);
3127 #endif
3128 }
3129 EXPORT_SYMBOL(tcf_exts_change);
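
/* tcf_exts_change() transfers ownership: dst takes over src's action array
 * and the actions previously owned by dst are destroyed. Callers are
 * expected to serialize this against their readers, typically under the
 * classifier's own lock.
 */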
3130 
3131 #ifdef CONFIG_NET_CLS_ACT
3132 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3133 {
3134 	if (exts->nr_actions == 0)
3135 		return NULL;
3136 	else
3137 		return exts->actions[0];
3138 }
3139 #endif
3140 
3141 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3142 {
3143 #ifdef CONFIG_NET_CLS_ACT
3144 	struct nlattr *nest;
3145 
3146 	if (exts->action && tcf_exts_has_actions(exts)) {
3147 		/*
3148 		 * again for backward-compatible mode - we want
3149 		 * to work with both old and new modes of entering
3150 		 * tc data even if iproute2 was newer - jhs
3151 		 */
3152 		if (exts->type != TCA_OLD_COMPAT) {
3153 			nest = nla_nest_start_noflag(skb, exts->action);
3154 			if (nest == NULL)
3155 				goto nla_put_failure;
3156 
3157 			if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3158 			    < 0)
3159 				goto nla_put_failure;
3160 			nla_nest_end(skb, nest);
3161 		} else if (exts->police) {
3162 			struct tc_action *act = tcf_exts_first_act(exts);
3163 			nest = nla_nest_start_noflag(skb, exts->police);
3164 			if (nest == NULL || !act)
3165 				goto nla_put_failure;
3166 			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3167 				goto nla_put_failure;
3168 			nla_nest_end(skb, nest);
3169 		}
3170 	}
3171 	return 0;
3172 
3173 nla_put_failure:
3174 	nla_nest_cancel(skb, nest);
3175 	return -1;
3176 #else
3177 	return 0;
3178 #endif
3179 }
3180 EXPORT_SYMBOL(tcf_exts_dump);
3181 
3182 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3183 {
3184 #ifdef CONFIG_NET_CLS_ACT
3185 	struct nlattr *nest;
3186 
3187 	if (!exts->action || !tcf_exts_has_actions(exts))
3188 		return 0;
3189 
3190 	nest = nla_nest_start_noflag(skb, exts->action);
3191 	if (!nest)
3192 		goto nla_put_failure;
3193 
3194 	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3195 		goto nla_put_failure;
3196 	nla_nest_end(skb, nest);
3197 	return 0;
3198 
3199 nla_put_failure:
3200 	nla_nest_cancel(skb, nest);
3201 	return -1;
3202 #else
3203 	return 0;
3204 #endif
3205 }
3206 EXPORT_SYMBOL(tcf_exts_terse_dump);
3207 
3208 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3209 {
3210 #ifdef CONFIG_NET_CLS_ACT
3211 	struct tc_action *a = tcf_exts_first_act(exts);
3212 	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3213 		return -1;
3214 #endif
3215 	return 0;
3216 }
3217 EXPORT_SYMBOL(tcf_exts_dump_stats);
3218 
3219 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3220 {
3221 	if (*flags & TCA_CLS_FLAGS_IN_HW)
3222 		return;
3223 	*flags |= TCA_CLS_FLAGS_IN_HW;
3224 	atomic_inc(&block->offloadcnt);
3225 }
3226 
3227 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3228 {
3229 	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3230 		return;
3231 	*flags &= ~TCA_CLS_FLAGS_IN_HW;
3232 	atomic_dec(&block->offloadcnt);
3233 }
3234 
3235 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3236 				      struct tcf_proto *tp, u32 *cnt,
3237 				      u32 *flags, u32 diff, bool add)
3238 {
3239 	lockdep_assert_held(&block->cb_lock);
3240 
3241 	spin_lock(&tp->lock);
3242 	if (add) {
3243 		if (!*cnt)
3244 			tcf_block_offload_inc(block, flags);
3245 		*cnt += diff;
3246 	} else {
3247 		*cnt -= diff;
3248 		if (!*cnt)
3249 			tcf_block_offload_dec(block, flags);
3250 	}
3251 	spin_unlock(&tp->lock);
3252 }
3253 
3254 static void
3255 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3256 			 u32 *cnt, u32 *flags)
3257 {
3258 	lockdep_assert_held(&block->cb_lock);
3259 
3260 	spin_lock(&tp->lock);
3261 	tcf_block_offload_dec(block, flags);
3262 	*cnt = 0;
3263 	spin_unlock(&tp->lock);
3264 }
3265 
3266 static int
3267 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3268 		   void *type_data, bool err_stop)
3269 {
3270 	struct flow_block_cb *block_cb;
3271 	int ok_count = 0;
3272 	int err;
3273 
3274 	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3275 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
3276 		if (err) {
3277 			if (err_stop)
3278 				return err;
3279 		} else {
3280 			ok_count++;
3281 		}
3282 	}
3283 	return ok_count;
3284 }
3285 
3286 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3287 		     void *type_data, bool err_stop, bool rtnl_held)
3288 {
3289 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3290 	int ok_count;
3291 
3292 retry:
3293 	if (take_rtnl)
3294 		rtnl_lock();
3295 	down_read(&block->cb_lock);
3296 	/* Need to obtain the rtnl lock if the block is bound to devs that
3297 	 * require it. In the block bind code cb_lock is obtained while holding
3298 	 * rtnl, so we must obtain the locks in the same order here.
3299 	 */
3300 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3301 		up_read(&block->cb_lock);
3302 		take_rtnl = true;
3303 		goto retry;
3304 	}
3305 
3306 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3307 
3308 	up_read(&block->cb_lock);
3309 	if (take_rtnl)
3310 		rtnl_unlock();
3311 	return ok_count;
3312 }
3313 EXPORT_SYMBOL(tc_setup_cb_call);
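
/* A minimal driver-side consumer of these calls, for illustration only
 * (the foo_* names are hypothetical; the flow_block callback signature is
 * real):
 *
 *	static int foo_setup_tc_block_cb(enum tc_setup_type type,
 *					 void *type_data, void *cb_priv)
 *	{
 *		struct foo_priv *priv = cb_priv;
 *
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER:
 *			return foo_flower_offload(priv, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 * Every such callback registered on the block is visited by
 * __tc_setup_cb_call(); a zero return counts toward ok_count, an error
 * either aborts the walk (err_stop) or is skipped.
 */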
3314 
3315 /* Non-destructive filter add. If a filter that wasn't already in hardware
3316  * is successfully offloaded, increment the block offloads counter. On
3317  * failure, a previously offloaded filter is considered to be intact and
3318  * the offloads counter is not decremented.
3319  */
3320 
3321 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3322 		    enum tc_setup_type type, void *type_data, bool err_stop,
3323 		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3324 {
3325 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3326 	int ok_count;
3327 
3328 retry:
3329 	if (take_rtnl)
3330 		rtnl_lock();
3331 	down_read(&block->cb_lock);
3332 	/* Need to obtain the rtnl lock if the block is bound to devs that
3333 	 * require it. In the block bind code cb_lock is obtained while holding
3334 	 * rtnl, so we must obtain the locks in the same order here.
3335 	 */
3336 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3337 		up_read(&block->cb_lock);
3338 		take_rtnl = true;
3339 		goto retry;
3340 	}
3341 
3342 	/* Make sure all netdevs sharing this block are offload-capable. */
3343 	if (block->nooffloaddevcnt && err_stop) {
3344 		ok_count = -EOPNOTSUPP;
3345 		goto err_unlock;
3346 	}
3347 
3348 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3349 	if (ok_count < 0)
3350 		goto err_unlock;
3351 
3352 	if (tp->ops->hw_add)
3353 		tp->ops->hw_add(tp, type_data);
3354 	if (ok_count > 0)
3355 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3356 					  ok_count, true);
3357 err_unlock:
3358 	up_read(&block->cb_lock);
3359 	if (take_rtnl)
3360 		rtnl_unlock();
3361 	return min(ok_count, 0);
3362 }
3363 EXPORT_SYMBOL(tc_setup_cb_add);
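
/* Return convention shared by the tc_setup_cb_{add,replace,destroy}()
 * helpers above and below: a negative value is either the first callback
 * error (with err_stop) or a setup failure, while success is flattened to 0
 * via min(ok_count, 0) even when several callbacks accepted the offload.
 */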
3364 
3365 /* Destructive filter replace. If a filter that wasn't already in hardware
3366  * is successfully offloaded, increment the block offload counter. On
3367  * failure, a previously offloaded filter is considered to be destroyed and
3368  * the offload counter is decremented.
3369  */
3370 
3371 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3372 			enum tc_setup_type type, void *type_data, bool err_stop,
3373 			u32 *old_flags, unsigned int *old_in_hw_count,
3374 			u32 *new_flags, unsigned int *new_in_hw_count,
3375 			bool rtnl_held)
3376 {
3377 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3378 	int ok_count;
3379 
3380 retry:
3381 	if (take_rtnl)
3382 		rtnl_lock();
3383 	down_read(&block->cb_lock);
3384 	/* Need to obtain the rtnl lock if the block is bound to devs that
3385 	 * require it. In the block bind code cb_lock is obtained while holding
3386 	 * rtnl, so we must obtain the locks in the same order here.
3387 	 */
3388 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3389 		up_read(&block->cb_lock);
3390 		take_rtnl = true;
3391 		goto retry;
3392 	}
3393 
3394 	/* Make sure all netdevs sharing this block are offload-capable. */
3395 	if (block->nooffloaddevcnt && err_stop) {
3396 		ok_count = -EOPNOTSUPP;
3397 		goto err_unlock;
3398 	}
3399 
3400 	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3401 	if (tp->ops->hw_del)
3402 		tp->ops->hw_del(tp, type_data);
3403 
3404 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3405 	if (ok_count < 0)
3406 		goto err_unlock;
3407 
3408 	if (tp->ops->hw_add)
3409 		tp->ops->hw_add(tp, type_data);
3410 	if (ok_count > 0)
3411 		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3412 					  new_flags, ok_count, true);
3413 err_unlock:
3414 	up_read(&block->cb_lock);
3415 	if (take_rtnl)
3416 		rtnl_unlock();
3417 	return min(ok_count, 0);
3418 }
3419 EXPORT_SYMBOL(tc_setup_cb_replace);
3420 
3421 /* Destroy a filter and decrement the block offload counter if the filter
3422  * was previously offloaded.
3423  */
3424 
3425 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3426 			enum tc_setup_type type, void *type_data, bool err_stop,
3427 			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3428 {
3429 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3430 	int ok_count;
3431 
3432 retry:
3433 	if (take_rtnl)
3434 		rtnl_lock();
3435 	down_read(&block->cb_lock);
3436 	/* Need to obtain the rtnl lock if the block is bound to devs that
3437 	 * require it. In the block bind code cb_lock is obtained while holding
3438 	 * rtnl, so we must obtain the locks in the same order here.
3439 	 */
3440 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3441 		up_read(&block->cb_lock);
3442 		take_rtnl = true;
3443 		goto retry;
3444 	}
3445 
3446 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3447 
3448 	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3449 	if (tp->ops->hw_del)
3450 		tp->ops->hw_del(tp, type_data);
3451 
3452 	up_read(&block->cb_lock);
3453 	if (take_rtnl)
3454 		rtnl_unlock();
3455 	return min(ok_count, 0);
3456 }
3457 EXPORT_SYMBOL(tc_setup_cb_destroy);
3458 
3459 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3460 			  bool add, flow_setup_cb_t *cb,
3461 			  enum tc_setup_type type, void *type_data,
3462 			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3463 {
3464 	int err = cb(type, type_data, cb_priv);
3465 
3466 	if (err) {
3467 		if (add && tc_skip_sw(*flags))
3468 			return err;
3469 	} else {
3470 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3471 					  add);
3472 	}
3473 
3474 	return 0;
3475 }
3476 EXPORT_SYMBOL(tc_setup_cb_reoffload);
3477 
3478 static int tcf_act_get_cookie(struct flow_action_entry *entry,
3479 			      const struct tc_action *act)
3480 {
3481 	struct tc_cookie *cookie;
3482 	int err = 0;
3483 
3484 	rcu_read_lock();
3485 	cookie = rcu_dereference(act->act_cookie);
3486 	if (cookie) {
3487 		entry->cookie = flow_action_cookie_create(cookie->data,
3488 							  cookie->len,
3489 							  GFP_ATOMIC);
3490 		if (!entry->cookie)
3491 			err = -ENOMEM;
3492 	}
3493 	rcu_read_unlock();
3494 	return err;
3495 }
3496 
3497 static void tcf_act_put_cookie(struct flow_action_entry *entry)
3498 {
3499 	flow_action_cookie_destroy(entry->cookie);
3500 }
3501 
3502 void tc_cleanup_offload_action(struct flow_action *flow_action)
3503 {
3504 	struct flow_action_entry *entry;
3505 	int i;
3506 
3507 	flow_action_for_each(i, entry, flow_action) {
3508 		tcf_act_put_cookie(entry);
3509 		if (entry->destructor)
3510 			entry->destructor(entry->destructor_priv);
3511 	}
3512 }
3513 EXPORT_SYMBOL(tc_cleanup_offload_action);
3514 
3515 static int tc_setup_offload_act(struct tc_action *act,
3516 				struct flow_action_entry *entry,
3517 				u32 *index_inc,
3518 				struct netlink_ext_ack *extack)
3519 {
3520 #ifdef CONFIG_NET_CLS_ACT
3521 	if (act->ops->offload_act_setup) {
3522 		return act->ops->offload_act_setup(act, entry, index_inc, true,
3523 						   extack);
3524 	} else {
3525 		NL_SET_ERR_MSG(extack, "Action does not support offload");
3526 		return -EOPNOTSUPP;
3527 	}
3528 #else
3529 	return 0;
3530 #endif
3531 }
3532 
3533 int tc_setup_action(struct flow_action *flow_action,
3534 		    struct tc_action *actions[],
3535 		    struct netlink_ext_ack *extack)
3536 {
3537 	int i, j, k, index, err = 0;
3538 	struct tc_action *act;
3539 
3540 	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3541 	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3542 	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3543 
3544 	if (!actions)
3545 		return 0;
3546 
3547 	j = 0;
3548 	tcf_act_for_each_action(i, act, actions) {
3549 		struct flow_action_entry *entry;
3550 
3551 		entry = &flow_action->entries[j];
3552 		spin_lock_bh(&act->tcfa_lock);
3553 		err = tcf_act_get_cookie(entry, act);
3554 		if (err)
3555 			goto err_out_locked;
3556 
3557 		index = 0;
3558 		err = tc_setup_offload_act(act, entry, &index, extack);
3559 		if (err)
3560 			goto err_out_locked;
3561 
3562 		for (k = 0; k < index; k++) {
3563 			entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3564 			entry[k].hw_index = act->tcfa_index;
3565 		}
3566 
3567 		j += index;
3568 
3569 		spin_unlock_bh(&act->tcfa_lock);
3570 	}
3571 
3572 err_out:
3573 	if (err)
3574 		tc_cleanup_offload_action(flow_action);
3575 
3576 	return err;
3577 err_out_locked:
3578 	spin_unlock_bh(&act->tcfa_lock);
3579 	goto err_out;
3580 }
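
/* Worked example of the index bookkeeping above (hedged; per-action offload
 * details vary): a pedit action with two keys may expand into two
 * flow_action entries, so offload_act_setup() bumps index_inc to 2 and both
 * entries inherit the same hw_index and hw_stats. This is also why
 * tcf_exts_num_actions() below counts tcf_pedit_nkeys() per pedit action.
 */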
3581 
3582 int tc_setup_offload_action(struct flow_action *flow_action,
3583 			    const struct tcf_exts *exts,
3584 			    struct netlink_ext_ack *extack)
3585 {
3586 #ifdef CONFIG_NET_CLS_ACT
3587 	if (!exts)
3588 		return 0;
3589 
3590 	return tc_setup_action(flow_action, exts->actions, extack);
3591 #else
3592 	return 0;
3593 #endif
3594 }
3595 EXPORT_SYMBOL(tc_setup_offload_action);
3596 
3597 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3598 {
3599 	unsigned int num_acts = 0;
3600 	struct tc_action *act;
3601 	int i;
3602 
3603 	tcf_exts_for_each_action(i, act, exts) {
3604 		if (is_tcf_pedit(act))
3605 			num_acts += tcf_pedit_nkeys(act);
3606 		else
3607 			num_acts++;
3608 	}
3609 	return num_acts;
3610 }
3611 EXPORT_SYMBOL(tcf_exts_num_actions);
3612 
3613 #ifdef CONFIG_NET_CLS_ACT
3614 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3615 					u32 *p_block_index,
3616 					struct netlink_ext_ack *extack)
3617 {
3618 	*p_block_index = nla_get_u32(block_index_attr);
3619 	if (!*p_block_index) {
3620 		NL_SET_ERR_MSG(extack, "Block number may not be zero");
3621 		return -EINVAL;
3622 	}
3623 
3624 	return 0;
3625 }
3626 
3627 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3628 		    enum flow_block_binder_type binder_type,
3629 		    struct nlattr *block_index_attr,
3630 		    struct netlink_ext_ack *extack)
3631 {
3632 	u32 block_index;
3633 	int err;
3634 
3635 	if (!block_index_attr)
3636 		return 0;
3637 
3638 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3639 	if (err)
3640 		return err;
3641 
3642 	if (!block_index)
3643 		return 0;
3644 
3645 	qe->info.binder_type = binder_type;
3646 	qe->info.chain_head_change = tcf_chain_head_change_dflt;
3647 	qe->info.chain_head_change_priv = &qe->filter_chain;
3648 	qe->info.block_index = block_index;
3649 
3650 	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3651 }
3652 EXPORT_SYMBOL(tcf_qevent_init);
3653 
3654 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3655 {
3656 	if (qe->info.block_index)
3657 		tcf_block_put_ext(qe->block, sch, &qe->info);
3658 }
3659 EXPORT_SYMBOL(tcf_qevent_destroy);
3660 
3661 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3662 			       struct netlink_ext_ack *extack)
3663 {
3664 	u32 block_index;
3665 	int err;
3666 
3667 	if (!block_index_attr)
3668 		return 0;
3669 
3670 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3671 	if (err)
3672 		return err;
3673 
3674 	/* Reject a newly configured block or a change to a different block. */
3675 	if (block_index != qe->info.block_index) {
3676 		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3677 		return -EINVAL;
3678 	}
3679 
3680 	return 0;
3681 }
3682 EXPORT_SYMBOL(tcf_qevent_validate_change);
3683 
3684 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3685 				  struct sk_buff **to_free, int *ret)
3686 {
3687 	struct tcf_result cl_res;
3688 	struct tcf_proto *fl;
3689 
3690 	if (!qe->info.block_index)
3691 		return skb;
3692 
3693 	fl = rcu_dereference_bh(qe->filter_chain);
3694 
3695 	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
3696 	case TC_ACT_SHOT:
3697 		qdisc_qstats_drop(sch);
3698 		__qdisc_drop(skb, to_free);
3699 		*ret = __NET_XMIT_BYPASS;
3700 		return NULL;
3701 	case TC_ACT_STOLEN:
3702 	case TC_ACT_QUEUED:
3703 	case TC_ACT_TRAP:
3704 		__qdisc_drop(skb, to_free);
3705 		*ret = __NET_XMIT_STOLEN;
3706 		return NULL;
3707 	case TC_ACT_REDIRECT:
3708 		skb_do_redirect(skb);
3709 		*ret = __NET_XMIT_STOLEN;
3710 		return NULL;
3711 	}
3712 
3713 	return skb;
3714 }
3715 EXPORT_SYMBOL(tcf_qevent_handle);
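
/* Qevent usage sketch; the hook points below mirror how a qdisc such as
 * red wires these helpers up, but they are shown as an illustration rather
 * than a specification:
 *
 *	init:    tcf_qevent_init(&q->qe_early_drop, sch,
 *				 FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *				 tb[TCA_RED_EARLY_DROP_BLOCK], extack);
 *	enqueue: skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb,
 *					 to_free, &ret);
 *		 if (!skb)
 *			 return ret;
 *	destroy: tcf_qevent_destroy(&q->qe_early_drop, sch);
 */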
3716 
3717 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3718 {
3719 	if (!qe->info.block_index)
3720 		return 0;
3721 	return nla_put_u32(skb, attr_name, qe->info.block_index);
3722 }
3723 EXPORT_SYMBOL(tcf_qevent_dump);
3724 #endif
3725 
3726 static __net_init int tcf_net_init(struct net *net)
3727 {
3728 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3729 
3730 	spin_lock_init(&tn->idr_lock);
3731 	idr_init(&tn->idr);
3732 	return 0;
3733 }
3734 
3735 static void __net_exit tcf_net_exit(struct net *net)
3736 {
3737 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3738 
3739 	idr_destroy(&tn->idr);
3740 }
3741 
3742 static struct pernet_operations tcf_net_ops = {
3743 	.init = tcf_net_init,
3744 	.exit = tcf_net_exit,
3745 	.id   = &tcf_net_id,
3746 	.size = sizeof(struct tcf_net),
3747 };
3748 
3749 static int __init tc_filter_init(void)
3750 {
3751 	int err;
3752 
3753 	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3754 	if (!tc_filter_wq)
3755 		return -ENOMEM;
3756 
3757 	err = register_pernet_subsys(&tcf_net_ops);
3758 	if (err)
3759 		goto err_register_pernet_subsys;
3760 
3761 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3762 		      RTNL_FLAG_DOIT_UNLOCKED);
3763 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3764 		      RTNL_FLAG_DOIT_UNLOCKED);
3765 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3766 		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3767 	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3768 	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3769 	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3770 		      tc_dump_chain, 0);
3771 
3772 	return 0;
3773 
3774 err_register_pernet_subsys:
3775 	destroy_workqueue(tc_filter_wq);
3776 	return err;
3777 }
3778 
3779 subsys_initcall(tc_filter_init);
3780