xref: /openbmc/linux/net/sched/cls_api.c (revision 7b73a9c8)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register (unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
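
/* Registration sketch (hypothetical "foo" classifier, for illustration
 * only; it follows the pattern of the in-tree cls_* modules):
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 * register_tcf_proto_ops() returns -EEXIST if a classifier of the same
 * kind is already registered.
 */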

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
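
/* Usage sketch (hypothetical classifier, for illustration only): a
 * classifier typically embeds a struct rcu_work in its filter and uses
 * tcf_queue_work() to free the filter after an RCU grace period:
 *
 *	static void foo_delete_filter_work(struct work_struct *work)
 *	{
 *		struct foo_filter *f = container_of(to_rcu_work(work),
 *						    struct foo_filter, rwork);
 *		kfree(f);
 *	}
 *
 *	...
 *	tcf_queue_work(&f->rwork, foo_delete_filter_work);
 *
 * unregister_tcf_proto_ops() flushes tc_filter_wq, so all such deferred
 * work has completed before the classifier module can be removed.
 */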

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
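
/* Worked example (for illustration): if the chain has no filters yet,
 * the auto-selected prio is 0xC0000000 (pref 49152 as printed by tc).
 * If the current head has prio 0xC0000000, the next auto-selected prio
 * is TC_H_MAJ(0xBFFFFFFF) == 0xBFFF0000 (pref 49151), and so on
 * downward.
 */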

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error, return false so that the rtnl lock is taken. The proto
	 * lookup/create functions will perform the lookup again and handle
	 * errors properly.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* If all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send a notification only when we get the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it, and the user
	 * ought not to know about it.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
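
/* Chains can be referenced by actions as well as by filters. For
 * example (illustrative), an action such as "gact goto chain 5" takes a
 * reference on chain 5 via tcf_chain_get_by_act(), creating the chain
 * if necessary. While only action references exist, the chain is a mere
 * placeholder: tcf_chain_held_by_acts_only() keeps it out of dumps, and
 * the RTM_NEWCHAIN notification is deferred until the first non-action
 * reference appears (see __tcf_chain_get() above).
 */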

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete() can't be called while holding the block
	 * lock. However, once the block is unlocked the chain can be changed
	 * concurrently, so save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* Dropping the last non-action reference triggers the notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

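/* Flush a chain in two passes: first mark every tp as being destroyed
 * under the chain lock, so that a concurrent insert of the same
 * chain/prio/protocol triple fails with -EAGAIN (see
 * tcf_proto_exists_destroying()), then drop the references with the
 * lock released.
 */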
static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
			      flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      enum flow_block_command command, bool ingress)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ingress ?
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS :
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
		.net		= dev_net(dev),
		.block_shared	= tcf_block_non_null_shared(block),
	};
	INIT_LIST_HEAD(&bo.cb_list);

	if (!block)
		return;

	bo.block = &block->flow_block;

	down_write(&block->cb_lock);
	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	tcf_block_setup(block, &bo);
	up_write(&block->cb_lock);
}

static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
{
	const struct Qdisc_class_ops *cops;
	const struct Qdisc_ops *ops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	ops = qdisc->ops;
	if (!ops)
		return NULL;

	if (!ingress && !strcmp("ingress", ops->id))
		return NULL;

	cops = ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc,
			       ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
			       NULL);
}

static void tc_indr_block_get_and_cmd(struct net_device *dev,
				      flow_indr_block_bind_cb_t *cb,
				      void *cb_priv,
				      enum flow_block_command command)
{
	struct tcf_block *block;

	block = tc_dev_block(dev, true);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);

	block = tc_dev_block(dev, false);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
}

static void tc_indr_block_call(struct tcf_block *block,
			       struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum flow_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ei->binder_type,
		.net		= dev_net(dev),
		.block		= &block->flow_block,
		.block_shared	= tcf_block_shared(block),
		.extack		= extack,
	};
	INIT_LIST_HEAD(&bo.cb_list);

	flow_indr_block_call(dev, &bo, command);
	tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = &block->flow_block;
	bo.block_shared = tcf_block_shared(block);
	bo.extack = extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return tcf_block_setup(block, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid the bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block)) {
		err = -EOPNOTSUPP;
		goto err_unlock;
	}
	err = 0;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time an skb is filled with data
 * and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
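
/* Iteration sketch (this is exactly how tcf_block_flush_all_chains()
 * below walks a block):
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		...
 *
 * Each call drops the reference on the chain passed in and returns the
 * next chain with a new reference held, so a complete walk leaves no
 * references behind.
 */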

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on a
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time an skb is filled with data
 * and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
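
/* The iteration pattern mirrors tcf_get_next_chain(), e.g. (as in
 * tfilter_notify_chain() below):
 *
 *	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
 *	     tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
 *		...
 */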

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that the qdisc is not noop_qdisc,
	 * which means that the qdisc holds a reference to the net_device
	 * and we hold a reference to the qdisc, so it is safe to release
	 * the rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Are we searching for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block in order to support
		 * execution of the cls API rules update path without the rtnl
		 * lock. The caller must release the block when it is finished
		 * using it. The 'if' branch of this conditional obtains its
		 * reference by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when the last chain is freed. However, if
		 * chain_list is empty, the block has to be deallocated
		 * manually. After the block's reference counter reaches 0,
		 * it is no longer possible to increment it or to add new
		 * chains to the block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* A non-zero block_index means a shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
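
/* Usage sketch (simplified; names are illustrative): a classful qdisc
 * typically attaches a block to each class, or to itself, like
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *
 * The tcf_chain_head_change_dflt() callback installed above then keeps
 * q->filter_list pointing at the current head of chain 0, which is what
 * the qdisc's classify path walks.
 */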

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}

EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans the classifier chain attached
 * to this qdisc, (optionally) tests for the protocol and asks
 * the specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = tc_skb_protocol(skb);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			{
				struct tc_skb_ext *ext;

				ext = skb_ext_add(skb, TC_SKB_EXT);
				if (WARN_ON_ONCE(!ext))
					return TC_ACT_SHOT;

				ext->chain = err & TC_ACT_EXT_VAL_MASK;
			}
#endif
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);
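
/* Caller sketch (simplified and illustrative; a classful qdisc's
 * enqueue path typically looks like):
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	int err = tcf_classify(skb, fl, &res, false);
 *
 *	switch (err) {
 *	case TC_ACT_SHOT:
 *	case TC_ACT_STOLEN:
 *		... drop or consume the skb ...
 *	default:
 *		... res.classid selects the target class ...
 *	}
 *
 * TC_ACT_UNSPEC means no filter matched, in which case the caller falls
 * back to its default classification.
 */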

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert a new proto.
 * If a proto with the specified priority already exists, free the new
 * proto and return the existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

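/* Find a proto on the chain by (protocol, prio). The filter chain is
 * kept sorted by ascending prio, so the scan can stop at the first tp
 * whose prio is >= the requested one. On a match a reference to the tp
 * is taken, and chain_info is set up for a subsequent insert or remove
 * at that position.
 */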
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}

static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	if (err > 0)
		err = 0;
	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 bool rtnl_held)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
	     tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, rtnl_held);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}

static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

1987 	/* Take the rtnl mutex if rtnl_held was set on a previous iteration, the
1988 	 * block is shared (no qdisc found), the qdisc is not unlocked, the
1989 	 * classifier type is not specified, or the classifier is not unlocked.
1990 	 */
1991 	if (rtnl_held ||
1992 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
1993 	    !tcf_proto_is_unlocked(name)) {
1994 		rtnl_held = true;
1995 		rtnl_lock();
1996 	}
1997 
1998 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
1999 	if (err)
2000 		goto errout;
2001 
2002 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2003 				 extack);
2004 	if (IS_ERR(block)) {
2005 		err = PTR_ERR(block);
2006 		goto errout;
2007 	}
2008 
2009 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2010 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2011 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2012 		err = -EINVAL;
2013 		goto errout;
2014 	}
2015 	chain = tcf_chain_get(block, chain_index, true);
2016 	if (!chain) {
2017 		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2018 		err = -ENOMEM;
2019 		goto errout;
2020 	}
2021 
2022 	mutex_lock(&chain->filter_chain_lock);
2023 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2024 			       prio, prio_allocate);
2025 	if (IS_ERR(tp)) {
2026 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2027 		err = PTR_ERR(tp);
2028 		goto errout_locked;
2029 	}
2030 
2031 	if (tp == NULL) {
2032 		struct tcf_proto *tp_new = NULL;
2033 
2034 		if (chain->flushing) {
2035 			err = -EAGAIN;
2036 			goto errout_locked;
2037 		}
2038 
2039 		/* Proto-tcf does not exist, create new one */
2040 
2041 		if (tca[TCA_KIND] == NULL || !protocol) {
2042 			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2043 			err = -EINVAL;
2044 			goto errout_locked;
2045 		}
2046 
2047 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2048 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2049 			err = -ENOENT;
2050 			goto errout_locked;
2051 		}
2052 
2053 		if (prio_allocate)
2054 			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2055 							       &chain_info));
2056 
2057 		mutex_unlock(&chain->filter_chain_lock);
2058 		tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
2059 					  protocol, prio, chain, rtnl_held,
2060 					  extack);
2061 		if (IS_ERR(tp_new)) {
2062 			err = PTR_ERR(tp_new);
2063 			goto errout_tp;
2064 		}
2065 
2066 		tp_created = 1;
2067 		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2068 						rtnl_held);
2069 		if (IS_ERR(tp)) {
2070 			err = PTR_ERR(tp);
2071 			goto errout_tp;
2072 		}
2073 	} else {
2074 		mutex_unlock(&chain->filter_chain_lock);
2075 	}
2076 
2077 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2078 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2079 		err = -EINVAL;
2080 		goto errout;
2081 	}
2082 
2083 	fh = tp->ops->get(tp, t->tcm_handle);
2084 
2085 	if (!fh) {
2086 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2087 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2088 			err = -ENOENT;
2089 			goto errout;
2090 		}
2091 	} else if (n->nlmsg_flags & NLM_F_EXCL) {
2092 		tfilter_put(tp, fh);
2093 		NL_SET_ERR_MSG(extack, "Filter already exists");
2094 		err = -EEXIST;
2095 		goto errout;
2096 	}
2097 
2098 	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2099 		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2100 		err = -EINVAL;
2101 		goto errout;
2102 	}
2103 
2104 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2105 			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
2106 			      rtnl_held, extack);
2107 	if (err == 0) {
2108 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2109 			       RTM_NEWTFILTER, false, rtnl_held);
2110 		tfilter_put(tp, fh);
2111 		/* q pointer is NULL for shared blocks */
2112 		if (q)
2113 			q->flags &= ~TCQ_F_CAN_BYPASS;
2114 	}
2115 
2116 errout:
2117 	if (err && tp_created)
2118 		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2119 errout_tp:
2120 	if (chain) {
2121 		if (tp && !IS_ERR(tp))
2122 			tcf_proto_put(tp, rtnl_held, NULL);
2123 		if (!tp_created)
2124 			tcf_chain_put(chain);
2125 	}
2126 	tcf_block_release(q, block, rtnl_held);
2127 
2128 	if (rtnl_held)
2129 		rtnl_unlock();
2130 
2131 	if (err == -EAGAIN) {
2132 		/* Take the rtnl lock in case EAGAIN is caused by a concurrent
2133 		 * flush of the target chain.
2134 		 */
2135 		rtnl_held = true;
2136 		/* Replay the request. */
2137 		goto replay;
2138 	}
2139 	return err;
2140 
2141 errout_locked:
2142 	mutex_unlock(&chain->filter_chain_lock);
2143 	goto errout;
2144 }
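
/* The rtnl_held decision above keys off TCF_PROTO_OPS_DOIT_UNLOCKED:
 * classifiers that implement their own locking register with the flag set
 * and are then called without the rtnl mutex. A minimal hypothetical sketch
 * of such a registration (the "mycls"/my_cls_* names are illustrative only):
 *
 *	static struct tcf_proto_ops my_cls_ops __read_mostly = {
 *		.kind		= "mycls",
 *		.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
 *		.classify	= my_cls_classify,
 *		.init		= my_cls_init,
 *		.destroy	= my_cls_destroy,
 *		.get		= my_cls_get,
 *		.change		= my_cls_change,
 *		.delete		= my_cls_delete,
 *		.walk		= my_cls_walk,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * Without the flag, tcf_proto_is_unlocked() reports the kind as locked and
 * tc_new_tfilter() takes rtnl_lock() before touching the classifier.
 */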
2145 
2146 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2147 			  struct netlink_ext_ack *extack)
2148 {
2149 	struct net *net = sock_net(skb->sk);
2150 	struct nlattr *tca[TCA_MAX + 1];
2151 	char name[IFNAMSIZ];
2152 	struct tcmsg *t;
2153 	u32 protocol;
2154 	u32 prio;
2155 	u32 parent;
2156 	u32 chain_index;
2157 	struct Qdisc *q = NULL;
2158 	struct tcf_chain_info chain_info;
2159 	struct tcf_chain *chain = NULL;
2160 	struct tcf_block *block = NULL;
2161 	struct tcf_proto *tp = NULL;
2162 	unsigned long cl = 0;
2163 	void *fh = NULL;
2164 	int err;
2165 	bool rtnl_held = false;
2166 
2167 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2168 		return -EPERM;
2169 
2170 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2171 				     rtm_tca_policy, extack);
2172 	if (err < 0)
2173 		return err;
2174 
2175 	t = nlmsg_data(n);
2176 	protocol = TC_H_MIN(t->tcm_info);
2177 	prio = TC_H_MAJ(t->tcm_info);
2178 	parent = t->tcm_parent;
2179 
2180 	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2181 		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2182 		return -ENOENT;
2183 	}
2184 
2185 	/* Find head of filter chain. */
2186 
2187 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2188 	if (err)
2189 		return err;
2190 
2191 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2192 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2193 		err = -EINVAL;
2194 		goto errout;
2195 	}
2196 	/* Take the rtnl mutex if flushing the whole chain, the block is shared
2197 	 * (no qdisc found), the qdisc is not unlocked, the classifier type is
2198 	 * not specified, or the classifier is not unlocked.
2199 	 */
2200 	if (!prio ||
2201 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2202 	    !tcf_proto_is_unlocked(name)) {
2203 		rtnl_held = true;
2204 		rtnl_lock();
2205 	}
2206 
2207 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2208 	if (err)
2209 		goto errout;
2210 
2211 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2212 				 extack);
2213 	if (IS_ERR(block)) {
2214 		err = PTR_ERR(block);
2215 		goto errout;
2216 	}
2217 
2218 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2219 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2220 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2221 		err = -EINVAL;
2222 		goto errout;
2223 	}
2224 	chain = tcf_chain_get(block, chain_index, false);
2225 	if (!chain) {
2226 		/* User requested flush on non-existent chain. Nothing to do,
2227 		 * so just return success.
2228 		 */
2229 		if (prio == 0) {
2230 			err = 0;
2231 			goto errout;
2232 		}
2233 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2234 		err = -ENOENT;
2235 		goto errout;
2236 	}
2237 
2238 	if (prio == 0) {
2239 		tfilter_notify_chain(net, skb, block, q, parent, n,
2240 				     chain, RTM_DELTFILTER, rtnl_held);
2241 		tcf_chain_flush(chain, rtnl_held);
2242 		err = 0;
2243 		goto errout;
2244 	}
2245 
2246 	mutex_lock(&chain->filter_chain_lock);
2247 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2248 			       prio, false);
2249 	if (!tp || IS_ERR(tp)) {
2250 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2251 		err = tp ? PTR_ERR(tp) : -ENOENT;
2252 		goto errout_locked;
2253 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2254 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2255 		err = -EINVAL;
2256 		goto errout_locked;
2257 	} else if (t->tcm_handle == 0) {
2258 		tcf_proto_signal_destroying(chain, tp);
2259 		tcf_chain_tp_remove(chain, &chain_info, tp);
2260 		mutex_unlock(&chain->filter_chain_lock);
2261 
2262 		tcf_proto_put(tp, rtnl_held, NULL);
2263 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2264 			       RTM_DELTFILTER, false, rtnl_held);
2265 		err = 0;
2266 		goto errout;
2267 	}
2268 	mutex_unlock(&chain->filter_chain_lock);
2269 
2270 	fh = tp->ops->get(tp, t->tcm_handle);
2271 
2272 	if (!fh) {
2273 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2274 		err = -ENOENT;
2275 	} else {
2276 		bool last;
2277 
2278 		err = tfilter_del_notify(net, skb, n, tp, block,
2279 					 q, parent, fh, false, &last,
2280 					 rtnl_held, extack);
2281 
2282 		if (err)
2283 			goto errout;
2284 		if (last)
2285 			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2286 	}
2287 
2288 errout:
2289 	if (chain) {
2290 		if (tp && !IS_ERR(tp))
2291 			tcf_proto_put(tp, rtnl_held, NULL);
2292 		tcf_chain_put(chain);
2293 	}
2294 	tcf_block_release(q, block, rtnl_held);
2295 
2296 	if (rtnl_held)
2297 		rtnl_unlock();
2298 
2299 	return err;
2300 
2301 errout_locked:
2302 	mutex_unlock(&chain->filter_chain_lock);
2303 	goto errout;
2304 }
2305 
2306 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2307 			  struct netlink_ext_ack *extack)
2308 {
2309 	struct net *net = sock_net(skb->sk);
2310 	struct nlattr *tca[TCA_MAX + 1];
2311 	char name[IFNAMSIZ];
2312 	struct tcmsg *t;
2313 	u32 protocol;
2314 	u32 prio;
2315 	u32 parent;
2316 	u32 chain_index;
2317 	struct Qdisc *q = NULL;
2318 	struct tcf_chain_info chain_info;
2319 	struct tcf_chain *chain = NULL;
2320 	struct tcf_block *block = NULL;
2321 	struct tcf_proto *tp = NULL;
2322 	unsigned long cl = 0;
2323 	void *fh = NULL;
2324 	int err;
2325 	bool rtnl_held = false;
2326 
2327 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2328 				     rtm_tca_policy, extack);
2329 	if (err < 0)
2330 		return err;
2331 
2332 	t = nlmsg_data(n);
2333 	protocol = TC_H_MIN(t->tcm_info);
2334 	prio = TC_H_MAJ(t->tcm_info);
2335 	parent = t->tcm_parent;
2336 
2337 	if (prio == 0) {
2338 		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2339 		return -ENOENT;
2340 	}
2341 
2342 	/* Find head of filter chain. */
2343 
2344 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2345 	if (err)
2346 		return err;
2347 
2348 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2349 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2350 		err = -EINVAL;
2351 		goto errout;
2352 	}
2353 	/* Take the rtnl mutex if the block is shared (no qdisc found), the
2354 	 * qdisc is not unlocked, the classifier type is not specified, or the
2355 	 * classifier is not unlocked.
2356 	 */
2357 	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2358 	    !tcf_proto_is_unlocked(name)) {
2359 		rtnl_held = true;
2360 		rtnl_lock();
2361 	}
2362 
2363 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2364 	if (err)
2365 		goto errout;
2366 
2367 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2368 				 extack);
2369 	if (IS_ERR(block)) {
2370 		err = PTR_ERR(block);
2371 		goto errout;
2372 	}
2373 
2374 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2375 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2376 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2377 		err = -EINVAL;
2378 		goto errout;
2379 	}
2380 	chain = tcf_chain_get(block, chain_index, false);
2381 	if (!chain) {
2382 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2383 		err = -EINVAL;
2384 		goto errout;
2385 	}
2386 
2387 	mutex_lock(&chain->filter_chain_lock);
2388 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2389 			       prio, false);
2390 	mutex_unlock(&chain->filter_chain_lock);
2391 	if (!tp || IS_ERR(tp)) {
2392 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2393 		err = tp ? PTR_ERR(tp) : -ENOENT;
2394 		goto errout;
2395 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2396 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2397 		err = -EINVAL;
2398 		goto errout;
2399 	}
2400 
2401 	fh = tp->ops->get(tp, t->tcm_handle);
2402 
2403 	if (!fh) {
2404 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2405 		err = -ENOENT;
2406 	} else {
2407 		err = tfilter_notify(net, skb, n, tp, block, q, parent,
2408 				     fh, RTM_NEWTFILTER, true, rtnl_held);
2409 		if (err < 0)
2410 			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2411 	}
2412 
2413 	tfilter_put(tp, fh);
2414 errout:
2415 	if (chain) {
2416 		if (tp && !IS_ERR(tp))
2417 			tcf_proto_put(tp, rtnl_held, NULL);
2418 		tcf_chain_put(chain);
2419 	}
2420 	tcf_block_release(q, block, rtnl_held);
2421 
2422 	if (rtnl_held)
2423 		rtnl_unlock();
2424 
2425 	return err;
2426 }
2427 
2428 struct tcf_dump_args {
2429 	struct tcf_walker w;
2430 	struct sk_buff *skb;
2431 	struct netlink_callback *cb;
2432 	struct tcf_block *block;
2433 	struct Qdisc *q;
2434 	u32 parent;
2435 };
2436 
2437 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2438 {
2439 	struct tcf_dump_args *a = (void *)arg;
2440 	struct net *net = sock_net(a->skb->sk);
2441 
2442 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2443 			     n, NETLINK_CB(a->cb->skb).portid,
2444 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2445 			     RTM_NEWTFILTER, true);
2446 }
2447 
2448 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2449 			   struct sk_buff *skb, struct netlink_callback *cb,
2450 			   long index_start, long *p_index)
2451 {
2452 	struct net *net = sock_net(skb->sk);
2453 	struct tcf_block *block = chain->block;
2454 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2455 	struct tcf_proto *tp, *tp_prev;
2456 	struct tcf_dump_args arg;
2457 
2458 	for (tp = __tcf_get_next_proto(chain, NULL);
2459 	     tp;
2460 	     tp_prev = tp,
2461 		     tp = __tcf_get_next_proto(chain, tp),
2462 		     tcf_proto_put(tp_prev, true, NULL),
2463 		     (*p_index)++) {
2464 		if (*p_index < index_start)
2465 			continue;
2466 		if (TC_H_MAJ(tcm->tcm_info) &&
2467 		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
2468 			continue;
2469 		if (TC_H_MIN(tcm->tcm_info) &&
2470 		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
2471 			continue;
2472 		if (*p_index > index_start)
2473 			memset(&cb->args[1], 0,
2474 			       sizeof(cb->args) - sizeof(cb->args[0]));
2475 		if (cb->args[1] == 0) {
2476 			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2477 					  NETLINK_CB(cb->skb).portid,
2478 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
2479 					  RTM_NEWTFILTER, true) <= 0)
2480 				goto errout;
2481 			cb->args[1] = 1;
2482 		}
2483 		if (!tp->ops->walk)
2484 			continue;
2485 		arg.w.fn = tcf_node_dump;
2486 		arg.skb = skb;
2487 		arg.cb = cb;
2488 		arg.block = block;
2489 		arg.q = q;
2490 		arg.parent = parent;
2491 		arg.w.stop = 0;
2492 		arg.w.skip = cb->args[1] - 1;
2493 		arg.w.count = 0;
2494 		arg.w.cookie = cb->args[2];
2495 		tp->ops->walk(tp, &arg.w, true);
2496 		cb->args[2] = arg.w.cookie;
2497 		cb->args[1] = arg.w.count + 1;
2498 		if (arg.w.stop)
2499 			goto errout;
2500 	}
2501 	return true;
2502 
2503 errout:
2504 	tcf_proto_put(tp, true, NULL);
2505 	return false;
2506 }
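
/* The ->walk() contract used above: the core pre-loads arg->skip with the
 * number of nodes already dumped (cb->args[1] - 1), the classifier calls
 * arg->fn() for each remaining filter and sets arg->stop once fn() reports
 * a full skb. A hedged sketch for a hypothetical list-based classifier
 * (my_cls_* names are made up):
 *
 *	static void my_cls_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 *				bool rtnl_held)
 *	{
 *		struct my_cls_head *head = rtnl_dereference(tp->root);
 *		struct my_cls_filter *f;
 *
 *		list_for_each_entry(f, &head->filters, list) {
 *			if (arg->count < arg->skip) {
 *				arg->count++;
 *				continue;
 *			}
 *			if (arg->fn(tp, f, arg) < 0) {
 *				arg->stop = 1;
 *				break;
 *			}
 *			arg->count++;
 *		}
 *	}
 *
 * arg->cookie lets IDR-based classifiers resume by handle rather than by
 * position, which is why cb->args[2] is saved and restored around the call.
 */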
2507 
2508 /* called with RTNL */
2509 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2510 {
2511 	struct tcf_chain *chain, *chain_prev;
2512 	struct net *net = sock_net(skb->sk);
2513 	struct nlattr *tca[TCA_MAX + 1];
2514 	struct Qdisc *q = NULL;
2515 	struct tcf_block *block;
2516 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2517 	long index_start;
2518 	long index;
2519 	u32 parent;
2520 	int err;
2521 
2522 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2523 		return skb->len;
2524 
2525 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2526 				     NULL, cb->extack);
2527 	if (err)
2528 		return err;
2529 
2530 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2531 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2532 		if (!block)
2533 			goto out;
2534 		/* If we work with a block index, q is NULL and the parent value
2535 		 * will never be used in the following code. The check
2536 		 * in tcf_fill_node prevents it. However, the compiler cannot
2537 		 * see that far, so set parent to zero to silence the warning
2538 		 * about parent being used uninitialized.
2539 		 */
2540 		parent = 0;
2541 	} else {
2542 		const struct Qdisc_class_ops *cops;
2543 		struct net_device *dev;
2544 		unsigned long cl = 0;
2545 
2546 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2547 		if (!dev)
2548 			return skb->len;
2549 
2550 		parent = tcm->tcm_parent;
2551 		if (!parent) {
2552 			q = dev->qdisc;
2553 			parent = q->handle;
2554 		} else {
2555 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2556 		}
2557 		if (!q)
2558 			goto out;
2559 		cops = q->ops->cl_ops;
2560 		if (!cops)
2561 			goto out;
2562 		if (!cops->tcf_block)
2563 			goto out;
2564 		if (TC_H_MIN(tcm->tcm_parent)) {
2565 			cl = cops->find(q, tcm->tcm_parent);
2566 			if (cl == 0)
2567 				goto out;
2568 		}
2569 		block = cops->tcf_block(q, cl, NULL);
2570 		if (!block)
2571 			goto out;
2572 		if (tcf_block_shared(block))
2573 			q = NULL;
2574 	}
2575 
2576 	index_start = cb->args[0];
2577 	index = 0;
2578 
2579 	for (chain = __tcf_get_next_chain(block, NULL);
2580 	     chain;
2581 	     chain_prev = chain,
2582 		     chain = __tcf_get_next_chain(block, chain),
2583 		     tcf_chain_put(chain_prev)) {
2584 		if (tca[TCA_CHAIN] &&
2585 		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2586 			continue;
2587 		if (!tcf_chain_dump(chain, q, parent, skb, cb,
2588 				    index_start, &index)) {
2589 			tcf_chain_put(chain);
2590 			err = -EMSGSIZE;
2591 			break;
2592 		}
2593 	}
2594 
2595 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2596 		tcf_block_refcnt_put(block, true);
2597 	cb->args[0] = index;
2598 
2599 out:
2600 	/* If we made no progress, the error (EMSGSIZE) is real */
2601 	if (skb->len == 0 && err)
2602 		return err;
2603 	return skb->len;
2604 }
2605 
2606 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2607 			      void *tmplt_priv, u32 chain_index,
2608 			      struct net *net, struct sk_buff *skb,
2609 			      struct tcf_block *block,
2610 			      u32 portid, u32 seq, u16 flags, int event)
2611 {
2612 	unsigned char *b = skb_tail_pointer(skb);
2613 	const struct tcf_proto_ops *ops;
2614 	struct nlmsghdr *nlh;
2615 	struct tcmsg *tcm;
2616 	void *priv;
2617 
2618 	ops = tmplt_ops;
2619 	priv = tmplt_priv;
2620 
2621 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2622 	if (!nlh)
2623 		goto out_nlmsg_trim;
2624 	tcm = nlmsg_data(nlh);
2625 	tcm->tcm_family = AF_UNSPEC;
2626 	tcm->tcm__pad1 = 0;
2627 	tcm->tcm__pad2 = 0;
2628 	tcm->tcm_handle = 0;
2629 	if (block->q) {
2630 		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2631 		tcm->tcm_parent = block->q->handle;
2632 	} else {
2633 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2634 		tcm->tcm_block_index = block->index;
2635 	}
2636 
2637 	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2638 		goto nla_put_failure;
2639 
2640 	if (ops) {
2641 		if (nla_put_string(skb, TCA_KIND, ops->kind))
2642 			goto nla_put_failure;
2643 		if (ops->tmplt_dump(skb, net, priv) < 0)
2644 			goto nla_put_failure;
2645 	}
2646 
2647 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2648 	return skb->len;
2649 
2650 out_nlmsg_trim:
2651 nla_put_failure:
2652 	nlmsg_trim(skb, b);
2653 	return -EMSGSIZE;
2654 }
2655 
2656 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2657 			   u32 seq, u16 flags, int event, bool unicast)
2658 {
2659 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2660 	struct tcf_block *block = chain->block;
2661 	struct net *net = block->net;
2662 	struct sk_buff *skb;
2663 	int err = 0;
2664 
2665 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2666 	if (!skb)
2667 		return -ENOBUFS;
2668 
2669 	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2670 			       chain->index, net, skb, block, portid,
2671 			       seq, flags, event) <= 0) {
2672 		kfree_skb(skb);
2673 		return -EINVAL;
2674 	}
2675 
2676 	if (unicast)
2677 		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2678 	else
2679 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2680 				     flags & NLM_F_ECHO);
2681 
2682 	if (err > 0)
2683 		err = 0;
2684 	return err;
2685 }
2686 
2687 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2688 				  void *tmplt_priv, u32 chain_index,
2689 				  struct tcf_block *block, struct sk_buff *oskb,
2690 				  u32 seq, u16 flags, bool unicast)
2691 {
2692 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2693 	struct net *net = block->net;
2694 	struct sk_buff *skb;
2695 
2696 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2697 	if (!skb)
2698 		return -ENOBUFS;
2699 
2700 	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2701 			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2702 		kfree_skb(skb);
2703 		return -EINVAL;
2704 	}
2705 
2706 	if (unicast)
2707 		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2708 
2709 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2710 }
2711 
2712 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2713 			      struct nlattr **tca,
2714 			      struct netlink_ext_ack *extack)
2715 {
2716 	const struct tcf_proto_ops *ops;
2717 	char name[IFNAMSIZ];
2718 	void *tmplt_priv;
2719 
2720 	/* If kind is not set, user did not specify template. */
2721 	if (!tca[TCA_KIND])
2722 		return 0;
2723 
2724 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2725 		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2726 		return -EINVAL;
2727 	}
2728 
2729 	ops = tcf_proto_lookup_ops(name, true, extack);
2730 	if (IS_ERR(ops))
2731 		return PTR_ERR(ops);
2732 	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2733 		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2734 		return -EOPNOTSUPP;
2735 	}
2736 
2737 	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2738 	if (IS_ERR(tmplt_priv)) {
2739 		module_put(ops->owner);
2740 		return PTR_ERR(tmplt_priv);
2741 	}
2742 	chain->tmplt_ops = ops;
2743 	chain->tmplt_priv = tmplt_priv;
2744 	return 0;
2745 }
2746 
2747 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2748 			       void *tmplt_priv)
2749 {
2750 	/* If template ops are not set, there is no work to do for us. */
2751 	if (!tmplt_ops)
2752 		return;
2753 
2754 	tmplt_ops->tmplt_destroy(tmplt_priv);
2755 	module_put(tmplt_ops->owner);
2756 }
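
/* Chain templates are implemented by the classifier itself through three
 * optional ops, all of which must be present for tc_chain_tmplt_add() to
 * accept the kind. A hedged sketch of the create/destroy pair (my_tmplt and
 * my_cls_* are hypothetical names):
 *
 *	static void *my_cls_tmplt_create(struct net *net,
 *					 struct tcf_chain *chain,
 *					 struct nlattr **tca,
 *					 struct netlink_ext_ack *extack)
 *	{
 *		struct my_tmplt *tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
 *
 *		if (!tmplt)
 *			return ERR_PTR(-ENOMEM);
 *		(parse tca[TCA_OPTIONS] into tmplt here)
 *		return tmplt;
 *	}
 *
 *	static void my_cls_tmplt_destroy(void *tmplt_priv)
 *	{
 *		kfree(tmplt_priv);
 *	}
 *
 * plus an int my_cls_tmplt_dump(struct sk_buff *skb, struct net *net,
 * void *tmplt_priv) emitting the template attributes, which is what
 * tc_chain_fill_node() invokes through ->tmplt_dump() above.
 */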
2757 
2758 /* Add/delete/get a chain */
2759 
2760 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2761 			struct netlink_ext_ack *extack)
2762 {
2763 	struct net *net = sock_net(skb->sk);
2764 	struct nlattr *tca[TCA_MAX + 1];
2765 	struct tcmsg *t;
2766 	u32 parent;
2767 	u32 chain_index;
2768 	struct Qdisc *q = NULL;
2769 	struct tcf_chain *chain = NULL;
2770 	struct tcf_block *block;
2771 	unsigned long cl;
2772 	int err;
2773 
2774 	if (n->nlmsg_type != RTM_GETCHAIN &&
2775 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2776 		return -EPERM;
2777 
2778 replay:
2779 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2780 				     rtm_tca_policy, extack);
2781 	if (err < 0)
2782 		return err;
2783 
2784 	t = nlmsg_data(n);
2785 	parent = t->tcm_parent;
2786 	cl = 0;
2787 
2788 	block = tcf_block_find(net, &q, &parent, &cl,
2789 			       t->tcm_ifindex, t->tcm_block_index, extack);
2790 	if (IS_ERR(block))
2791 		return PTR_ERR(block);
2792 
2793 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2794 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2795 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2796 		err = -EINVAL;
2797 		goto errout_block;
2798 	}
2799 
2800 	mutex_lock(&block->lock);
2801 	chain = tcf_chain_lookup(block, chain_index);
2802 	if (n->nlmsg_type == RTM_NEWCHAIN) {
2803 		if (chain) {
2804 			if (tcf_chain_held_by_acts_only(chain)) {
2805 				/* The chain exists only because there is
2806 				 * some action referencing it.
2807 				 */
2808 				tcf_chain_hold(chain);
2809 			} else {
2810 				NL_SET_ERR_MSG(extack, "Filter chain already exists");
2811 				err = -EEXIST;
2812 				goto errout_block_locked;
2813 			}
2814 		} else {
2815 			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2816 				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2817 				err = -ENOENT;
2818 				goto errout_block_locked;
2819 			}
2820 			chain = tcf_chain_create(block, chain_index);
2821 			if (!chain) {
2822 				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2823 				err = -ENOMEM;
2824 				goto errout_block_locked;
2825 			}
2826 		}
2827 	} else {
2828 		if (!chain || tcf_chain_held_by_acts_only(chain)) {
2829 			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2830 			err = -EINVAL;
2831 			goto errout_block_locked;
2832 		}
2833 		tcf_chain_hold(chain);
2834 	}
2835 
2836 	if (n->nlmsg_type == RTM_NEWCHAIN) {
2837 		/* Modifying chain requires holding parent block lock. In case
2838 		 * the chain was successfully added, take a reference to the
2839 		 * chain. This ensures that an empty chain does not disappear at
2840 		 * the end of this function.
2841 		 */
2842 		tcf_chain_hold(chain);
2843 		chain->explicitly_created = true;
2844 	}
2845 	mutex_unlock(&block->lock);
2846 
2847 	switch (n->nlmsg_type) {
2848 	case RTM_NEWCHAIN:
2849 		err = tc_chain_tmplt_add(chain, net, tca, extack);
2850 		if (err) {
2851 			tcf_chain_put_explicitly_created(chain);
2852 			goto errout;
2853 		}
2854 
2855 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2856 				RTM_NEWCHAIN, false);
2857 		break;
2858 	case RTM_DELCHAIN:
2859 		tfilter_notify_chain(net, skb, block, q, parent, n,
2860 				     chain, RTM_DELTFILTER, true);
2861 		/* Flush the chain first as the user requested chain removal. */
2862 		tcf_chain_flush(chain, true);
2863 		/* In case the chain was successfully deleted, put a reference
2864 		 * to the chain previously taken during addition.
2865 		 */
2866 		tcf_chain_put_explicitly_created(chain);
2867 		break;
2868 	case RTM_GETCHAIN:
2869 		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2870 				      n->nlmsg_seq, n->nlmsg_type, true);
2871 		if (err < 0)
2872 			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2873 		break;
2874 	default:
2875 		err = -EOPNOTSUPP;
2876 		NL_SET_ERR_MSG(extack, "Unsupported message type");
2877 		goto errout;
2878 	}
2879 
2880 errout:
2881 	tcf_chain_put(chain);
2882 errout_block:
2883 	tcf_block_release(q, block, true);
2884 	if (err == -EAGAIN)
2885 		/* Replay the request. */
2886 		goto replay;
2887 	return err;
2888 
2889 errout_block_locked:
2890 	mutex_unlock(&block->lock);
2891 	goto errout_block;
2892 }
2893 
2894 /* called with RTNL */
2895 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2896 {
2897 	struct net *net = sock_net(skb->sk);
2898 	struct nlattr *tca[TCA_MAX + 1];
2899 	struct Qdisc *q = NULL;
2900 	struct tcf_block *block;
2901 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2902 	struct tcf_chain *chain;
2903 	long index_start;
2904 	long index;
2905 	u32 parent;
2906 	int err;
2907 
2908 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2909 		return skb->len;
2910 
2911 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2912 				     rtm_tca_policy, cb->extack);
2913 	if (err)
2914 		return err;
2915 
2916 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2917 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2918 		if (!block)
2919 			goto out;
2920 		/* If we work with a block index, q is NULL and the parent value
2921 		 * will never be used in the following code. The check
2922 		 * in tcf_fill_node prevents it. However, the compiler cannot
2923 		 * see that far, so set parent to zero to silence the warning
2924 		 * about parent being used uninitialized.
2925 		 */
2926 		parent = 0;
2927 	} else {
2928 		const struct Qdisc_class_ops *cops;
2929 		struct net_device *dev;
2930 		unsigned long cl = 0;
2931 
2932 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2933 		if (!dev)
2934 			return skb->len;
2935 
2936 		parent = tcm->tcm_parent;
2937 		if (!parent) {
2938 			q = dev->qdisc;
2939 			parent = q->handle;
2940 		} else {
2941 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2942 		}
2943 		if (!q)
2944 			goto out;
2945 		cops = q->ops->cl_ops;
2946 		if (!cops)
2947 			goto out;
2948 		if (!cops->tcf_block)
2949 			goto out;
2950 		if (TC_H_MIN(tcm->tcm_parent)) {
2951 			cl = cops->find(q, tcm->tcm_parent);
2952 			if (cl == 0)
2953 				goto out;
2954 		}
2955 		block = cops->tcf_block(q, cl, NULL);
2956 		if (!block)
2957 			goto out;
2958 		if (tcf_block_shared(block))
2959 			q = NULL;
2960 	}
2961 
2962 	index_start = cb->args[0];
2963 	index = 0;
2964 
2965 	mutex_lock(&block->lock);
2966 	list_for_each_entry(chain, &block->chain_list, list) {
2967 		if ((tca[TCA_CHAIN] &&
2968 		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2969 			continue;
2970 		if (index < index_start) {
2971 			index++;
2972 			continue;
2973 		}
2974 		if (tcf_chain_held_by_acts_only(chain))
2975 			continue;
2976 		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2977 					 chain->index, net, skb, block,
2978 					 NETLINK_CB(cb->skb).portid,
2979 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2980 					 RTM_NEWCHAIN);
2981 		if (err <= 0)
2982 			break;
2983 		index++;
2984 	}
2985 	mutex_unlock(&block->lock);
2986 
2987 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2988 		tcf_block_refcnt_put(block, true);
2989 	cb->args[0] = index;
2990 
2991 out:
2992 	/* If we made no progress, the error (EMSGSIZE) is real */
2993 	if (skb->len == 0 && err)
2994 		return err;
2995 	return skb->len;
2996 }
2997 
2998 void tcf_exts_destroy(struct tcf_exts *exts)
2999 {
3000 #ifdef CONFIG_NET_CLS_ACT
3001 	if (exts->actions) {
3002 		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3003 		kfree(exts->actions);
3004 	}
3005 	exts->nr_actions = 0;
3006 #endif
3007 }
3008 EXPORT_SYMBOL(tcf_exts_destroy);
3009 
3010 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3011 		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
3012 		      bool rtnl_held, struct netlink_ext_ack *extack)
3013 {
3014 #ifdef CONFIG_NET_CLS_ACT
3015 	{
3016 		struct tc_action *act;
3017 		size_t attr_size = 0;
3018 
3019 		if (exts->police && tb[exts->police]) {
3020 			act = tcf_action_init_1(net, tp, tb[exts->police],
3021 						rate_tlv, "police", ovr,
3022 						TCA_ACT_BIND, rtnl_held,
3023 						extack);
3024 			if (IS_ERR(act))
3025 				return PTR_ERR(act);
3026 
3027 			act->type = exts->type = TCA_OLD_COMPAT;
3028 			exts->actions[0] = act;
3029 			exts->nr_actions = 1;
3030 		} else if (exts->action && tb[exts->action]) {
3031 			int err;
3032 
3033 			err = tcf_action_init(net, tp, tb[exts->action],
3034 					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
3035 					      exts->actions, &attr_size,
3036 					      rtnl_held, extack);
3037 			if (err < 0)
3038 				return err;
3039 			exts->nr_actions = err;
3040 		}
3041 	}
3042 #else
3043 	if ((exts->action && tb[exts->action]) ||
3044 	    (exts->police && tb[exts->police])) {
3045 		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3046 		return -EOPNOTSUPP;
3047 	}
3048 #endif
3049 
3050 	return 0;
3051 }
3052 EXPORT_SYMBOL(tcf_exts_validate);
3053 
3054 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3055 {
3056 #ifdef CONFIG_NET_CLS_ACT
3057 	struct tcf_exts old = *dst;
3058 
3059 	*dst = *src;
3060 	tcf_exts_destroy(&old);
3061 #endif
3062 }
3063 EXPORT_SYMBOL(tcf_exts_change);
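
/* Together, tcf_exts_validate() and tcf_exts_change() give classifiers a
 * two-step update: validate the new actions into a temporary tcf_exts, then
 * swap it into the filter so the old actions are released only once the new
 * ones are known good. A hedged sketch of the idiom in a classifier's
 * ->change() path (f, tb and est are hypothetical locals, and e is assumed
 * to have been prepared with tcf_exts_init()):
 *
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
 *	if (err < 0)
 *		return err;
 *	tcf_exts_change(&f->exts, &e);
 *
 * On failure nothing is swapped, so f->exts keeps its previously validated
 * actions; on success tcf_exts_change() destroys the old actions for us.
 */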
3064 
3065 #ifdef CONFIG_NET_CLS_ACT
3066 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3067 {
3068 	if (exts->nr_actions == 0)
3069 		return NULL;
3070 	else
3071 		return exts->actions[0];
3072 }
3073 #endif
3074 
3075 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3076 {
3077 #ifdef CONFIG_NET_CLS_ACT
3078 	struct nlattr *nest;
3079 
3080 	if (exts->action && tcf_exts_has_actions(exts)) {
3081 		/*
3082 		 * Again for backward-compatible mode - we want
3083 		 * to work with both old and new modes of entering
3084 		 * tc data even if iproute2 was newer - jhs
3085 		 */
3086 		if (exts->type != TCA_OLD_COMPAT) {
3087 			nest = nla_nest_start_noflag(skb, exts->action);
3088 			if (nest == NULL)
3089 				goto nla_put_failure;
3090 
3091 			if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
3092 				goto nla_put_failure;
3093 			nla_nest_end(skb, nest);
3094 		} else if (exts->police) {
3095 			struct tc_action *act = tcf_exts_first_act(exts);
3096 			nest = nla_nest_start_noflag(skb, exts->police);
3097 			if (nest == NULL || !act)
3098 				goto nla_put_failure;
3099 			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3100 				goto nla_put_failure;
3101 			nla_nest_end(skb, nest);
3102 		}
3103 	}
3104 	return 0;
3105 
3106 nla_put_failure:
3107 	nla_nest_cancel(skb, nest);
3108 	return -1;
3109 #else
3110 	return 0;
3111 #endif
3112 }
3113 EXPORT_SYMBOL(tcf_exts_dump);
3114 
3115 
3116 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3117 {
3118 #ifdef CONFIG_NET_CLS_ACT
3119 	struct tc_action *a = tcf_exts_first_act(exts);
3120 	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3121 		return -1;
3122 #endif
3123 	return 0;
3124 }
3125 EXPORT_SYMBOL(tcf_exts_dump_stats);
3126 
3127 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3128 {
3129 	if (*flags & TCA_CLS_FLAGS_IN_HW)
3130 		return;
3131 	*flags |= TCA_CLS_FLAGS_IN_HW;
3132 	atomic_inc(&block->offloadcnt);
3133 }
3134 
3135 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3136 {
3137 	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3138 		return;
3139 	*flags &= ~TCA_CLS_FLAGS_IN_HW;
3140 	atomic_dec(&block->offloadcnt);
3141 }
3142 
3143 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3144 				      struct tcf_proto *tp, u32 *cnt,
3145 				      u32 *flags, u32 diff, bool add)
3146 {
3147 	lockdep_assert_held(&block->cb_lock);
3148 
3149 	spin_lock(&tp->lock);
3150 	if (add) {
3151 		if (!*cnt)
3152 			tcf_block_offload_inc(block, flags);
3153 		*cnt += diff;
3154 	} else {
3155 		*cnt -= diff;
3156 		if (!*cnt)
3157 			tcf_block_offload_dec(block, flags);
3158 	}
3159 	spin_unlock(&tp->lock);
3160 }
3161 
3162 static void
3163 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3164 			 u32 *cnt, u32 *flags)
3165 {
3166 	lockdep_assert_held(&block->cb_lock);
3167 
3168 	spin_lock(&tp->lock);
3169 	tcf_block_offload_dec(block, flags);
3170 	*cnt = 0;
3171 	spin_unlock(&tp->lock);
3172 }
3173 
3174 static int
3175 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3176 		   void *type_data, bool err_stop)
3177 {
3178 	struct flow_block_cb *block_cb;
3179 	int ok_count = 0;
3180 	int err;
3181 
3182 	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3183 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
3184 		if (err) {
3185 			if (err_stop)
3186 				return err;
3187 		} else {
3188 			ok_count++;
3189 		}
3190 	}
3191 	return ok_count;
3192 }
3193 
3194 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3195 		     void *type_data, bool err_stop, bool rtnl_held)
3196 {
3197 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3198 	int ok_count;
3199 
3200 retry:
3201 	if (take_rtnl)
3202 		rtnl_lock();
3203 	down_read(&block->cb_lock);
3204 	/* We need to obtain the rtnl lock if the block is bound to devs that
3205 	 * require it. In the block bind code, cb_lock is obtained while
3206 	 * holding rtnl, so we must obtain the locks in the same order here.
3207 	 */
3208 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3209 		up_read(&block->cb_lock);
3210 		take_rtnl = true;
3211 		goto retry;
3212 	}
3213 
3214 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3215 
3216 	up_read(&block->cb_lock);
3217 	if (take_rtnl)
3218 		rtnl_unlock();
3219 	return ok_count;
3220 }
3221 EXPORT_SYMBOL(tc_setup_cb_call);
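
/* tc_setup_cb_call() is the stateless variant, used for commands that do not
 * change what is offloaded, such as pulling hardware stats. A hedged sketch
 * of a classifier querying counters for one of its filters (f and its fields
 * are hypothetical; block, tp and rtnl_held are as in the surrounding
 * helpers):
 *
 *	struct flow_cls_offload cls = {};
 *
 *	tc_cls_common_offload_init(&cls.common, tp, f->flags, NULL);
 *	cls.command = FLOW_CLS_STATS;
 *	cls.cookie = (unsigned long)f;
 *
 *	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls, false, rtnl_held);
 *
 * err_stop is false here because one driver failing to return stats should
 * not abort the remaining callbacks on a shared block.
 */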
3222 
3223 /* Non-destructive filter add. If a filter that wasn't already in hardware
3224  * is successfully offloaded, increment the block offloads counter. On
3225  * failure, a previously offloaded filter is considered to be intact and
3226  * the offloads counter is not decremented.
3227  */
3228 
3229 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3230 		    enum tc_setup_type type, void *type_data, bool err_stop,
3231 		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3232 {
3233 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3234 	int ok_count;
3235 
3236 retry:
3237 	if (take_rtnl)
3238 		rtnl_lock();
3239 	down_read(&block->cb_lock);
3240 	/* We need to obtain the rtnl lock if the block is bound to devs that
3241 	 * require it. In the block bind code, cb_lock is obtained while
3242 	 * holding rtnl, so we must obtain the locks in the same order here.
3243 	 */
3244 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3245 		up_read(&block->cb_lock);
3246 		take_rtnl = true;
3247 		goto retry;
3248 	}
3249 
3250 	/* Make sure all netdevs sharing this block are offload-capable. */
3251 	if (block->nooffloaddevcnt && err_stop) {
3252 		ok_count = -EOPNOTSUPP;
3253 		goto err_unlock;
3254 	}
3255 
3256 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3257 	if (ok_count < 0)
3258 		goto err_unlock;
3259 
3260 	if (tp->ops->hw_add)
3261 		tp->ops->hw_add(tp, type_data);
3262 	if (ok_count > 0)
3263 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3264 					  ok_count, true);
3265 err_unlock:
3266 	up_read(&block->cb_lock);
3267 	if (take_rtnl)
3268 		rtnl_unlock();
3269 	return ok_count < 0 ? ok_count : 0;
3270 }
3271 EXPORT_SYMBOL(tc_setup_cb_add);
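
/* A hedged sketch of the add path from a classifier's point of view, loosely
 * modeled on cls_flower; the filter struct f and the skip_sw flag are
 * hypothetical, and error unwinding is elided:
 *
 *	struct flow_cls_offload cls = {};
 *	int err;
 *
 *	cls.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
 *	if (!cls.rule)
 *		return -ENOMEM;
 *
 *	tc_cls_common_offload_init(&cls.common, tp, f->flags, extack);
 *	cls.command = FLOW_CLS_REPLACE;
 *	cls.cookie = (unsigned long)f;
 *	err = tc_setup_flow_action(&cls.rule->action, &f->exts, true);
 *	if (!err)
 *		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls,
 *				      skip_sw, &f->flags, &f->in_hw_count,
 *				      true);
 *
 * On success the helper has already set TCA_CLS_FLAGS_IN_HW in f->flags and
 * accumulated the per-driver successes in f->in_hw_count; passing skip_sw as
 * err_stop makes the first driver failure fatal only when a software
 * fallback was explicitly forbidden.
 */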
3272 
3273 /* Destructive filter replace. If a filter that wasn't already in hardware
3274  * is successfully offloaded, increment the block offload counter. On
3275  * failure, a previously offloaded filter is considered to be destroyed and
3276  * the offload counter is decremented.
3277  */
3278 
3279 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3280 			enum tc_setup_type type, void *type_data, bool err_stop,
3281 			u32 *old_flags, unsigned int *old_in_hw_count,
3282 			u32 *new_flags, unsigned int *new_in_hw_count,
3283 			bool rtnl_held)
3284 {
3285 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3286 	int ok_count;
3287 
3288 retry:
3289 	if (take_rtnl)
3290 		rtnl_lock();
3291 	down_read(&block->cb_lock);
3292 	/* We need to obtain the rtnl lock if the block is bound to devs that
3293 	 * require it. In the block bind code, cb_lock is obtained while
3294 	 * holding rtnl, so we must obtain the locks in the same order here.
3295 	 */
3296 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3297 		up_read(&block->cb_lock);
3298 		take_rtnl = true;
3299 		goto retry;
3300 	}
3301 
3302 	/* Make sure all netdevs sharing this block are offload-capable. */
3303 	if (block->nooffloaddevcnt && err_stop) {
3304 		ok_count = -EOPNOTSUPP;
3305 		goto err_unlock;
3306 	}
3307 
3308 	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3309 	if (tp->ops->hw_del)
3310 		tp->ops->hw_del(tp, type_data);
3311 
3312 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3313 	if (ok_count < 0)
3314 		goto err_unlock;
3315 
3316 	if (tp->ops->hw_add)
3317 		tp->ops->hw_add(tp, type_data);
3318 	if (ok_count > 0)
3319 		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3320 					  new_flags, ok_count, true);
3321 err_unlock:
3322 	up_read(&block->cb_lock);
3323 	if (take_rtnl)
3324 		rtnl_unlock();
3325 	return ok_count < 0 ? ok_count : 0;
3326 }
3327 EXPORT_SYMBOL(tc_setup_cb_replace);
3328 
3329 /* Destroy the filter and decrement the block offload counter if the filter
3330  * was previously offloaded.
3331  */
3332 
3333 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3334 			enum tc_setup_type type, void *type_data, bool err_stop,
3335 			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3336 {
3337 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3338 	int ok_count;
3339 
3340 retry:
3341 	if (take_rtnl)
3342 		rtnl_lock();
3343 	down_read(&block->cb_lock);
3344 	/* We need to obtain the rtnl lock if the block is bound to devs that
3345 	 * require it. In the block bind code, cb_lock is obtained while
3346 	 * holding rtnl, so we must obtain the locks in the same order here.
3347 	 */
3348 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3349 		up_read(&block->cb_lock);
3350 		take_rtnl = true;
3351 		goto retry;
3352 	}
3353 
3354 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3355 
3356 	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3357 	if (tp->ops->hw_del)
3358 		tp->ops->hw_del(tp, type_data);
3359 
3360 	up_read(&block->cb_lock);
3361 	if (take_rtnl)
3362 		rtnl_unlock();
3363 	return ok_count < 0 ? ok_count : 0;
3364 }
3365 EXPORT_SYMBOL(tc_setup_cb_destroy);
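
/* The matching teardown, again sketched for a hypothetical filter f:
 *
 *	struct flow_cls_offload cls = {};
 *
 *	tc_cls_common_offload_init(&cls.common, tp, f->flags, extack);
 *	cls.command = FLOW_CLS_DESTROY;
 *	cls.cookie = (unsigned long)f;
 *
 *	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls, false,
 *			    &f->flags, &f->in_hw_count, true);
 *
 * Unlike the add path, the counters are reset unconditionally: once the
 * drivers have been told to destroy, the filter must be treated as gone
 * from hardware whether or not every callback succeeded.
 */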
3366 
3367 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3368 			  bool add, flow_setup_cb_t *cb,
3369 			  enum tc_setup_type type, void *type_data,
3370 			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3371 {
3372 	int err = cb(type, type_data, cb_priv);
3373 
3374 	if (err) {
3375 		if (add && tc_skip_sw(*flags))
3376 			return err;
3377 	} else {
3378 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3379 					  add);
3380 	}
3381 
3382 	return 0;
3383 }
3384 EXPORT_SYMBOL(tc_setup_cb_reoffload);
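
/* tc_setup_cb_reoffload() is intended to be driven from a classifier's
 * ->reoffload() op, replaying every existing filter to a single callback
 * that is being bound or unbound. A hedged sketch of such a loop, where
 * for_each_filter() stands in for the classifier's own iteration and the
 * flow_cls_offload population (as in the add path) is elided:
 *
 *	static int my_cls_reoffload(struct tcf_proto *tp, bool add,
 *				    flow_setup_cb_t *cb, void *cb_priv,
 *				    struct netlink_ext_ack *extack)
 *	{
 *		struct tcf_block *block = tp->chain->block;
 *		struct my_cls_filter *f;
 *		int err;
 *
 *		for_each_filter(tp, f) {
 *			struct flow_cls_offload cls = {};
 *
 *			(fill cls for f, FLOW_CLS_REPLACE or FLOW_CLS_DESTROY
 *			 depending on add)
 *			err = tc_setup_cb_reoffload(block, tp, add, cb,
 *						    TC_SETUP_CLSFLOWER, &cls,
 *						    cb_priv, &f->flags,
 *						    &f->in_hw_count);
 *			if (err)
 *				return err;
 *		}
 *		return 0;
 *	}
 */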
3385 
3386 void tc_cleanup_flow_action(struct flow_action *flow_action)
3387 {
3388 	struct flow_action_entry *entry;
3389 	int i;
3390 
3391 	flow_action_for_each(i, entry, flow_action)
3392 		if (entry->destructor)
3393 			entry->destructor(entry->destructor_priv);
3394 }
3395 EXPORT_SYMBOL(tc_cleanup_flow_action);
3396 
3397 static void tcf_mirred_get_dev(struct flow_action_entry *entry,
3398 			       const struct tc_action *act)
3399 {
3400 #ifdef CONFIG_NET_CLS_ACT
3401 	entry->dev = act->ops->get_dev(act, &entry->destructor);
3402 	if (!entry->dev)
3403 		return;
3404 	entry->destructor_priv = entry->dev;
3405 #endif
3406 }
3407 
3408 static void tcf_tunnel_encap_put_tunnel(void *priv)
3409 {
3410 	struct ip_tunnel_info *tunnel = priv;
3411 
3412 	kfree(tunnel);
3413 }
3414 
3415 static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
3416 				       const struct tc_action *act)
3417 {
3418 	entry->tunnel = tcf_tunnel_info_copy(act);
3419 	if (!entry->tunnel)
3420 		return -ENOMEM;
3421 	entry->destructor = tcf_tunnel_encap_put_tunnel;
3422 	entry->destructor_priv = entry->tunnel;
3423 	return 0;
3424 }
3425 
3426 static void tcf_sample_get_group(struct flow_action_entry *entry,
3427 				 const struct tc_action *act)
3428 {
3429 #ifdef CONFIG_NET_CLS_ACT
3430 	entry->sample.psample_group =
3431 		act->ops->get_psample_group(act, &entry->destructor);
3432 	entry->destructor_priv = entry->sample.psample_group;
3433 #endif
3434 }
3435 
3436 int tc_setup_flow_action(struct flow_action *flow_action,
3437 			 const struct tcf_exts *exts, bool rtnl_held)
3438 {
3439 	const struct tc_action *act;
3440 	int i, j, k, err = 0;
3441 
3442 	if (!exts)
3443 		return 0;
3444 
3445 	if (!rtnl_held)
3446 		rtnl_lock();
3447 
3448 	j = 0;
3449 	tcf_exts_for_each_action(i, act, exts) {
3450 		struct flow_action_entry *entry;
3451 
3452 		entry = &flow_action->entries[j];
3453 		if (is_tcf_gact_ok(act)) {
3454 			entry->id = FLOW_ACTION_ACCEPT;
3455 		} else if (is_tcf_gact_shot(act)) {
3456 			entry->id = FLOW_ACTION_DROP;
3457 		} else if (is_tcf_gact_trap(act)) {
3458 			entry->id = FLOW_ACTION_TRAP;
3459 		} else if (is_tcf_gact_goto_chain(act)) {
3460 			entry->id = FLOW_ACTION_GOTO;
3461 			entry->chain_index = tcf_gact_goto_chain_index(act);
3462 		} else if (is_tcf_mirred_egress_redirect(act)) {
3463 			entry->id = FLOW_ACTION_REDIRECT;
3464 			tcf_mirred_get_dev(entry, act);
3465 		} else if (is_tcf_mirred_egress_mirror(act)) {
3466 			entry->id = FLOW_ACTION_MIRRED;
3467 			tcf_mirred_get_dev(entry, act);
3468 		} else if (is_tcf_mirred_ingress_redirect(act)) {
3469 			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
3470 			tcf_mirred_get_dev(entry, act);
3471 		} else if (is_tcf_mirred_ingress_mirror(act)) {
3472 			entry->id = FLOW_ACTION_MIRRED_INGRESS;
3473 			tcf_mirred_get_dev(entry, act);
3474 		} else if (is_tcf_vlan(act)) {
3475 			switch (tcf_vlan_action(act)) {
3476 			case TCA_VLAN_ACT_PUSH:
3477 				entry->id = FLOW_ACTION_VLAN_PUSH;
3478 				entry->vlan.vid = tcf_vlan_push_vid(act);
3479 				entry->vlan.proto = tcf_vlan_push_proto(act);
3480 				entry->vlan.prio = tcf_vlan_push_prio(act);
3481 				break;
3482 			case TCA_VLAN_ACT_POP:
3483 				entry->id = FLOW_ACTION_VLAN_POP;
3484 				break;
3485 			case TCA_VLAN_ACT_MODIFY:
3486 				entry->id = FLOW_ACTION_VLAN_MANGLE;
3487 				entry->vlan.vid = tcf_vlan_push_vid(act);
3488 				entry->vlan.proto = tcf_vlan_push_proto(act);
3489 				entry->vlan.prio = tcf_vlan_push_prio(act);
3490 				break;
3491 			default:
3492 				err = -EOPNOTSUPP;
3493 				goto err_out;
3494 			}
3495 		} else if (is_tcf_tunnel_set(act)) {
3496 			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3497 			err = tcf_tunnel_encap_get_tunnel(entry, act);
3498 			if (err)
3499 				goto err_out;
3500 		} else if (is_tcf_tunnel_release(act)) {
3501 			entry->id = FLOW_ACTION_TUNNEL_DECAP;
3502 		} else if (is_tcf_pedit(act)) {
3503 			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3504 				switch (tcf_pedit_cmd(act, k)) {
3505 				case TCA_PEDIT_KEY_EX_CMD_SET:
3506 					entry->id = FLOW_ACTION_MANGLE;
3507 					break;
3508 				case TCA_PEDIT_KEY_EX_CMD_ADD:
3509 					entry->id = FLOW_ACTION_ADD;
3510 					break;
3511 				default:
3512 					err = -EOPNOTSUPP;
3513 					goto err_out;
3514 				}
3515 				entry->mangle.htype = tcf_pedit_htype(act, k);
3516 				entry->mangle.mask = tcf_pedit_mask(act, k);
3517 				entry->mangle.val = tcf_pedit_val(act, k);
3518 				entry->mangle.offset = tcf_pedit_offset(act, k);
3519 				entry = &flow_action->entries[++j];
3520 			}
3521 		} else if (is_tcf_csum(act)) {
3522 			entry->id = FLOW_ACTION_CSUM;
3523 			entry->csum_flags = tcf_csum_update_flags(act);
3524 		} else if (is_tcf_skbedit_mark(act)) {
3525 			entry->id = FLOW_ACTION_MARK;
3526 			entry->mark = tcf_skbedit_mark(act);
3527 		} else if (is_tcf_sample(act)) {
3528 			entry->id = FLOW_ACTION_SAMPLE;
3529 			entry->sample.trunc_size = tcf_sample_trunc_size(act);
3530 			entry->sample.truncate = tcf_sample_truncate(act);
3531 			entry->sample.rate = tcf_sample_rate(act);
3532 			tcf_sample_get_group(entry, act);
3533 		} else if (is_tcf_police(act)) {
3534 			entry->id = FLOW_ACTION_POLICE;
3535 			entry->police.burst = tcf_police_tcfp_burst(act);
3536 			entry->police.rate_bytes_ps =
3537 				tcf_police_rate_bytes_ps(act);
3538 		} else if (is_tcf_ct(act)) {
3539 			entry->id = FLOW_ACTION_CT;
3540 			entry->ct.action = tcf_ct_action(act);
3541 			entry->ct.zone = tcf_ct_zone(act);
3542 		} else if (is_tcf_mpls(act)) {
3543 			switch (tcf_mpls_action(act)) {
3544 			case TCA_MPLS_ACT_PUSH:
3545 				entry->id = FLOW_ACTION_MPLS_PUSH;
3546 				entry->mpls_push.proto = tcf_mpls_proto(act);
3547 				entry->mpls_push.label = tcf_mpls_label(act);
3548 				entry->mpls_push.tc = tcf_mpls_tc(act);
3549 				entry->mpls_push.bos = tcf_mpls_bos(act);
3550 				entry->mpls_push.ttl = tcf_mpls_ttl(act);
3551 				break;
3552 			case TCA_MPLS_ACT_POP:
3553 				entry->id = FLOW_ACTION_MPLS_POP;
3554 				entry->mpls_pop.proto = tcf_mpls_proto(act);
3555 				break;
3556 			case TCA_MPLS_ACT_MODIFY:
3557 				entry->id = FLOW_ACTION_MPLS_MANGLE;
3558 				entry->mpls_mangle.label = tcf_mpls_label(act);
3559 				entry->mpls_mangle.tc = tcf_mpls_tc(act);
3560 				entry->mpls_mangle.bos = tcf_mpls_bos(act);
3561 				entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3562 				break;
3563 			default:
3564 				goto err_out;
3565 			}
3566 		} else if (is_tcf_skbedit_ptype(act)) {
3567 			entry->id = FLOW_ACTION_PTYPE;
3568 			entry->ptype = tcf_skbedit_ptype(act);
3569 		} else {
3570 			err = -EOPNOTSUPP;
3571 			goto err_out;
3572 		}
3573 
3574 		if (!is_tcf_pedit(act))
3575 			j++;
3576 	}
3577 
3578 err_out:
3579 	if (!rtnl_held)
3580 		rtnl_unlock();
3581 
3582 	if (err)
3583 		tc_cleanup_flow_action(flow_action);
3584 
3585 	return err;
3586 }
3587 EXPORT_SYMBOL(tc_setup_flow_action);
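
/* The flow_action table built above is what drivers consume in their block
 * callbacks. A hedged sketch of the receiving end, where priv and the
 * my_hw_* helpers are stand-ins for real driver code:
 *
 *	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 *	const struct flow_action_entry *entry;
 *	int i, err;
 *
 *	flow_action_for_each(i, entry, &rule->action) {
 *		switch (entry->id) {
 *		case FLOW_ACTION_DROP:
 *			err = my_hw_add_drop(priv, cls->cookie);
 *			break;
 *		case FLOW_ACTION_REDIRECT:
 *			err = my_hw_add_fwd(priv, cls->cookie, entry->dev);
 *			break;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *		if (err)
 *			return err;
 *	}
 *
 * Rejecting unknown entry->id values with -EOPNOTSUPP is what lets the
 * software datapath stay authoritative whenever skip_sw is not set.
 */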
3588 
3589 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3590 {
3591 	unsigned int num_acts = 0;
3592 	struct tc_action *act;
3593 	int i;
3594 
3595 	tcf_exts_for_each_action(i, act, exts) {
3596 		if (is_tcf_pedit(act))
3597 			num_acts += tcf_pedit_nkeys(act);
3598 		else
3599 			num_acts++;
3600 	}
3601 	return num_acts;
3602 }
3603 EXPORT_SYMBOL(tcf_exts_num_actions);
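
/* tcf_exts_num_actions() exists so callers can size the flow_action table
 * before tc_setup_flow_action() fills it in; pedit contributes one entry per
 * key, hence the tcf_pedit_nkeys() term above. The typical pairing, as a
 * hedged sketch:
 *
 *	struct flow_rule *rule;
 *	int err;
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(exts));
 *	if (!rule)
 *		return -ENOMEM;
 *	err = tc_setup_flow_action(&rule->action, exts, true);
 */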
3604 
3605 static __net_init int tcf_net_init(struct net *net)
3606 {
3607 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3608 
3609 	spin_lock_init(&tn->idr_lock);
3610 	idr_init(&tn->idr);
3611 	return 0;
3612 }
3613 
3614 static void __net_exit tcf_net_exit(struct net *net)
3615 {
3616 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3617 
3618 	idr_destroy(&tn->idr);
3619 }
3620 
3621 static struct pernet_operations tcf_net_ops = {
3622 	.init = tcf_net_init,
3623 	.exit = tcf_net_exit,
3624 	.id   = &tcf_net_id,
3625 	.size = sizeof(struct tcf_net),
3626 };
3627 
3628 static struct flow_indr_block_entry block_entry = {
3629 	.cb = tc_indr_block_get_and_cmd,
3630 	.list = LIST_HEAD_INIT(block_entry.list),
3631 };
3632 
3633 static int __init tc_filter_init(void)
3634 {
3635 	int err;
3636 
3637 	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3638 	if (!tc_filter_wq)
3639 		return -ENOMEM;
3640 
3641 	err = register_pernet_subsys(&tcf_net_ops);
3642 	if (err)
3643 		goto err_register_pernet_subsys;
3644 
3645 	flow_indr_add_block_cb(&block_entry);
3646 
3647 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3648 		      RTNL_FLAG_DOIT_UNLOCKED);
3649 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3650 		      RTNL_FLAG_DOIT_UNLOCKED);
3651 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3652 		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3653 	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3654 	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3655 	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3656 		      tc_dump_chain, 0);
3657 
3658 	return 0;
3659 
3660 err_register_pernet_subsys:
3661 	destroy_workqueue(tc_filter_wq);
3662 	return err;
3663 }
3664 
3665 subsys_initcall(tc_filter_init);
3666