xref: /openbmc/linux/net/sched/cls_api.c (revision a531b0c2)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_api.c	Packet classifier API.
4  *
5  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  *
7  * Changes:
8  *
9  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
10  */
11 
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/skbuff.h>
19 #include <linux/init.h>
20 #include <linux/kmod.h>
21 #include <linux/slab.h>
22 #include <linux/idr.h>
23 #include <linux/jhash.h>
24 #include <linux/rculist.h>
25 #include <net/net_namespace.h>
26 #include <net/sock.h>
27 #include <net/netlink.h>
28 #include <net/pkt_sched.h>
29 #include <net/pkt_cls.h>
30 #include <net/tc_act/tc_pedit.h>
31 #include <net/tc_act/tc_mirred.h>
32 #include <net/tc_act/tc_vlan.h>
33 #include <net/tc_act/tc_tunnel_key.h>
34 #include <net/tc_act/tc_csum.h>
35 #include <net/tc_act/tc_gact.h>
36 #include <net/tc_act/tc_police.h>
37 #include <net/tc_act/tc_sample.h>
38 #include <net/tc_act/tc_skbedit.h>
39 #include <net/tc_act/tc_ct.h>
40 #include <net/tc_act/tc_mpls.h>
41 #include <net/tc_act/tc_gate.h>
42 #include <net/flow_offload.h>
43 
44 extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
45 
46 /* The list of all installed classifier types */
47 static LIST_HEAD(tcf_proto_base);
48 
49 /* Protects the list of registered TC modules. It is a pure SMP lock. */
50 static DEFINE_RWLOCK(cls_mod_lock);
51 
52 static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
53 {
54 	return jhash_3words(tp->chain->index, tp->prio,
55 			    (__force __u32)tp->protocol, 0);
56 }
57 
58 static void tcf_proto_signal_destroying(struct tcf_chain *chain,
59 					struct tcf_proto *tp)
60 {
61 	struct tcf_block *block = chain->block;
62 
63 	mutex_lock(&block->proto_destroy_lock);
64 	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
65 		     destroy_obj_hashfn(tp));
66 	mutex_unlock(&block->proto_destroy_lock);
67 }
68 
69 static bool tcf_proto_cmp(const struct tcf_proto *tp1,
70 			  const struct tcf_proto *tp2)
71 {
72 	return tp1->chain->index == tp2->chain->index &&
73 	       tp1->prio == tp2->prio &&
74 	       tp1->protocol == tp2->protocol;
75 }
76 
77 static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
78 					struct tcf_proto *tp)
79 {
80 	u32 hash = destroy_obj_hashfn(tp);
81 	struct tcf_proto *iter;
82 	bool found = false;
83 
84 	rcu_read_lock();
85 	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
86 				   destroy_ht_node, hash) {
87 		if (tcf_proto_cmp(tp, iter)) {
88 			found = true;
89 			break;
90 		}
91 	}
92 	rcu_read_unlock();
93 
94 	return found;
95 }
96 
97 static void
98 tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
99 {
100 	struct tcf_block *block = chain->block;
101 
102 	mutex_lock(&block->proto_destroy_lock);
103 	if (hash_hashed(&tp->destroy_ht_node))
104 		hash_del_rcu(&tp->destroy_ht_node);
105 	mutex_unlock(&block->proto_destroy_lock);
106 }
107 
108 /* Find classifier type by string name */
109 
110 static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
111 {
112 	const struct tcf_proto_ops *t, *res = NULL;
113 
114 	if (kind) {
115 		read_lock(&cls_mod_lock);
116 		list_for_each_entry(t, &tcf_proto_base, head) {
117 			if (strcmp(kind, t->kind) == 0) {
118 				if (try_module_get(t->owner))
119 					res = t;
120 				break;
121 			}
122 		}
123 		read_unlock(&cls_mod_lock);
124 	}
125 	return res;
126 }
127 
128 static const struct tcf_proto_ops *
129 tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
130 		     struct netlink_ext_ack *extack)
131 {
132 	const struct tcf_proto_ops *ops;
133 
134 	ops = __tcf_proto_lookup_ops(kind);
135 	if (ops)
136 		return ops;
137 #ifdef CONFIG_MODULES
138 	if (rtnl_held)
139 		rtnl_unlock();
140 	request_module("cls_%s", kind);
141 	if (rtnl_held)
142 		rtnl_lock();
143 	ops = __tcf_proto_lookup_ops(kind);
144 	/* We dropped the RTNL semaphore in order to perform
145 	 * the module load. So, even if we succeeded in loading
146 	 * the module, we have to replay the request. We indicate
147 	 * this using -EAGAIN.
148 	 */
149 	if (ops) {
150 		module_put(ops->owner);
151 		return ERR_PTR(-EAGAIN);
152 	}
153 #endif
154 	NL_SET_ERR_MSG(extack, "TC classifier not found");
155 	return ERR_PTR(-ENOENT);
156 }
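
/* Note for callers: an -EAGAIN return from tcf_proto_lookup_ops() means the
 * RTNL lock was dropped for a module load and the whole request must be
 * replayed from scratch (see the replay: label in tc_new_tfilter() below).
 */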
157 
158 /* Register(unregister) new classifier type */
159 
160 int register_tcf_proto_ops(struct tcf_proto_ops *ops)
161 {
162 	struct tcf_proto_ops *t;
163 	int rc = -EEXIST;
164 
165 	write_lock(&cls_mod_lock);
166 	list_for_each_entry(t, &tcf_proto_base, head)
167 		if (!strcmp(ops->kind, t->kind))
168 			goto out;
169 
170 	list_add_tail(&ops->head, &tcf_proto_base);
171 	rc = 0;
172 out:
173 	write_unlock(&cls_mod_lock);
174 	return rc;
175 }
176 EXPORT_SYMBOL(register_tcf_proto_ops);
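
/* Illustrative sketch (not part of this file): a classifier module would
 * typically register its ops from its module init, along these lines.
 * "my_cls" and my_cls_ops are hypothetical names used only for example:
 *
 *	static struct tcf_proto_ops my_cls_ops = {
 *		.kind	= "my_cls",
 *		.owner	= THIS_MODULE,
 *		.classify = ...,	(plus .init, .destroy, etc.)
 *	};
 *
 *	static int __init my_cls_init(void)
 *	{
 *		return register_tcf_proto_ops(&my_cls_ops);
 *	}
 */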
177 
178 static struct workqueue_struct *tc_filter_wq;
179 
180 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
181 {
182 	struct tcf_proto_ops *t;
183 	int rc = -ENOENT;
184 
185 	/* Wait for outstanding call_rcu()s, if any, from a
186 	 * tcf_proto_ops's destroy() handler.
187 	 */
188 	rcu_barrier();
189 	flush_workqueue(tc_filter_wq);
190 
191 	write_lock(&cls_mod_lock);
192 	list_for_each_entry(t, &tcf_proto_base, head) {
193 		if (t == ops) {
194 			list_del(&t->head);
195 			rc = 0;
196 			break;
197 		}
198 	}
199 	write_unlock(&cls_mod_lock);
200 	return rc;
201 }
202 EXPORT_SYMBOL(unregister_tcf_proto_ops);
203 
204 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
205 {
206 	INIT_RCU_WORK(rwork, func);
207 	return queue_rcu_work(tc_filter_wq, rwork);
208 }
209 EXPORT_SYMBOL(tcf_queue_work);
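
/* Classifiers use tcf_queue_work() to defer filter destruction until after
 * an RCU grace period without blocking the fast path. A hedged sketch of
 * the usual pattern (f and my_filter_work_fn are hypothetical):
 *
 *	tcf_queue_work(&f->rwork, my_filter_work_fn);
 *
 * my_filter_work_fn() then runs on tc_filter_wq and frees the filter.
 */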
210 
211 /* Select a new prio value from the range managed by the kernel. */
212 
213 static inline u32 tcf_auto_prio(struct tcf_proto *tp)
214 {
215 	u32 first = TC_H_MAKE(0xC0000000U, 0U);
216 
217 	if (tp)
218 		first = tp->prio - 1;
219 
220 	return TC_H_MAJ(first);
221 }
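
/* Worked example: with no existing filter the kernel picks
 * TC_H_MAKE(0xC0000000U, 0U) and returns its major part, 0xC0000000.
 * Otherwise the result is TC_H_MAJ(tp->prio - 1), one 0x10000 step below
 * the current head, so auto-allocated priorities strictly decrease.
 */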
222 
223 static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
224 {
225 	if (kind)
226 		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
227 	memset(name, 0, IFNAMSIZ);
228 	return false;
229 }
230 
231 static bool tcf_proto_is_unlocked(const char *kind)
232 {
233 	const struct tcf_proto_ops *ops;
234 	bool ret;
235 
236 	if (strlen(kind) == 0)
237 		return false;
238 
239 	ops = tcf_proto_lookup_ops(kind, false, NULL);
240 	/* On error return false to take rtnl lock. Proto lookup/create
241 	 * functions will perform lookup again and properly handle errors.
242 	 */
243 	if (IS_ERR(ops))
244 		return false;
245 
246 	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
247 	module_put(ops->owner);
248 	return ret;
249 }
250 
251 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
252 					  u32 prio, struct tcf_chain *chain,
253 					  bool rtnl_held,
254 					  struct netlink_ext_ack *extack)
255 {
256 	struct tcf_proto *tp;
257 	int err;
258 
259 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
260 	if (!tp)
261 		return ERR_PTR(-ENOBUFS);
262 
263 	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
264 	if (IS_ERR(tp->ops)) {
265 		err = PTR_ERR(tp->ops);
266 		goto errout;
267 	}
268 	tp->classify = tp->ops->classify;
269 	tp->protocol = protocol;
270 	tp->prio = prio;
271 	tp->chain = chain;
272 	spin_lock_init(&tp->lock);
273 	refcount_set(&tp->refcnt, 1);
274 
275 	err = tp->ops->init(tp);
276 	if (err) {
277 		module_put(tp->ops->owner);
278 		goto errout;
279 	}
280 	return tp;
281 
282 errout:
283 	kfree(tp);
284 	return ERR_PTR(err);
285 }
286 
287 static void tcf_proto_get(struct tcf_proto *tp)
288 {
289 	refcount_inc(&tp->refcnt);
290 }
291 
292 static void tcf_chain_put(struct tcf_chain *chain);
293 
294 static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
295 			      bool sig_destroy, struct netlink_ext_ack *extack)
296 {
297 	tp->ops->destroy(tp, rtnl_held, extack);
298 	if (sig_destroy)
299 		tcf_proto_signal_destroyed(tp->chain, tp);
300 	tcf_chain_put(tp->chain);
301 	module_put(tp->ops->owner);
302 	kfree_rcu(tp, rcu);
303 }
304 
305 static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
306 			  struct netlink_ext_ack *extack)
307 {
308 	if (refcount_dec_and_test(&tp->refcnt))
309 		tcf_proto_destroy(tp, rtnl_held, true, extack);
310 }
311 
312 static bool tcf_proto_check_delete(struct tcf_proto *tp)
313 {
314 	if (tp->ops->delete_empty)
315 		return tp->ops->delete_empty(tp);
316 
317 	tp->deleting = true;
318 	return tp->deleting;
319 }
320 
321 static void tcf_proto_mark_delete(struct tcf_proto *tp)
322 {
323 	spin_lock(&tp->lock);
324 	tp->deleting = true;
325 	spin_unlock(&tp->lock);
326 }
327 
328 static bool tcf_proto_is_deleting(struct tcf_proto *tp)
329 {
330 	bool deleting;
331 
332 	spin_lock(&tp->lock);
333 	deleting = tp->deleting;
334 	spin_unlock(&tp->lock);
335 
336 	return deleting;
337 }
338 
339 #define ASSERT_BLOCK_LOCKED(block)					\
340 	lockdep_assert_held(&(block)->lock)
341 
342 struct tcf_filter_chain_list_item {
343 	struct list_head list;
344 	tcf_chain_head_change_t *chain_head_change;
345 	void *chain_head_change_priv;
346 };
347 
348 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
349 					  u32 chain_index)
350 {
351 	struct tcf_chain *chain;
352 
353 	ASSERT_BLOCK_LOCKED(block);
354 
355 	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
356 	if (!chain)
357 		return NULL;
358 	list_add_tail_rcu(&chain->list, &block->chain_list);
359 	mutex_init(&chain->filter_chain_lock);
360 	chain->block = block;
361 	chain->index = chain_index;
362 	chain->refcnt = 1;
363 	if (!chain->index)
364 		block->chain0.chain = chain;
365 	return chain;
366 }
367 
368 static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
369 				       struct tcf_proto *tp_head)
370 {
371 	if (item->chain_head_change)
372 		item->chain_head_change(tp_head, item->chain_head_change_priv);
373 }
374 
375 static void tcf_chain0_head_change(struct tcf_chain *chain,
376 				   struct tcf_proto *tp_head)
377 {
378 	struct tcf_filter_chain_list_item *item;
379 	struct tcf_block *block = chain->block;
380 
381 	if (chain->index)
382 		return;
383 
384 	mutex_lock(&block->lock);
385 	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
386 		tcf_chain_head_change_item(item, tp_head);
387 	mutex_unlock(&block->lock);
388 }
389 
390 /* Returns true if block can be safely freed. */
391 
392 static bool tcf_chain_detach(struct tcf_chain *chain)
393 {
394 	struct tcf_block *block = chain->block;
395 
396 	ASSERT_BLOCK_LOCKED(block);
397 
398 	list_del_rcu(&chain->list);
399 	if (!chain->index)
400 		block->chain0.chain = NULL;
401 
402 	if (list_empty(&block->chain_list) &&
403 	    refcount_read(&block->refcnt) == 0)
404 		return true;
405 
406 	return false;
407 }
408 
409 static void tcf_block_destroy(struct tcf_block *block)
410 {
411 	mutex_destroy(&block->lock);
412 	mutex_destroy(&block->proto_destroy_lock);
413 	kfree_rcu(block, rcu);
414 }
415 
416 static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
417 {
418 	struct tcf_block *block = chain->block;
419 
420 	mutex_destroy(&chain->filter_chain_lock);
421 	kfree_rcu(chain, rcu);
422 	if (free_block)
423 		tcf_block_destroy(block);
424 }
425 
426 static void tcf_chain_hold(struct tcf_chain *chain)
427 {
428 	ASSERT_BLOCK_LOCKED(chain->block);
429 
430 	++chain->refcnt;
431 }
432 
433 static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
434 {
435 	ASSERT_BLOCK_LOCKED(chain->block);
436 
437 	/* In case all the references are action references, this
438 	 * chain should not be shown to the user.
439 	 */
440 	return chain->refcnt == chain->action_refcnt;
441 }
442 
443 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
444 					  u32 chain_index)
445 {
446 	struct tcf_chain *chain;
447 
448 	ASSERT_BLOCK_LOCKED(block);
449 
450 	list_for_each_entry(chain, &block->chain_list, list) {
451 		if (chain->index == chain_index)
452 			return chain;
453 	}
454 	return NULL;
455 }
456 
457 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
458 static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
459 					      u32 chain_index)
460 {
461 	struct tcf_chain *chain;
462 
463 	list_for_each_entry_rcu(chain, &block->chain_list, list) {
464 		if (chain->index == chain_index)
465 			return chain;
466 	}
467 	return NULL;
468 }
469 #endif
470 
471 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
472 			   u32 seq, u16 flags, int event, bool unicast);
473 
474 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
475 					 u32 chain_index, bool create,
476 					 bool by_act)
477 {
478 	struct tcf_chain *chain = NULL;
479 	bool is_first_reference;
480 
481 	mutex_lock(&block->lock);
482 	chain = tcf_chain_lookup(block, chain_index);
483 	if (chain) {
484 		tcf_chain_hold(chain);
485 	} else {
486 		if (!create)
487 			goto errout;
488 		chain = tcf_chain_create(block, chain_index);
489 		if (!chain)
490 			goto errout;
491 	}
492 
493 	if (by_act)
494 		++chain->action_refcnt;
495 	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
496 	mutex_unlock(&block->lock);
497 
498 	/* Send a notification only when we get the first
499 	 * non-action reference. Until then, the chain acts only as
500 	 * a placeholder for actions pointing to it, and the user
501 	 * ought not to know about it.
502 	 */
503 	if (is_first_reference && !by_act)
504 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
505 				RTM_NEWCHAIN, false);
506 
507 	return chain;
508 
509 errout:
510 	mutex_unlock(&block->lock);
511 	return chain;
512 }
513 
514 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
515 				       bool create)
516 {
517 	return __tcf_chain_get(block, chain_index, create, false);
518 }
519 
520 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
521 {
522 	return __tcf_chain_get(block, chain_index, true, true);
523 }
524 EXPORT_SYMBOL(tcf_chain_get_by_act);
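
/* Actions that jump between chains (goto_chain) pin their target chain via
 * tcf_chain_get_by_act(). Such action-only references keep the chain alive
 * as a placeholder but, per tcf_chain_held_by_acts_only() above, hide it
 * from user-space until a non-action reference appears.
 */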
525 
526 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
527 			       void *tmplt_priv);
528 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
529 				  void *tmplt_priv, u32 chain_index,
530 				  struct tcf_block *block, struct sk_buff *oskb,
531 				  u32 seq, u16 flags, bool unicast);
532 
533 static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
534 			    bool explicitly_created)
535 {
536 	struct tcf_block *block = chain->block;
537 	const struct tcf_proto_ops *tmplt_ops;
538 	bool free_block = false;
539 	unsigned int refcnt;
540 	void *tmplt_priv;
541 
542 	mutex_lock(&block->lock);
543 	if (explicitly_created) {
544 		if (!chain->explicitly_created) {
545 			mutex_unlock(&block->lock);
546 			return;
547 		}
548 		chain->explicitly_created = false;
549 	}
550 
551 	if (by_act)
552 		chain->action_refcnt--;
553 
554 	/* tc_chain_notify_delete can't be called while holding block lock.
555 	 * However, when block is unlocked chain can be changed concurrently, so
556 	 * save these to temporary variables.
557 	 */
558 	refcnt = --chain->refcnt;
559 	tmplt_ops = chain->tmplt_ops;
560 	tmplt_priv = chain->tmplt_priv;
561 
562 	/* The last dropped non-action reference will trigger notification. */
563 	if (refcnt - chain->action_refcnt == 0 && !by_act) {
564 		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
565 				       block, NULL, 0, 0, false);
566 		/* Last reference to chain, no need to lock. */
567 		chain->flushing = false;
568 	}
569 
570 	if (refcnt == 0)
571 		free_block = tcf_chain_detach(chain);
572 	mutex_unlock(&block->lock);
573 
574 	if (refcnt == 0) {
575 		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
576 		tcf_chain_destroy(chain, free_block);
577 	}
578 }
579 
580 static void tcf_chain_put(struct tcf_chain *chain)
581 {
582 	__tcf_chain_put(chain, false, false);
583 }
584 
585 void tcf_chain_put_by_act(struct tcf_chain *chain)
586 {
587 	__tcf_chain_put(chain, true, false);
588 }
589 EXPORT_SYMBOL(tcf_chain_put_by_act);
590 
591 static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
592 {
593 	__tcf_chain_put(chain, false, true);
594 }
595 
596 static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
597 {
598 	struct tcf_proto *tp, *tp_next;
599 
600 	mutex_lock(&chain->filter_chain_lock);
601 	tp = tcf_chain_dereference(chain->filter_chain, chain);
602 	while (tp) {
603 		tp_next = rcu_dereference_protected(tp->next, 1);
604 		tcf_proto_signal_destroying(chain, tp);
605 		tp = tp_next;
606 	}
607 	tp = tcf_chain_dereference(chain->filter_chain, chain);
608 	RCU_INIT_POINTER(chain->filter_chain, NULL);
609 	tcf_chain0_head_change(chain, NULL);
610 	chain->flushing = true;
611 	mutex_unlock(&chain->filter_chain_lock);
612 
613 	while (tp) {
614 		tp_next = rcu_dereference_protected(tp->next, 1);
615 		tcf_proto_put(tp, rtnl_held, NULL);
616 		tp = tp_next;
617 	}
618 }
619 
620 static int tcf_block_setup(struct tcf_block *block,
621 			   struct flow_block_offload *bo);
622 
623 static void tcf_block_offload_init(struct flow_block_offload *bo,
624 				   struct net_device *dev, struct Qdisc *sch,
625 				   enum flow_block_command command,
626 				   enum flow_block_binder_type binder_type,
627 				   struct flow_block *flow_block,
628 				   bool shared, struct netlink_ext_ack *extack)
629 {
630 	bo->net = dev_net(dev);
631 	bo->command = command;
632 	bo->binder_type = binder_type;
633 	bo->block = flow_block;
634 	bo->block_shared = shared;
635 	bo->extack = extack;
636 	bo->sch = sch;
637 	bo->cb_list_head = &flow_block->cb_list;
638 	INIT_LIST_HEAD(&bo->cb_list);
639 }
640 
641 static void tcf_block_unbind(struct tcf_block *block,
642 			     struct flow_block_offload *bo);
643 
644 static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
645 {
646 	struct tcf_block *block = block_cb->indr.data;
647 	struct net_device *dev = block_cb->indr.dev;
648 	struct Qdisc *sch = block_cb->indr.sch;
649 	struct netlink_ext_ack extack = {};
650 	struct flow_block_offload bo = {};
651 
652 	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
653 			       block_cb->indr.binder_type,
654 			       &block->flow_block, tcf_block_shared(block),
655 			       &extack);
656 	rtnl_lock();
657 	down_write(&block->cb_lock);
658 	list_del(&block_cb->driver_list);
659 	list_move(&block_cb->list, &bo.cb_list);
660 	tcf_block_unbind(block, &bo);
661 	up_write(&block->cb_lock);
662 	rtnl_unlock();
663 }
664 
665 static bool tcf_block_offload_in_use(struct tcf_block *block)
666 {
667 	return atomic_read(&block->offloadcnt);
668 }
669 
670 static int tcf_block_offload_cmd(struct tcf_block *block,
671 				 struct net_device *dev, struct Qdisc *sch,
672 				 struct tcf_block_ext_info *ei,
673 				 enum flow_block_command command,
674 				 struct netlink_ext_ack *extack)
675 {
676 	struct flow_block_offload bo = {};
677 
678 	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
679 			       &block->flow_block, tcf_block_shared(block),
680 			       extack);
681 
682 	if (dev->netdev_ops->ndo_setup_tc) {
683 		int err;
684 
685 		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
686 		if (err < 0) {
687 			if (err != -EOPNOTSUPP)
688 				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
689 			return err;
690 		}
691 
692 		return tcf_block_setup(block, &bo);
693 	}
694 
695 	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
696 				    tc_block_indr_cleanup);
697 	tcf_block_setup(block, &bo);
698 
699 	return -EOPNOTSUPP;
700 }
701 
702 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
703 				  struct tcf_block_ext_info *ei,
704 				  struct netlink_ext_ack *extack)
705 {
706 	struct net_device *dev = q->dev_queue->dev;
707 	int err;
708 
709 	down_write(&block->cb_lock);
710 
711 	/* If tc offload feature is disabled and the block we try to bind
712 	 * to already has some offloaded filters, refuse the bind.
713 	 */
714 	if (dev->netdev_ops->ndo_setup_tc &&
715 	    !tc_can_offload(dev) &&
716 	    tcf_block_offload_in_use(block)) {
717 		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
718 		err = -EOPNOTSUPP;
719 		goto err_unlock;
720 	}
721 
722 	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
723 	if (err == -EOPNOTSUPP)
724 		goto no_offload_dev_inc;
725 	if (err)
726 		goto err_unlock;
727 
728 	up_write(&block->cb_lock);
729 	return 0;
730 
731 no_offload_dev_inc:
732 	if (tcf_block_offload_in_use(block))
733 		goto err_unlock;
734 
735 	err = 0;
736 	block->nooffloaddevcnt++;
737 err_unlock:
738 	up_write(&block->cb_lock);
739 	return err;
740 }
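
/* nooffloaddevcnt counts devices bound to this block that could not offload
 * it (the driver returned -EOPNOTSUPP while no offloaded filters were
 * present). tcf_block_offload_unbind() below decrements it, warning on
 * underflow.
 */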
741 
742 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
743 				     struct tcf_block_ext_info *ei)
744 {
745 	struct net_device *dev = q->dev_queue->dev;
746 	int err;
747 
748 	down_write(&block->cb_lock);
749 	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
750 	if (err == -EOPNOTSUPP)
751 		goto no_offload_dev_dec;
752 	up_write(&block->cb_lock);
753 	return;
754 
755 no_offload_dev_dec:
756 	WARN_ON(block->nooffloaddevcnt-- == 0);
757 	up_write(&block->cb_lock);
758 }
759 
760 static int
761 tcf_chain0_head_change_cb_add(struct tcf_block *block,
762 			      struct tcf_block_ext_info *ei,
763 			      struct netlink_ext_ack *extack)
764 {
765 	struct tcf_filter_chain_list_item *item;
766 	struct tcf_chain *chain0;
767 
768 	item = kmalloc(sizeof(*item), GFP_KERNEL);
769 	if (!item) {
770 		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
771 		return -ENOMEM;
772 	}
773 	item->chain_head_change = ei->chain_head_change;
774 	item->chain_head_change_priv = ei->chain_head_change_priv;
775 
776 	mutex_lock(&block->lock);
777 	chain0 = block->chain0.chain;
778 	if (chain0)
779 		tcf_chain_hold(chain0);
780 	else
781 		list_add(&item->list, &block->chain0.filter_chain_list);
782 	mutex_unlock(&block->lock);
783 
784 	if (chain0) {
785 		struct tcf_proto *tp_head;
786 
787 		mutex_lock(&chain0->filter_chain_lock);
788 
789 		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
790 		if (tp_head)
791 			tcf_chain_head_change_item(item, tp_head);
792 
793 		mutex_lock(&block->lock);
794 		list_add(&item->list, &block->chain0.filter_chain_list);
795 		mutex_unlock(&block->lock);
796 
797 		mutex_unlock(&chain0->filter_chain_lock);
798 		tcf_chain_put(chain0);
799 	}
800 
801 	return 0;
802 }
803 
804 static void
805 tcf_chain0_head_change_cb_del(struct tcf_block *block,
806 			      struct tcf_block_ext_info *ei)
807 {
808 	struct tcf_filter_chain_list_item *item;
809 
810 	mutex_lock(&block->lock);
811 	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
812 		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
813 		    (item->chain_head_change == ei->chain_head_change &&
814 		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
815 			if (block->chain0.chain)
816 				tcf_chain_head_change_item(item, NULL);
817 			list_del(&item->list);
818 			mutex_unlock(&block->lock);
819 
820 			kfree(item);
821 			return;
822 		}
823 	}
824 	mutex_unlock(&block->lock);
825 	WARN_ON(1);
826 }
827 
828 struct tcf_net {
829 	spinlock_t idr_lock; /* Protects idr */
830 	struct idr idr;
831 };
832 
833 static unsigned int tcf_net_id;
834 
835 static int tcf_block_insert(struct tcf_block *block, struct net *net,
836 			    struct netlink_ext_ack *extack)
837 {
838 	struct tcf_net *tn = net_generic(net, tcf_net_id);
839 	int err;
840 
841 	idr_preload(GFP_KERNEL);
842 	spin_lock(&tn->idr_lock);
843 	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
844 			    GFP_NOWAIT);
845 	spin_unlock(&tn->idr_lock);
846 	idr_preload_end();
847 
848 	return err;
849 }
850 
851 static void tcf_block_remove(struct tcf_block *block, struct net *net)
852 {
853 	struct tcf_net *tn = net_generic(net, tcf_net_id);
854 
855 	spin_lock(&tn->idr_lock);
856 	idr_remove(&tn->idr, block->index);
857 	spin_unlock(&tn->idr_lock);
858 }
859 
860 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
861 					  u32 block_index,
862 					  struct netlink_ext_ack *extack)
863 {
864 	struct tcf_block *block;
865 
866 	block = kzalloc(sizeof(*block), GFP_KERNEL);
867 	if (!block) {
868 		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
869 		return ERR_PTR(-ENOMEM);
870 	}
871 	mutex_init(&block->lock);
872 	mutex_init(&block->proto_destroy_lock);
873 	init_rwsem(&block->cb_lock);
874 	flow_block_init(&block->flow_block);
875 	INIT_LIST_HEAD(&block->chain_list);
876 	INIT_LIST_HEAD(&block->owner_list);
877 	INIT_LIST_HEAD(&block->chain0.filter_chain_list);
878 
879 	refcount_set(&block->refcnt, 1);
880 	block->net = net;
881 	block->index = block_index;
882 
883 	/* Don't store q pointer for blocks which are shared */
884 	if (!tcf_block_shared(block))
885 		block->q = q;
886 	return block;
887 }
888 
889 static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
890 {
891 	struct tcf_net *tn = net_generic(net, tcf_net_id);
892 
893 	return idr_find(&tn->idr, block_index);
894 }
895 
896 static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
897 {
898 	struct tcf_block *block;
899 
900 	rcu_read_lock();
901 	block = tcf_block_lookup(net, block_index);
902 	if (block && !refcount_inc_not_zero(&block->refcnt))
903 		block = NULL;
904 	rcu_read_unlock();
905 
906 	return block;
907 }
908 
909 static struct tcf_chain *
910 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
911 {
912 	mutex_lock(&block->lock);
913 	if (chain)
914 		chain = list_is_last(&chain->list, &block->chain_list) ?
915 			NULL : list_next_entry(chain, list);
916 	else
917 		chain = list_first_entry_or_null(&block->chain_list,
918 						 struct tcf_chain, list);
919 
920 	/* skip all action-only chains */
921 	while (chain && tcf_chain_held_by_acts_only(chain))
922 		chain = list_is_last(&chain->list, &block->chain_list) ?
923 			NULL : list_next_entry(chain, list);
924 
925 	if (chain)
926 		tcf_chain_hold(chain);
927 	mutex_unlock(&block->lock);
928 
929 	return chain;
930 }
931 
932 /* Function to be used by all clients that want to iterate over all chains on
933  * block. It properly obtains block->lock and takes reference to chain before
934  * returning it. Users of this function must be tolerant of concurrent chain
935  * insertion/deletion or ensure that no concurrent chain modification is
936  * possible. Note that netlink dump callbacks cannot guarantee a consistent
937  * dump because the rtnl lock is released each time the skb is filled with
938  * data and sent to user-space.
939  */
940 
941 struct tcf_chain *
942 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
943 {
944 	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);
945 
946 	if (chain)
947 		tcf_chain_put(chain);
948 
949 	return chain_next;
950 }
951 EXPORT_SYMBOL(tcf_get_next_chain);
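
/* Typical iteration idiom, as used by tcf_block_flush_all_chains() below:
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		...;
 *
 * Each call drops the reference taken on the previously returned chain.
 */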
952 
953 static struct tcf_proto *
954 __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
955 {
956 	u32 prio = 0;
957 
958 	ASSERT_RTNL();
959 	mutex_lock(&chain->filter_chain_lock);
960 
961 	if (!tp) {
962 		tp = tcf_chain_dereference(chain->filter_chain, chain);
963 	} else if (tcf_proto_is_deleting(tp)) {
964 		/* 'deleting' flag is set and chain->filter_chain_lock was
965 		 * unlocked, which means next pointer could be invalid. Restart
966 		 * search.
967 		 */
968 		prio = tp->prio + 1;
969 		tp = tcf_chain_dereference(chain->filter_chain, chain);
970 
971 		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
972 			if (!tp->deleting && tp->prio >= prio)
973 				break;
974 	} else {
975 		tp = tcf_chain_dereference(tp->next, chain);
976 	}
977 
978 	if (tp)
979 		tcf_proto_get(tp);
980 
981 	mutex_unlock(&chain->filter_chain_lock);
982 
983 	return tp;
984 }
985 
986 /* Function to be used by all clients that want to iterate over all tp's on
987  * chain. Users of this function must be tolerant of concurrent tp
988  * insertion/deletion or ensure that no concurrent chain modification is
989  * possible. Note that netlink dump callbacks cannot guarantee a consistent
990  * dump because the rtnl lock is released each time the skb is filled with
991  * data and sent to user-space.
992  */
993 
994 struct tcf_proto *
995 tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
996 {
997 	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
998 
999 	if (tp)
1000 		tcf_proto_put(tp, true, NULL);
1001 
1002 	return tp_next;
1003 }
1004 EXPORT_SYMBOL(tcf_get_next_proto);
1005 
1006 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
1007 {
1008 	struct tcf_chain *chain;
1009 
1010 	/* Last reference to block. At this point chains cannot be added or
1011 	 * removed concurrently.
1012 	 */
1013 	for (chain = tcf_get_next_chain(block, NULL);
1014 	     chain;
1015 	     chain = tcf_get_next_chain(block, chain)) {
1016 		tcf_chain_put_explicitly_created(chain);
1017 		tcf_chain_flush(chain, rtnl_held);
1018 	}
1019 }
1020 
1021 /* Look up the Qdisc and increment its reference counter.
1022  * Set parent, if necessary.
1023  */
1024 
1025 static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
1026 			    u32 *parent, int ifindex, bool rtnl_held,
1027 			    struct netlink_ext_ack *extack)
1028 {
1029 	const struct Qdisc_class_ops *cops;
1030 	struct net_device *dev;
1031 	int err = 0;
1032 
1033 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1034 		return 0;
1035 
1036 	rcu_read_lock();
1037 
1038 	/* Find link */
1039 	dev = dev_get_by_index_rcu(net, ifindex);
1040 	if (!dev) {
1041 		rcu_read_unlock();
1042 		return -ENODEV;
1043 	}
1044 
1045 	/* Find qdisc */
1046 	if (!*parent) {
1047 		*q = dev->qdisc;
1048 		*parent = (*q)->handle;
1049 	} else {
1050 		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
1051 		if (!*q) {
1052 			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1053 			err = -EINVAL;
1054 			goto errout_rcu;
1055 		}
1056 	}
1057 
1058 	*q = qdisc_refcount_inc_nz(*q);
1059 	if (!*q) {
1060 		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1061 		err = -EINVAL;
1062 		goto errout_rcu;
1063 	}
1064 
1065 	/* Is it classful? */
1066 	cops = (*q)->ops->cl_ops;
1067 	if (!cops) {
1068 		NL_SET_ERR_MSG(extack, "Qdisc not classful");
1069 		err = -EINVAL;
1070 		goto errout_qdisc;
1071 	}
1072 
1073 	if (!cops->tcf_block) {
1074 		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
1075 		err = -EOPNOTSUPP;
1076 		goto errout_qdisc;
1077 	}
1078 
1079 errout_rcu:
1080 	/* At this point we know that qdisc is not noop_qdisc,
1081 	 * which means that qdisc holds a reference to net_device
1082 	 * and we hold a reference to qdisc, so it is safe to release
1083 	 * rcu read lock.
1084 	 */
1085 	rcu_read_unlock();
1086 	return err;
1087 
1088 errout_qdisc:
1089 	rcu_read_unlock();
1090 
1091 	if (rtnl_held)
1092 		qdisc_put(*q);
1093 	else
1094 		qdisc_put_unlocked(*q);
1095 	*q = NULL;
1096 
1097 	return err;
1098 }
1099 
1100 static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
1101 			       int ifindex, struct netlink_ext_ack *extack)
1102 {
1103 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1104 		return 0;
1105 
1106 	/* Are we searching for a filter attached to a class? */
1107 	if (TC_H_MIN(parent)) {
1108 		const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1109 
1110 		*cl = cops->find(q, parent);
1111 		if (*cl == 0) {
1112 			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
1113 			return -ENOENT;
1114 		}
1115 	}
1116 
1117 	return 0;
1118 }
1119 
1120 static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
1121 					  unsigned long cl, int ifindex,
1122 					  u32 block_index,
1123 					  struct netlink_ext_ack *extack)
1124 {
1125 	struct tcf_block *block;
1126 
1127 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
1128 		block = tcf_block_refcnt_get(net, block_index);
1129 		if (!block) {
1130 			NL_SET_ERR_MSG(extack, "Block of given index was not found");
1131 			return ERR_PTR(-EINVAL);
1132 		}
1133 	} else {
1134 		const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1135 
1136 		block = cops->tcf_block(q, cl, extack);
1137 		if (!block)
1138 			return ERR_PTR(-EINVAL);
1139 
1140 		if (tcf_block_shared(block)) {
1141 			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
1142 			return ERR_PTR(-EOPNOTSUPP);
1143 		}
1144 
1145 		/* Always take a reference to the block in order to support
1146 		 * executing the cls API rules update path without the rtnl
1147 		 * lock. The caller must release the block when finished using
1148 		 * it. The 'if' branch of this conditional obtains its
1149 		 * reference by calling tcf_block_refcnt_get().
1150 		 */
1151 		refcount_inc(&block->refcnt);
1152 	}
1153 
1154 	return block;
1155 }
1156 
1157 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
1158 			    struct tcf_block_ext_info *ei, bool rtnl_held)
1159 {
1160 	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
1161 		/* Flushing/putting all chains will cause the block to be
1162 		 * deallocated when last chain is freed. However, if chain_list
1163 		 * is empty, the block has to be deallocated manually. Once the
1164 		 * block reference counter has reached 0, it is no longer
1165 		 * possible to increment it or add new chains to the block.
1166 		 */
1167 		bool free_block = list_empty(&block->chain_list);
1168 
1169 		mutex_unlock(&block->lock);
1170 		if (tcf_block_shared(block))
1171 			tcf_block_remove(block, block->net);
1172 
1173 		if (q)
1174 			tcf_block_offload_unbind(block, q, ei);
1175 
1176 		if (free_block)
1177 			tcf_block_destroy(block);
1178 		else
1179 			tcf_block_flush_all_chains(block, rtnl_held);
1180 	} else if (q) {
1181 		tcf_block_offload_unbind(block, q, ei);
1182 	}
1183 }
1184 
1185 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
1186 {
1187 	__tcf_block_put(block, NULL, NULL, rtnl_held);
1188 }
1189 
1190 /* Find tcf block.
1191  * Set q, parent, cl when appropriate.
1192  */
1193 
1194 static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
1195 					u32 *parent, unsigned long *cl,
1196 					int ifindex, u32 block_index,
1197 					struct netlink_ext_ack *extack)
1198 {
1199 	struct tcf_block *block;
1200 	int err = 0;
1201 
1202 	ASSERT_RTNL();
1203 
1204 	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
1205 	if (err)
1206 		goto errout;
1207 
1208 	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
1209 	if (err)
1210 		goto errout_qdisc;
1211 
1212 	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
1213 	if (IS_ERR(block)) {
1214 		err = PTR_ERR(block);
1215 		goto errout_qdisc;
1216 	}
1217 
1218 	return block;
1219 
1220 errout_qdisc:
1221 	if (*q)
1222 		qdisc_put(*q);
1223 errout:
1224 	*q = NULL;
1225 	return ERR_PTR(err);
1226 }
1227 
1228 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
1229 			      bool rtnl_held)
1230 {
1231 	if (!IS_ERR_OR_NULL(block))
1232 		tcf_block_refcnt_put(block, rtnl_held);
1233 
1234 	if (q) {
1235 		if (rtnl_held)
1236 			qdisc_put(q);
1237 		else
1238 			qdisc_put_unlocked(q);
1239 	}
1240 }
1241 
1242 struct tcf_block_owner_item {
1243 	struct list_head list;
1244 	struct Qdisc *q;
1245 	enum flow_block_binder_type binder_type;
1246 };
1247 
1248 static void
1249 tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1250 			       struct Qdisc *q,
1251 			       enum flow_block_binder_type binder_type)
1252 {
1253 	if (block->keep_dst &&
1254 	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1255 	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1256 		netif_keep_dst(qdisc_dev(q));
1257 }
1258 
1259 void tcf_block_netif_keep_dst(struct tcf_block *block)
1260 {
1261 	struct tcf_block_owner_item *item;
1262 
1263 	block->keep_dst = true;
1264 	list_for_each_entry(item, &block->owner_list, list)
1265 		tcf_block_owner_netif_keep_dst(block, item->q,
1266 					       item->binder_type);
1267 }
1268 EXPORT_SYMBOL(tcf_block_netif_keep_dst);
1269 
1270 static int tcf_block_owner_add(struct tcf_block *block,
1271 			       struct Qdisc *q,
1272 			       enum flow_block_binder_type binder_type)
1273 {
1274 	struct tcf_block_owner_item *item;
1275 
1276 	item = kmalloc(sizeof(*item), GFP_KERNEL);
1277 	if (!item)
1278 		return -ENOMEM;
1279 	item->q = q;
1280 	item->binder_type = binder_type;
1281 	list_add(&item->list, &block->owner_list);
1282 	return 0;
1283 }
1284 
1285 static void tcf_block_owner_del(struct tcf_block *block,
1286 				struct Qdisc *q,
1287 				enum flow_block_binder_type binder_type)
1288 {
1289 	struct tcf_block_owner_item *item;
1290 
1291 	list_for_each_entry(item, &block->owner_list, list) {
1292 		if (item->q == q && item->binder_type == binder_type) {
1293 			list_del(&item->list);
1294 			kfree(item);
1295 			return;
1296 		}
1297 	}
1298 	WARN_ON(1);
1299 }
1300 
1301 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1302 		      struct tcf_block_ext_info *ei,
1303 		      struct netlink_ext_ack *extack)
1304 {
1305 	struct net *net = qdisc_net(q);
1306 	struct tcf_block *block = NULL;
1307 	int err;
1308 
1309 	if (ei->block_index)
1310 		/* block_index not 0 means the shared block is requested */
1311 		block = tcf_block_refcnt_get(net, ei->block_index);
1312 
1313 	if (!block) {
1314 		block = tcf_block_create(net, q, ei->block_index, extack);
1315 		if (IS_ERR(block))
1316 			return PTR_ERR(block);
1317 		if (tcf_block_shared(block)) {
1318 			err = tcf_block_insert(block, net, extack);
1319 			if (err)
1320 				goto err_block_insert;
1321 		}
1322 	}
1323 
1324 	err = tcf_block_owner_add(block, q, ei->binder_type);
1325 	if (err)
1326 		goto err_block_owner_add;
1327 
1328 	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1329 
1330 	err = tcf_chain0_head_change_cb_add(block, ei, extack);
1331 	if (err)
1332 		goto err_chain0_head_change_cb_add;
1333 
1334 	err = tcf_block_offload_bind(block, q, ei, extack);
1335 	if (err)
1336 		goto err_block_offload_bind;
1337 
1338 	*p_block = block;
1339 	return 0;
1340 
1341 err_block_offload_bind:
1342 	tcf_chain0_head_change_cb_del(block, ei);
1343 err_chain0_head_change_cb_add:
1344 	tcf_block_owner_del(block, q, ei->binder_type);
1345 err_block_owner_add:
1346 err_block_insert:
1347 	tcf_block_refcnt_put(block, true);
1348 	return err;
1349 }
1350 EXPORT_SYMBOL(tcf_block_get_ext);
1351 
1352 static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1353 {
1354 	struct tcf_proto __rcu **p_filter_chain = priv;
1355 
1356 	rcu_assign_pointer(*p_filter_chain, tp_head);
1357 }
1358 
1359 int tcf_block_get(struct tcf_block **p_block,
1360 		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1361 		  struct netlink_ext_ack *extack)
1362 {
1363 	struct tcf_block_ext_info ei = {
1364 		.chain_head_change = tcf_chain_head_change_dflt,
1365 		.chain_head_change_priv = p_filter_chain,
1366 	};
1367 
1368 	WARN_ON(!p_filter_chain);
1369 	return tcf_block_get_ext(p_block, q, &ei, extack);
1370 }
1371 EXPORT_SYMBOL(tcf_block_get);
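
/* Sketch of the common (non-shared) qdisc usage, assuming a qdisc private
 * struct with 'block' and 'filter_list' members:
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *
 * The default head-change callback then keeps q->filter_list pointing at
 * the current head of chain 0.
 */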
1372 
1373 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
1374  * actions should all be removed after flushing.
1375  */
1376 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1377 		       struct tcf_block_ext_info *ei)
1378 {
1379 	if (!block)
1380 		return;
1381 	tcf_chain0_head_change_cb_del(block, ei);
1382 	tcf_block_owner_del(block, q, ei->binder_type);
1383 
1384 	__tcf_block_put(block, q, ei, true);
1385 }
1386 EXPORT_SYMBOL(tcf_block_put_ext);
1387 
1388 void tcf_block_put(struct tcf_block *block)
1389 {
1390 	struct tcf_block_ext_info ei = {0, };
1391 
1392 	if (!block)
1393 		return;
1394 	tcf_block_put_ext(block, block->q, &ei);
1395 }
1396 
1397 EXPORT_SYMBOL(tcf_block_put);
1398 
1399 static int
1400 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1401 			    void *cb_priv, bool add, bool offload_in_use,
1402 			    struct netlink_ext_ack *extack)
1403 {
1404 	struct tcf_chain *chain, *chain_prev;
1405 	struct tcf_proto *tp, *tp_prev;
1406 	int err;
1407 
1408 	lockdep_assert_held(&block->cb_lock);
1409 
1410 	for (chain = __tcf_get_next_chain(block, NULL);
1411 	     chain;
1412 	     chain_prev = chain,
1413 		     chain = __tcf_get_next_chain(block, chain),
1414 		     tcf_chain_put(chain_prev)) {
1415 		for (tp = __tcf_get_next_proto(chain, NULL); tp;
1416 		     tp_prev = tp,
1417 			     tp = __tcf_get_next_proto(chain, tp),
1418 			     tcf_proto_put(tp_prev, true, NULL)) {
1419 			if (tp->ops->reoffload) {
1420 				err = tp->ops->reoffload(tp, add, cb, cb_priv,
1421 							 extack);
1422 				if (err && add)
1423 					goto err_playback_remove;
1424 			} else if (add && offload_in_use) {
1425 				err = -EOPNOTSUPP;
1426 				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1427 				goto err_playback_remove;
1428 			}
1429 		}
1430 	}
1431 
1432 	return 0;
1433 
1434 err_playback_remove:
1435 	tcf_proto_put(tp, true, NULL);
1436 	tcf_chain_put(chain);
1437 	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1438 				    extack);
1439 	return err;
1440 }
1441 
1442 static int tcf_block_bind(struct tcf_block *block,
1443 			  struct flow_block_offload *bo)
1444 {
1445 	struct flow_block_cb *block_cb, *next;
1446 	int err, i = 0;
1447 
1448 	lockdep_assert_held(&block->cb_lock);
1449 
1450 	list_for_each_entry(block_cb, &bo->cb_list, list) {
1451 		err = tcf_block_playback_offloads(block, block_cb->cb,
1452 						  block_cb->cb_priv, true,
1453 						  tcf_block_offload_in_use(block),
1454 						  bo->extack);
1455 		if (err)
1456 			goto err_unroll;
1457 		if (!bo->unlocked_driver_cb)
1458 			block->lockeddevcnt++;
1459 
1460 		i++;
1461 	}
1462 	list_splice(&bo->cb_list, &block->flow_block.cb_list);
1463 
1464 	return 0;
1465 
1466 err_unroll:
1467 	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1468 		if (i-- > 0) {
1469 			list_del(&block_cb->list);
1470 			tcf_block_playback_offloads(block, block_cb->cb,
1471 						    block_cb->cb_priv, false,
1472 						    tcf_block_offload_in_use(block),
1473 						    NULL);
1474 			if (!bo->unlocked_driver_cb)
1475 				block->lockeddevcnt--;
1476 		}
1477 		flow_block_cb_free(block_cb);
1478 	}
1479 
1480 	return err;
1481 }
1482 
1483 static void tcf_block_unbind(struct tcf_block *block,
1484 			     struct flow_block_offload *bo)
1485 {
1486 	struct flow_block_cb *block_cb, *next;
1487 
1488 	lockdep_assert_held(&block->cb_lock);
1489 
1490 	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1491 		tcf_block_playback_offloads(block, block_cb->cb,
1492 					    block_cb->cb_priv, false,
1493 					    tcf_block_offload_in_use(block),
1494 					    NULL);
1495 		list_del(&block_cb->list);
1496 		flow_block_cb_free(block_cb);
1497 		if (!bo->unlocked_driver_cb)
1498 			block->lockeddevcnt--;
1499 	}
1500 }
1501 
1502 static int tcf_block_setup(struct tcf_block *block,
1503 			   struct flow_block_offload *bo)
1504 {
1505 	int err;
1506 
1507 	switch (bo->command) {
1508 	case FLOW_BLOCK_BIND:
1509 		err = tcf_block_bind(block, bo);
1510 		break;
1511 	case FLOW_BLOCK_UNBIND:
1512 		err = 0;
1513 		tcf_block_unbind(block, bo);
1514 		break;
1515 	default:
1516 		WARN_ON_ONCE(1);
1517 		err = -EOPNOTSUPP;
1518 	}
1519 
1520 	return err;
1521 }
1522 
1523 /* Main classifier routine: scans classifier chain attached
1524  * to this qdisc, (optionally) tests for protocol and asks
1525  * specific classifiers.
1526  */
1527 static inline int __tcf_classify(struct sk_buff *skb,
1528 				 const struct tcf_proto *tp,
1529 				 const struct tcf_proto *orig_tp,
1530 				 struct tcf_result *res,
1531 				 bool compat_mode,
1532 				 u32 *last_executed_chain)
1533 {
1534 #ifdef CONFIG_NET_CLS_ACT
1535 	const int max_reclassify_loop = 16;
1536 	const struct tcf_proto *first_tp;
1537 	int limit = 0;
1538 
1539 reclassify:
1540 #endif
1541 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
1542 		__be16 protocol = skb_protocol(skb, false);
1543 		int err;
1544 
1545 		if (tp->protocol != protocol &&
1546 		    tp->protocol != htons(ETH_P_ALL))
1547 			continue;
1548 
1549 		err = tp->classify(skb, tp, res);
1550 #ifdef CONFIG_NET_CLS_ACT
1551 		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1552 			first_tp = orig_tp;
1553 			*last_executed_chain = first_tp->chain->index;
1554 			goto reset;
1555 		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1556 			first_tp = res->goto_tp;
1557 			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1558 			goto reset;
1559 		}
1560 #endif
1561 		if (err >= 0)
1562 			return err;
1563 	}
1564 
1565 	return TC_ACT_UNSPEC; /* signal: continue lookup */
1566 #ifdef CONFIG_NET_CLS_ACT
1567 reset:
1568 	if (unlikely(limit++ >= max_reclassify_loop)) {
1569 		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1570 				       tp->chain->block->index,
1571 				       tp->prio & 0xffff,
1572 				       ntohs(tp->protocol));
1573 		return TC_ACT_SHOT;
1574 	}
1575 
1576 	tp = first_tp;
1577 	goto reclassify;
1578 #endif
1579 }
1580 
1581 int tcf_classify(struct sk_buff *skb,
1582 		 const struct tcf_block *block,
1583 		 const struct tcf_proto *tp,
1584 		 struct tcf_result *res, bool compat_mode)
1585 {
1586 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1587 	u32 last_executed_chain = 0;
1588 
1589 	return __tcf_classify(skb, tp, tp, res, compat_mode,
1590 			      &last_executed_chain);
1591 #else
1592 	u32 last_executed_chain = tp ? tp->chain->index : 0;
1593 	const struct tcf_proto *orig_tp = tp;
1594 	struct tc_skb_ext *ext;
1595 	int ret;
1596 
1597 	if (block) {
1598 		ext = skb_ext_find(skb, TC_SKB_EXT);
1599 
1600 		if (ext && ext->chain) {
1601 			struct tcf_chain *fchain;
1602 
1603 			fchain = tcf_chain_lookup_rcu(block, ext->chain);
1604 			if (!fchain)
1605 				return TC_ACT_SHOT;
1606 
1607 			/* Consume, so cloned/redirect skbs won't inherit ext */
1608 			skb_ext_del(skb, TC_SKB_EXT);
1609 
1610 			tp = rcu_dereference_bh(fchain->filter_chain);
1611 			last_executed_chain = fchain->index;
1612 		}
1613 	}
1614 
1615 	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
1616 			     &last_executed_chain);
1617 
1618 	/* If we missed on some chain */
1619 	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1620 		ext = tc_skb_ext_alloc(skb);
1621 		if (WARN_ON_ONCE(!ext))
1622 			return TC_ACT_SHOT;
1623 		ext->chain = last_executed_chain;
1624 		ext->mru = qdisc_skb_cb(skb)->mru;
1625 		ext->post_ct = qdisc_skb_cb(skb)->post_ct;
1626 	}
1627 
1628 	return ret;
1629 #endif
1630 }
1631 EXPORT_SYMBOL(tcf_classify);
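
/* A TC_ACT_GOTO_CHAIN verdict encodes the target chain index in the low
 * bits of the classifier's return value; __tcf_classify() recovers it with
 * 'err & TC_ACT_EXT_VAL_MASK' and restarts at that chain's filter list,
 * bounded by max_reclassify_loop to stop misconfigured loops.
 */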
1632 
1633 struct tcf_chain_info {
1634 	struct tcf_proto __rcu **pprev;
1635 	struct tcf_proto __rcu *next;
1636 };
1637 
1638 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1639 					   struct tcf_chain_info *chain_info)
1640 {
1641 	return tcf_chain_dereference(*chain_info->pprev, chain);
1642 }
1643 
1644 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1645 			       struct tcf_chain_info *chain_info,
1646 			       struct tcf_proto *tp)
1647 {
1648 	if (chain->flushing)
1649 		return -EAGAIN;
1650 
1651 	if (*chain_info->pprev == chain->filter_chain)
1652 		tcf_chain0_head_change(chain, tp);
1653 	tcf_proto_get(tp);
1654 	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1655 	rcu_assign_pointer(*chain_info->pprev, tp);
1656 
1657 	return 0;
1658 }
1659 
1660 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1661 				struct tcf_chain_info *chain_info,
1662 				struct tcf_proto *tp)
1663 {
1664 	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1665 
1666 	tcf_proto_mark_delete(tp);
1667 	if (tp == chain->filter_chain)
1668 		tcf_chain0_head_change(chain, next);
1669 	RCU_INIT_POINTER(*chain_info->pprev, next);
1670 }
1671 
1672 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1673 					   struct tcf_chain_info *chain_info,
1674 					   u32 protocol, u32 prio,
1675 					   bool prio_allocate);
1676 
1677 /* Try to insert new proto.
1678  * If proto with specified priority already exists, free new proto
1679  * and return existing one.
1680  */
1681 
1682 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1683 						    struct tcf_proto *tp_new,
1684 						    u32 protocol, u32 prio,
1685 						    bool rtnl_held)
1686 {
1687 	struct tcf_chain_info chain_info;
1688 	struct tcf_proto *tp;
1689 	int err = 0;
1690 
1691 	mutex_lock(&chain->filter_chain_lock);
1692 
1693 	if (tcf_proto_exists_destroying(chain, tp_new)) {
1694 		mutex_unlock(&chain->filter_chain_lock);
1695 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1696 		return ERR_PTR(-EAGAIN);
1697 	}
1698 
1699 	tp = tcf_chain_tp_find(chain, &chain_info,
1700 			       protocol, prio, false);
1701 	if (!tp)
1702 		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1703 	mutex_unlock(&chain->filter_chain_lock);
1704 
1705 	if (tp) {
1706 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1707 		tp_new = tp;
1708 	} else if (err) {
1709 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1710 		tp_new = ERR_PTR(err);
1711 	}
1712 
1713 	return tp_new;
1714 }
1715 
1716 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1717 				      struct tcf_proto *tp, bool rtnl_held,
1718 				      struct netlink_ext_ack *extack)
1719 {
1720 	struct tcf_chain_info chain_info;
1721 	struct tcf_proto *tp_iter;
1722 	struct tcf_proto **pprev;
1723 	struct tcf_proto *next;
1724 
1725 	mutex_lock(&chain->filter_chain_lock);
1726 
1727 	/* Atomically find and remove tp from chain. */
1728 	for (pprev = &chain->filter_chain;
1729 	     (tp_iter = tcf_chain_dereference(*pprev, chain));
1730 	     pprev = &tp_iter->next) {
1731 		if (tp_iter == tp) {
1732 			chain_info.pprev = pprev;
1733 			chain_info.next = tp_iter->next;
1734 			WARN_ON(tp_iter->deleting);
1735 			break;
1736 		}
1737 	}
1738 	/* Verify that tp still exists and no new filters were inserted
1739 	 * concurrently.
1740 	 * Mark tp for deletion if it is empty.
1741 	 */
1742 	if (!tp_iter || !tcf_proto_check_delete(tp)) {
1743 		mutex_unlock(&chain->filter_chain_lock);
1744 		return;
1745 	}
1746 
1747 	tcf_proto_signal_destroying(chain, tp);
1748 	next = tcf_chain_dereference(chain_info.next, chain);
1749 	if (tp == chain->filter_chain)
1750 		tcf_chain0_head_change(chain, next);
1751 	RCU_INIT_POINTER(*chain_info.pprev, next);
1752 	mutex_unlock(&chain->filter_chain_lock);
1753 
1754 	tcf_proto_put(tp, rtnl_held, extack);
1755 }
1756 
1757 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1758 					   struct tcf_chain_info *chain_info,
1759 					   u32 protocol, u32 prio,
1760 					   bool prio_allocate)
1761 {
1762 	struct tcf_proto **pprev;
1763 	struct tcf_proto *tp;
1764 
1765 	/* Check the chain for existence of proto-tcf with this priority */
1766 	for (pprev = &chain->filter_chain;
1767 	     (tp = tcf_chain_dereference(*pprev, chain));
1768 	     pprev = &tp->next) {
1769 		if (tp->prio >= prio) {
1770 			if (tp->prio == prio) {
1771 				if (prio_allocate ||
1772 				    (tp->protocol != protocol && protocol))
1773 					return ERR_PTR(-EINVAL);
1774 			} else {
1775 				tp = NULL;
1776 			}
1777 			break;
1778 		}
1779 	}
1780 	chain_info->pprev = pprev;
1781 	if (tp) {
1782 		chain_info->next = tp->next;
1783 		tcf_proto_get(tp);
1784 	} else {
1785 		chain_info->next = NULL;
1786 	}
1787 	return tp;
1788 }
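
/* Illustrative lookup: with protos at prios 1 and 3, a search for prio 2
 * returns NULL and leaves chain_info->pprev pointing at the prio-3 link,
 * which is exactly where tcf_chain_tp_insert() would splice a new proto.
 */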
1789 
1790 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1791 			 struct tcf_proto *tp, struct tcf_block *block,
1792 			 struct Qdisc *q, u32 parent, void *fh,
1793 			 u32 portid, u32 seq, u16 flags, int event,
1794 			 bool terse_dump, bool rtnl_held)
1795 {
1796 	struct tcmsg *tcm;
1797 	struct nlmsghdr  *nlh;
1798 	unsigned char *b = skb_tail_pointer(skb);
1799 
1800 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1801 	if (!nlh)
1802 		goto out_nlmsg_trim;
1803 	tcm = nlmsg_data(nlh);
1804 	tcm->tcm_family = AF_UNSPEC;
1805 	tcm->tcm__pad1 = 0;
1806 	tcm->tcm__pad2 = 0;
1807 	if (q) {
1808 		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1809 		tcm->tcm_parent = parent;
1810 	} else {
1811 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1812 		tcm->tcm_block_index = block->index;
1813 	}
1814 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1815 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1816 		goto nla_put_failure;
1817 	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1818 		goto nla_put_failure;
1819 	if (!fh) {
1820 		tcm->tcm_handle = 0;
1821 	} else if (terse_dump) {
1822 		if (tp->ops->terse_dump) {
1823 			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
1824 						rtnl_held) < 0)
1825 				goto nla_put_failure;
1826 		} else {
1827 			goto cls_op_not_supp;
1828 		}
1829 	} else {
1830 		if (tp->ops->dump &&
1831 		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1832 			goto nla_put_failure;
1833 	}
1834 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1835 	return skb->len;
1836 
1837 out_nlmsg_trim:
1838 nla_put_failure:
1839 cls_op_not_supp:
1840 	nlmsg_trim(skb, b);
1841 	return -1;
1842 }
1843 
1844 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1845 			  struct nlmsghdr *n, struct tcf_proto *tp,
1846 			  struct tcf_block *block, struct Qdisc *q,
1847 			  u32 parent, void *fh, int event, bool unicast,
1848 			  bool rtnl_held)
1849 {
1850 	struct sk_buff *skb;
1851 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1852 	int err = 0;
1853 
1854 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1855 	if (!skb)
1856 		return -ENOBUFS;
1857 
1858 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1859 			  n->nlmsg_seq, n->nlmsg_flags, event,
1860 			  false, rtnl_held) <= 0) {
1861 		kfree_skb(skb);
1862 		return -EINVAL;
1863 	}
1864 
1865 	if (unicast)
1866 		err = rtnl_unicast(skb, net, portid);
1867 	else
1868 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1869 				     n->nlmsg_flags & NLM_F_ECHO);
1870 	return err;
1871 }
1872 
1873 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1874 			      struct nlmsghdr *n, struct tcf_proto *tp,
1875 			      struct tcf_block *block, struct Qdisc *q,
1876 			      u32 parent, void *fh, bool unicast, bool *last,
1877 			      bool rtnl_held, struct netlink_ext_ack *extack)
1878 {
1879 	struct sk_buff *skb;
1880 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1881 	int err;
1882 
1883 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1884 	if (!skb)
1885 		return -ENOBUFS;
1886 
1887 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1888 			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1889 			  false, rtnl_held) <= 0) {
1890 		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1891 		kfree_skb(skb);
1892 		return -EINVAL;
1893 	}
1894 
1895 	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1896 	if (err) {
1897 		kfree_skb(skb);
1898 		return err;
1899 	}
1900 
1901 	if (unicast)
1902 		err = rtnl_unicast(skb, net, portid);
1903 	else
1904 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1905 				     n->nlmsg_flags & NLM_F_ECHO);
1906 	if (err < 0)
1907 		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1908 
1909 	return err;
1910 }
1911 
1912 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1913 				 struct tcf_block *block, struct Qdisc *q,
1914 				 u32 parent, struct nlmsghdr *n,
1915 				 struct tcf_chain *chain, int event)
1916 {
1917 	struct tcf_proto *tp;
1918 
1919 	for (tp = tcf_get_next_proto(chain, NULL);
1920 	     tp; tp = tcf_get_next_proto(chain, tp))
1921 		tfilter_notify(net, oskb, n, tp, block,
1922 			       q, parent, NULL, event, false, true);
1923 }
1924 
1925 static void tfilter_put(struct tcf_proto *tp, void *fh)
1926 {
1927 	if (tp->ops->put && fh)
1928 		tp->ops->put(tp, fh);
1929 }
1930 
1931 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1932 			  struct netlink_ext_ack *extack)
1933 {
1934 	struct net *net = sock_net(skb->sk);
1935 	struct nlattr *tca[TCA_MAX + 1];
1936 	char name[IFNAMSIZ];
1937 	struct tcmsg *t;
1938 	u32 protocol;
1939 	u32 prio;
1940 	bool prio_allocate;
1941 	u32 parent;
1942 	u32 chain_index;
1943 	struct Qdisc *q = NULL;
1944 	struct tcf_chain_info chain_info;
1945 	struct tcf_chain *chain = NULL;
1946 	struct tcf_block *block;
1947 	struct tcf_proto *tp;
1948 	unsigned long cl;
1949 	void *fh;
1950 	int err;
1951 	int tp_created;
1952 	bool rtnl_held = false;
1953 	u32 flags;
1954 
1955 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1956 		return -EPERM;
1957 
1958 replay:
1959 	tp_created = 0;
1960 
1961 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1962 				     rtm_tca_policy, extack);
1963 	if (err < 0)
1964 		return err;
1965 
1966 	t = nlmsg_data(n);
1967 	protocol = TC_H_MIN(t->tcm_info);
1968 	prio = TC_H_MAJ(t->tcm_info);
1969 	prio_allocate = false;
1970 	parent = t->tcm_parent;
1971 	tp = NULL;
1972 	cl = 0;
1973 	block = NULL;
1974 	flags = 0;
1975 
1976 	if (prio == 0) {
1977 		/* If no priority is provided by the user,
1978 		 * we allocate one.
1979 		 */
1980 		if (n->nlmsg_flags & NLM_F_CREATE) {
1981 			prio = TC_H_MAKE(0x80000000U, 0U);
1982 			prio_allocate = true;
1983 		} else {
1984 			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
1985 			return -ENOENT;
1986 		}
1987 	}
1988 
1989 	/* Find head of filter chain. */
1990 
1991 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
1992 	if (err)
1993 		return err;
1994 
1995 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
1996 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
1997 		err = -EINVAL;
1998 		goto errout;
1999 	}
2000 
2001 	/* Take the rtnl mutex if rtnl_held was set to true on a previous
2002 	 * iteration, the block is shared (no qdisc found), the qdisc is not
2003 	 * unlocked, or the classifier type is not specified or not unlocked.
2004 	 */
2005 	if (rtnl_held ||
2006 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2007 	    !tcf_proto_is_unlocked(name)) {
2008 		rtnl_held = true;
2009 		rtnl_lock();
2010 	}
2011 
2012 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2013 	if (err)
2014 		goto errout;
2015 
2016 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2017 				 extack);
2018 	if (IS_ERR(block)) {
2019 		err = PTR_ERR(block);
2020 		goto errout;
2021 	}
2022 	block->classid = parent;
2023 
2024 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2025 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2026 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2027 		err = -EINVAL;
2028 		goto errout;
2029 	}
2030 	chain = tcf_chain_get(block, chain_index, true);
2031 	if (!chain) {
2032 		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2033 		err = -ENOMEM;
2034 		goto errout;
2035 	}
2036 
2037 	mutex_lock(&chain->filter_chain_lock);
2038 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2039 			       prio, prio_allocate);
2040 	if (IS_ERR(tp)) {
2041 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2042 		err = PTR_ERR(tp);
2043 		goto errout_locked;
2044 	}
2045 
2046 	if (tp == NULL) {
2047 		struct tcf_proto *tp_new = NULL;
2048 
2049 		if (chain->flushing) {
2050 			err = -EAGAIN;
2051 			goto errout_locked;
2052 		}
2053 
2054 		/* Proto-tcf does not exist, create a new one */
2055 
2056 		if (tca[TCA_KIND] == NULL || !protocol) {
2057 			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2058 			err = -EINVAL;
2059 			goto errout_locked;
2060 		}
2061 
2062 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2063 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2064 			err = -ENOENT;
2065 			goto errout_locked;
2066 		}
2067 
2068 		if (prio_allocate)
2069 			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2070 							       &chain_info));
2071 
2072 		mutex_unlock(&chain->filter_chain_lock);
2073 		tp_new = tcf_proto_create(name, protocol, prio, chain,
2074 					  rtnl_held, extack);
2075 		if (IS_ERR(tp_new)) {
2076 			err = PTR_ERR(tp_new);
2077 			goto errout_tp;
2078 		}
2079 
2080 		tp_created = 1;
2081 		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2082 						rtnl_held);
2083 		if (IS_ERR(tp)) {
2084 			err = PTR_ERR(tp);
2085 			goto errout_tp;
2086 		}
2087 	} else {
2088 		mutex_unlock(&chain->filter_chain_lock);
2089 	}
2090 
2091 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2092 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2093 		err = -EINVAL;
2094 		goto errout;
2095 	}
2096 
2097 	fh = tp->ops->get(tp, t->tcm_handle);
2098 
2099 	if (!fh) {
2100 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2101 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2102 			err = -ENOENT;
2103 			goto errout;
2104 		}
2105 	} else if (n->nlmsg_flags & NLM_F_EXCL) {
2106 		tfilter_put(tp, fh);
2107 		NL_SET_ERR_MSG(extack, "Filter already exists");
2108 		err = -EEXIST;
2109 		goto errout;
2110 	}
2111 
2112 	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2113 		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2114 		err = -EINVAL;
2115 		goto errout;
2116 	}
2117 
2118 	if (!(n->nlmsg_flags & NLM_F_CREATE))
2119 		flags |= TCA_ACT_FLAGS_REPLACE;
2120 	if (!rtnl_held)
2121 		flags |= TCA_ACT_FLAGS_NO_RTNL;
2122 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2123 			      flags, extack);
2124 	if (err == 0) {
2125 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2126 			       RTM_NEWTFILTER, false, rtnl_held);
2127 		tfilter_put(tp, fh);
2128 		/* q pointer is NULL for shared blocks */
2129 		if (q)
2130 			q->flags &= ~TCQ_F_CAN_BYPASS;
2131 	}
2132 
2133 errout:
2134 	if (err && tp_created)
2135 		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2136 errout_tp:
2137 	if (chain) {
2138 		if (tp && !IS_ERR(tp))
2139 			tcf_proto_put(tp, rtnl_held, NULL);
2140 		if (!tp_created)
2141 			tcf_chain_put(chain);
2142 	}
2143 	tcf_block_release(q, block, rtnl_held);
2144 
2145 	if (rtnl_held)
2146 		rtnl_unlock();
2147 
2148 	if (err == -EAGAIN) {
2149 		/* Force the rtnl lock to be taken on replay, in case the EAGAIN
2150 		 * was caused by a concurrent flush of the target chain.
2151 		 */
2152 		rtnl_held = true;
2153 		/* Replay the request. */
2154 		goto replay;
2155 	}
2156 	return err;
2157 
2158 errout_locked:
2159 	mutex_unlock(&chain->filter_chain_lock);
2160 	goto errout;
2161 }
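
/* Summary of the nlmsg flag handling in tc_new_tfilter() above:
 *
 *   NLM_F_CREATE	required to create a missing tcf_proto (and to have a
 *			priority auto-allocated) or a missing filter handle;
 *			without it the request fails with -ENOENT.
 *   NLM_F_EXCL		fail with -EEXIST if the filter handle already exists.
 *   neither		an existing filter is replaced; TCA_ACT_FLAGS_REPLACE
 *			is passed down to the classifier's ->change().
 */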
2162 
2163 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2164 			  struct netlink_ext_ack *extack)
2165 {
2166 	struct net *net = sock_net(skb->sk);
2167 	struct nlattr *tca[TCA_MAX + 1];
2168 	char name[IFNAMSIZ];
2169 	struct tcmsg *t;
2170 	u32 protocol;
2171 	u32 prio;
2172 	u32 parent;
2173 	u32 chain_index;
2174 	struct Qdisc *q = NULL;
2175 	struct tcf_chain_info chain_info;
2176 	struct tcf_chain *chain = NULL;
2177 	struct tcf_block *block = NULL;
2178 	struct tcf_proto *tp = NULL;
2179 	unsigned long cl = 0;
2180 	void *fh = NULL;
2181 	int err;
2182 	bool rtnl_held = false;
2183 
2184 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2185 		return -EPERM;
2186 
2187 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2188 				     rtm_tca_policy, extack);
2189 	if (err < 0)
2190 		return err;
2191 
2192 	t = nlmsg_data(n);
2193 	protocol = TC_H_MIN(t->tcm_info);
2194 	prio = TC_H_MAJ(t->tcm_info);
2195 	parent = t->tcm_parent;
2196 
2197 	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2198 		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2199 		return -ENOENT;
2200 	}
2201 
2202 	/* Find head of filter chain. */
2203 
2204 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2205 	if (err)
2206 		return err;
2207 
2208 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2209 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2210 		err = -EINVAL;
2211 		goto errout;
2212 	}
2213 	/* Take the rtnl mutex if flushing the whole chain, the block is shared
2214 	 * (no qdisc found), the qdisc is not unlocked, or the classifier type
2215 	 * is not specified or not unlocked.
2216 	 */
2217 	if (!prio ||
2218 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2219 	    !tcf_proto_is_unlocked(name)) {
2220 		rtnl_held = true;
2221 		rtnl_lock();
2222 	}
2223 
2224 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2225 	if (err)
2226 		goto errout;
2227 
2228 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2229 				 extack);
2230 	if (IS_ERR(block)) {
2231 		err = PTR_ERR(block);
2232 		goto errout;
2233 	}
2234 
2235 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2236 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2237 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2238 		err = -EINVAL;
2239 		goto errout;
2240 	}
2241 	chain = tcf_chain_get(block, chain_index, false);
2242 	if (!chain) {
2243 		/* User requested flush on non-existent chain. Nothing to do,
2244 		 * so just return success.
2245 		 */
2246 		if (prio == 0) {
2247 			err = 0;
2248 			goto errout;
2249 		}
2250 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2251 		err = -ENOENT;
2252 		goto errout;
2253 	}
2254 
2255 	if (prio == 0) {
2256 		tfilter_notify_chain(net, skb, block, q, parent, n,
2257 				     chain, RTM_DELTFILTER);
2258 		tcf_chain_flush(chain, rtnl_held);
2259 		err = 0;
2260 		goto errout;
2261 	}
2262 
2263 	mutex_lock(&chain->filter_chain_lock);
2264 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2265 			       prio, false);
2266 	if (!tp || IS_ERR(tp)) {
2267 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2268 		err = tp ? PTR_ERR(tp) : -ENOENT;
2269 		goto errout_locked;
2270 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2271 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2272 		err = -EINVAL;
2273 		goto errout_locked;
2274 	} else if (t->tcm_handle == 0) {
2275 		tcf_proto_signal_destroying(chain, tp);
2276 		tcf_chain_tp_remove(chain, &chain_info, tp);
2277 		mutex_unlock(&chain->filter_chain_lock);
2278 
2279 		tcf_proto_put(tp, rtnl_held, NULL);
2280 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2281 			       RTM_DELTFILTER, false, rtnl_held);
2282 		err = 0;
2283 		goto errout;
2284 	}
2285 	mutex_unlock(&chain->filter_chain_lock);
2286 
2287 	fh = tp->ops->get(tp, t->tcm_handle);
2288 
2289 	if (!fh) {
2290 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2291 		err = -ENOENT;
2292 	} else {
2293 		bool last;
2294 
2295 		err = tfilter_del_notify(net, skb, n, tp, block,
2296 					 q, parent, fh, false, &last,
2297 					 rtnl_held, extack);
2298 
2299 		if (err)
2300 			goto errout;
2301 		if (last)
2302 			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2303 	}
2304 
2305 errout:
2306 	if (chain) {
2307 		if (tp && !IS_ERR(tp))
2308 			tcf_proto_put(tp, rtnl_held, NULL);
2309 		tcf_chain_put(chain);
2310 	}
2311 	tcf_block_release(q, block, rtnl_held);
2312 
2313 	if (rtnl_held)
2314 		rtnl_unlock();
2315 
2316 	return err;
2317 
2318 errout_locked:
2319 	mutex_unlock(&chain->filter_chain_lock);
2320 	goto errout;
2321 }
2322 
2323 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2324 			  struct netlink_ext_ack *extack)
2325 {
2326 	struct net *net = sock_net(skb->sk);
2327 	struct nlattr *tca[TCA_MAX + 1];
2328 	char name[IFNAMSIZ];
2329 	struct tcmsg *t;
2330 	u32 protocol;
2331 	u32 prio;
2332 	u32 parent;
2333 	u32 chain_index;
2334 	struct Qdisc *q = NULL;
2335 	struct tcf_chain_info chain_info;
2336 	struct tcf_chain *chain = NULL;
2337 	struct tcf_block *block = NULL;
2338 	struct tcf_proto *tp = NULL;
2339 	unsigned long cl = 0;
2340 	void *fh = NULL;
2341 	int err;
2342 	bool rtnl_held = false;
2343 
2344 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2345 				     rtm_tca_policy, extack);
2346 	if (err < 0)
2347 		return err;
2348 
2349 	t = nlmsg_data(n);
2350 	protocol = TC_H_MIN(t->tcm_info);
2351 	prio = TC_H_MAJ(t->tcm_info);
2352 	parent = t->tcm_parent;
2353 
2354 	if (prio == 0) {
2355 		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2356 		return -ENOENT;
2357 	}
2358 
2359 	/* Find head of filter chain. */
2360 
2361 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2362 	if (err)
2363 		return err;
2364 
2365 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2366 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2367 		err = -EINVAL;
2368 		goto errout;
2369 	}
2370 	/* Take the rtnl mutex if the block is shared (no qdisc found), the
2371 	 * qdisc is not unlocked, or the classifier type is not specified or
2372 	 * not unlocked.
2373 	 */
2374 	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2375 	    !tcf_proto_is_unlocked(name)) {
2376 		rtnl_held = true;
2377 		rtnl_lock();
2378 	}
2379 
2380 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2381 	if (err)
2382 		goto errout;
2383 
2384 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2385 				 extack);
2386 	if (IS_ERR(block)) {
2387 		err = PTR_ERR(block);
2388 		goto errout;
2389 	}
2390 
2391 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2392 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2393 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2394 		err = -EINVAL;
2395 		goto errout;
2396 	}
2397 	chain = tcf_chain_get(block, chain_index, false);
2398 	if (!chain) {
2399 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2400 		err = -EINVAL;
2401 		goto errout;
2402 	}
2403 
2404 	mutex_lock(&chain->filter_chain_lock);
2405 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2406 			       prio, false);
2407 	mutex_unlock(&chain->filter_chain_lock);
2408 	if (!tp || IS_ERR(tp)) {
2409 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2410 		err = tp ? PTR_ERR(tp) : -ENOENT;
2411 		goto errout;
2412 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2413 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2414 		err = -EINVAL;
2415 		goto errout;
2416 	}
2417 
2418 	fh = tp->ops->get(tp, t->tcm_handle);
2419 
2420 	if (!fh) {
2421 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2422 		err = -ENOENT;
2423 	} else {
2424 		err = tfilter_notify(net, skb, n, tp, block, q, parent,
2425 				     fh, RTM_NEWTFILTER, true, rtnl_held);
2426 		if (err < 0)
2427 			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2428 	}
2429 
2430 	tfilter_put(tp, fh);
2431 errout:
2432 	if (chain) {
2433 		if (tp && !IS_ERR(tp))
2434 			tcf_proto_put(tp, rtnl_held, NULL);
2435 		tcf_chain_put(chain);
2436 	}
2437 	tcf_block_release(q, block, rtnl_held);
2438 
2439 	if (rtnl_held)
2440 		rtnl_unlock();
2441 
2442 	return err;
2443 }
2444 
2445 struct tcf_dump_args {
2446 	struct tcf_walker w;
2447 	struct sk_buff *skb;
2448 	struct netlink_callback *cb;
2449 	struct tcf_block *block;
2450 	struct Qdisc *q;
2451 	u32 parent;
2452 	bool terse_dump;
2453 };
2454 
2455 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2456 {
2457 	struct tcf_dump_args *a = (void *)arg;
2458 	struct net *net = sock_net(a->skb->sk);
2459 
2460 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2461 			     n, NETLINK_CB(a->cb->skb).portid,
2462 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2463 			     RTM_NEWTFILTER, a->terse_dump, true);
2464 }
2465 
2466 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2467 			   struct sk_buff *skb, struct netlink_callback *cb,
2468 			   long index_start, long *p_index, bool terse)
2469 {
2470 	struct net *net = sock_net(skb->sk);
2471 	struct tcf_block *block = chain->block;
2472 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2473 	struct tcf_proto *tp, *tp_prev;
2474 	struct tcf_dump_args arg;
2475 
2476 	for (tp = __tcf_get_next_proto(chain, NULL);
2477 	     tp;
2478 	     tp_prev = tp,
2479 		     tp = __tcf_get_next_proto(chain, tp),
2480 		     tcf_proto_put(tp_prev, true, NULL),
2481 		     (*p_index)++) {
2482 		if (*p_index < index_start)
2483 			continue;
2484 		if (TC_H_MAJ(tcm->tcm_info) &&
2485 		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
2486 			continue;
2487 		if (TC_H_MIN(tcm->tcm_info) &&
2488 		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
2489 			continue;
2490 		if (*p_index > index_start)
2491 			memset(&cb->args[1], 0,
2492 			       sizeof(cb->args) - sizeof(cb->args[0]));
2493 		if (cb->args[1] == 0) {
2494 			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2495 					  NETLINK_CB(cb->skb).portid,
2496 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
2497 					  RTM_NEWTFILTER, false, true) <= 0)
2498 				goto errout;
2499 			cb->args[1] = 1;
2500 		}
2501 		if (!tp->ops->walk)
2502 			continue;
2503 		arg.w.fn = tcf_node_dump;
2504 		arg.skb = skb;
2505 		arg.cb = cb;
2506 		arg.block = block;
2507 		arg.q = q;
2508 		arg.parent = parent;
2509 		arg.w.stop = 0;
2510 		arg.w.skip = cb->args[1] - 1;
2511 		arg.w.count = 0;
2512 		arg.w.cookie = cb->args[2];
2513 		arg.terse_dump = terse;
2514 		tp->ops->walk(tp, &arg.w, true);
2515 		cb->args[2] = arg.w.cookie;
2516 		cb->args[1] = arg.w.count + 1;
2517 		if (arg.w.stop)
2518 			goto errout;
2519 	}
2520 	return true;
2521 
2522 errout:
2523 	tcf_proto_put(tp, true, NULL);
2524 	return false;
2525 }
2526 
2527 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2528 	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2529 };
2530 
2531 /* called with RTNL */
2532 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2533 {
2534 	struct tcf_chain *chain, *chain_prev;
2535 	struct net *net = sock_net(skb->sk);
2536 	struct nlattr *tca[TCA_MAX + 1];
2537 	struct Qdisc *q = NULL;
2538 	struct tcf_block *block;
2539 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2540 	bool terse_dump = false;
2541 	long index_start;
2542 	long index;
2543 	u32 parent;
2544 	int err;
2545 
2546 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2547 		return skb->len;
2548 
2549 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2550 				     tcf_tfilter_dump_policy, cb->extack);
2551 	if (err)
2552 		return err;
2553 
2554 	if (tca[TCA_DUMP_FLAGS]) {
2555 		struct nla_bitfield32 flags =
2556 			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2557 
2558 		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2559 	}
2560 
2561 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2562 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2563 		if (!block)
2564 			goto out;
2565 		/* If we work with a block index, q is NULL and the parent value
2566 		 * will never be used in the following code. The check
2567 		 * in tcf_fill_node prevents it. However, the compiler cannot
2568 		 * see that far, so set parent to zero to silence the warning
2569 		 * about parent being used uninitialized.
2570 		 */
2571 		parent = 0;
2572 	} else {
2573 		const struct Qdisc_class_ops *cops;
2574 		struct net_device *dev;
2575 		unsigned long cl = 0;
2576 
2577 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2578 		if (!dev)
2579 			return skb->len;
2580 
2581 		parent = tcm->tcm_parent;
2582 		if (!parent)
2583 			q = dev->qdisc;
2584 		else
2585 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2586 		if (!q)
2587 			goto out;
2588 		cops = q->ops->cl_ops;
2589 		if (!cops)
2590 			goto out;
2591 		if (!cops->tcf_block)
2592 			goto out;
2593 		if (TC_H_MIN(tcm->tcm_parent)) {
2594 			cl = cops->find(q, tcm->tcm_parent);
2595 			if (cl == 0)
2596 				goto out;
2597 		}
2598 		block = cops->tcf_block(q, cl, NULL);
2599 		if (!block)
2600 			goto out;
2601 		parent = block->classid;
2602 		if (tcf_block_shared(block))
2603 			q = NULL;
2604 	}
2605 
2606 	index_start = cb->args[0];
2607 	index = 0;
2608 
2609 	for (chain = __tcf_get_next_chain(block, NULL);
2610 	     chain;
2611 	     chain_prev = chain,
2612 		     chain = __tcf_get_next_chain(block, chain),
2613 		     tcf_chain_put(chain_prev)) {
2614 		if (tca[TCA_CHAIN] &&
2615 		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2616 			continue;
2617 		if (!tcf_chain_dump(chain, q, parent, skb, cb,
2618 				    index_start, &index, terse_dump)) {
2619 			tcf_chain_put(chain);
2620 			err = -EMSGSIZE;
2621 			break;
2622 		}
2623 	}
2624 
2625 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2626 		tcf_block_refcnt_put(block, true);
2627 	cb->args[0] = index;
2628 
2629 out:
2630 	/* If we made no progress, the error (EMSGSIZE) is real */
2631 	if (skb->len == 0 && err)
2632 		return err;
2633 	return skb->len;
2634 }
2635 
2636 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2637 			      void *tmplt_priv, u32 chain_index,
2638 			      struct net *net, struct sk_buff *skb,
2639 			      struct tcf_block *block,
2640 			      u32 portid, u32 seq, u16 flags, int event)
2641 {
2642 	unsigned char *b = skb_tail_pointer(skb);
2643 	const struct tcf_proto_ops *ops;
2644 	struct nlmsghdr *nlh;
2645 	struct tcmsg *tcm;
2646 	void *priv;
2647 
2648 	ops = tmplt_ops;
2649 	priv = tmplt_priv;
2650 
2651 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2652 	if (!nlh)
2653 		goto out_nlmsg_trim;
2654 	tcm = nlmsg_data(nlh);
2655 	tcm->tcm_family = AF_UNSPEC;
2656 	tcm->tcm__pad1 = 0;
2657 	tcm->tcm__pad2 = 0;
2658 	tcm->tcm_handle = 0;
2659 	if (block->q) {
2660 		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2661 		tcm->tcm_parent = block->q->handle;
2662 	} else {
2663 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2664 		tcm->tcm_block_index = block->index;
2665 	}
2666 
2667 	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2668 		goto nla_put_failure;
2669 
2670 	if (ops) {
2671 		if (nla_put_string(skb, TCA_KIND, ops->kind))
2672 			goto nla_put_failure;
2673 		if (ops->tmplt_dump(skb, net, priv) < 0)
2674 			goto nla_put_failure;
2675 	}
2676 
2677 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2678 	return skb->len;
2679 
2680 out_nlmsg_trim:
2681 nla_put_failure:
2682 	nlmsg_trim(skb, b);
2683 	return -EMSGSIZE;
2684 }
2685 
2686 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2687 			   u32 seq, u16 flags, int event, bool unicast)
2688 {
2689 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2690 	struct tcf_block *block = chain->block;
2691 	struct net *net = block->net;
2692 	struct sk_buff *skb;
2693 	int err = 0;
2694 
2695 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2696 	if (!skb)
2697 		return -ENOBUFS;
2698 
2699 	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2700 			       chain->index, net, skb, block, portid,
2701 			       seq, flags, event) <= 0) {
2702 		kfree_skb(skb);
2703 		return -EINVAL;
2704 	}
2705 
2706 	if (unicast)
2707 		err = rtnl_unicast(skb, net, portid);
2708 	else
2709 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2710 				     flags & NLM_F_ECHO);
2711 
2712 	return err;
2713 }
2714 
2715 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2716 				  void *tmplt_priv, u32 chain_index,
2717 				  struct tcf_block *block, struct sk_buff *oskb,
2718 				  u32 seq, u16 flags, bool unicast)
2719 {
2720 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2721 	struct net *net = block->net;
2722 	struct sk_buff *skb;
2723 
2724 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2725 	if (!skb)
2726 		return -ENOBUFS;
2727 
2728 	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2729 			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2730 		kfree_skb(skb);
2731 		return -EINVAL;
2732 	}
2733 
2734 	if (unicast)
2735 		return rtnl_unicast(skb, net, portid);
2736 
2737 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2738 }
2739 
2740 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2741 			      struct nlattr **tca,
2742 			      struct netlink_ext_ack *extack)
2743 {
2744 	const struct tcf_proto_ops *ops;
2745 	char name[IFNAMSIZ];
2746 	void *tmplt_priv;
2747 
2748 	/* If kind is not set, the user did not specify a template. */
2749 	if (!tca[TCA_KIND])
2750 		return 0;
2751 
2752 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2753 		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2754 		return -EINVAL;
2755 	}
2756 
2757 	ops = tcf_proto_lookup_ops(name, true, extack);
2758 	if (IS_ERR(ops))
2759 		return PTR_ERR(ops);
2760 	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2761 		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2762 		return -EOPNOTSUPP;
2763 	}
2764 
2765 	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2766 	if (IS_ERR(tmplt_priv)) {
2767 		module_put(ops->owner);
2768 		return PTR_ERR(tmplt_priv);
2769 	}
2770 	chain->tmplt_ops = ops;
2771 	chain->tmplt_priv = tmplt_priv;
2772 	return 0;
2773 }
2774 
2775 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2776 			       void *tmplt_priv)
2777 {
2778 	/* If template ops are not set, there is no work for us to do. */
2779 	if (!tmplt_ops)
2780 		return;
2781 
2782 	tmplt_ops->tmplt_destroy(tmplt_priv);
2783 	module_put(tmplt_ops->owner);
2784 }
2785 
2786 /* Add/delete/get a chain */
2787 
2788 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2789 			struct netlink_ext_ack *extack)
2790 {
2791 	struct net *net = sock_net(skb->sk);
2792 	struct nlattr *tca[TCA_MAX + 1];
2793 	struct tcmsg *t;
2794 	u32 parent;
2795 	u32 chain_index;
2796 	struct Qdisc *q = NULL;
2797 	struct tcf_chain *chain = NULL;
2798 	struct tcf_block *block;
2799 	unsigned long cl;
2800 	int err;
2801 
2802 	if (n->nlmsg_type != RTM_GETCHAIN &&
2803 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2804 		return -EPERM;
2805 
2806 replay:
2807 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2808 				     rtm_tca_policy, extack);
2809 	if (err < 0)
2810 		return err;
2811 
2812 	t = nlmsg_data(n);
2813 	parent = t->tcm_parent;
2814 	cl = 0;
2815 
2816 	block = tcf_block_find(net, &q, &parent, &cl,
2817 			       t->tcm_ifindex, t->tcm_block_index, extack);
2818 	if (IS_ERR(block))
2819 		return PTR_ERR(block);
2820 
2821 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2822 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2823 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2824 		err = -EINVAL;
2825 		goto errout_block;
2826 	}
2827 
2828 	mutex_lock(&block->lock);
2829 	chain = tcf_chain_lookup(block, chain_index);
2830 	if (n->nlmsg_type == RTM_NEWCHAIN) {
2831 		if (chain) {
2832 			if (tcf_chain_held_by_acts_only(chain)) {
2833 				/* The chain exists only because there is
2834 				 * some action referencing it.
2835 				 */
2836 				tcf_chain_hold(chain);
2837 			} else {
2838 				NL_SET_ERR_MSG(extack, "Filter chain already exists");
2839 				err = -EEXIST;
2840 				goto errout_block_locked;
2841 			}
2842 		} else {
2843 			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2844 				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2845 				err = -ENOENT;
2846 				goto errout_block_locked;
2847 			}
2848 			chain = tcf_chain_create(block, chain_index);
2849 			if (!chain) {
2850 				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2851 				err = -ENOMEM;
2852 				goto errout_block_locked;
2853 			}
2854 		}
2855 	} else {
2856 		if (!chain || tcf_chain_held_by_acts_only(chain)) {
2857 			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2858 			err = -EINVAL;
2859 			goto errout_block_locked;
2860 		}
2861 		tcf_chain_hold(chain);
2862 	}
2863 
2864 	if (n->nlmsg_type == RTM_NEWCHAIN) {
2865 		/* Modifying chain requires holding parent block lock. In case
2866 		 * the chain was successfully added, take a reference to the
2867 		 * chain. This ensures that an empty chain does not disappear at
2868 		 * the end of this function.
2869 		 */
2870 		tcf_chain_hold(chain);
2871 		chain->explicitly_created = true;
2872 	}
2873 	mutex_unlock(&block->lock);
2874 
2875 	switch (n->nlmsg_type) {
2876 	case RTM_NEWCHAIN:
2877 		err = tc_chain_tmplt_add(chain, net, tca, extack);
2878 		if (err) {
2879 			tcf_chain_put_explicitly_created(chain);
2880 			goto errout;
2881 		}
2882 
2883 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2884 				RTM_NEWCHAIN, false);
2885 		break;
2886 	case RTM_DELCHAIN:
2887 		tfilter_notify_chain(net, skb, block, q, parent, n,
2888 				     chain, RTM_DELTFILTER);
2889 		/* Flush the chain first as the user requested chain removal. */
2890 		tcf_chain_flush(chain, true);
2891 		/* In case the chain was successfully deleted, put a reference
2892 		 * to the chain previously taken during addition.
2893 		 */
2894 		tcf_chain_put_explicitly_created(chain);
2895 		break;
2896 	case RTM_GETCHAIN:
2897 		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2898 				      n->nlmsg_flags, n->nlmsg_type, true);
2899 		if (err < 0)
2900 			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2901 		break;
2902 	default:
2903 		err = -EOPNOTSUPP;
2904 		NL_SET_ERR_MSG(extack, "Unsupported message type");
2905 		goto errout;
2906 	}
2907 
2908 errout:
2909 	tcf_chain_put(chain);
2910 errout_block:
2911 	tcf_block_release(q, block, true);
2912 	if (err == -EAGAIN)
2913 		/* Replay the request. */
2914 		goto replay;
2915 	return err;
2916 
2917 errout_block_locked:
2918 	mutex_unlock(&block->lock);
2919 	goto errout_block;
2920 }
2921 
2922 /* called with RTNL */
2923 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2924 {
2925 	struct net *net = sock_net(skb->sk);
2926 	struct nlattr *tca[TCA_MAX + 1];
2927 	struct Qdisc *q = NULL;
2928 	struct tcf_block *block;
2929 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2930 	struct tcf_chain *chain;
2931 	long index_start;
2932 	long index;
2933 	int err;
2934 
2935 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2936 		return skb->len;
2937 
2938 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2939 				     rtm_tca_policy, cb->extack);
2940 	if (err)
2941 		return err;
2942 
2943 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2944 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2945 		if (!block)
2946 			goto out;
2947 	} else {
2948 		const struct Qdisc_class_ops *cops;
2949 		struct net_device *dev;
2950 		unsigned long cl = 0;
2951 
2952 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2953 		if (!dev)
2954 			return skb->len;
2955 
2956 		if (!tcm->tcm_parent)
2957 			q = dev->qdisc;
2958 		else
2959 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2960 
2961 		if (!q)
2962 			goto out;
2963 		cops = q->ops->cl_ops;
2964 		if (!cops)
2965 			goto out;
2966 		if (!cops->tcf_block)
2967 			goto out;
2968 		if (TC_H_MIN(tcm->tcm_parent)) {
2969 			cl = cops->find(q, tcm->tcm_parent);
2970 			if (cl == 0)
2971 				goto out;
2972 		}
2973 		block = cops->tcf_block(q, cl, NULL);
2974 		if (!block)
2975 			goto out;
2976 		if (tcf_block_shared(block))
2977 			q = NULL;
2978 	}
2979 
2980 	index_start = cb->args[0];
2981 	index = 0;
2982 
2983 	mutex_lock(&block->lock);
2984 	list_for_each_entry(chain, &block->chain_list, list) {
2985 		if ((tca[TCA_CHAIN] &&
2986 		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2987 			continue;
2988 		if (index < index_start) {
2989 			index++;
2990 			continue;
2991 		}
2992 		if (tcf_chain_held_by_acts_only(chain))
2993 			continue;
2994 		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2995 					 chain->index, net, skb, block,
2996 					 NETLINK_CB(cb->skb).portid,
2997 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2998 					 RTM_NEWCHAIN);
2999 		if (err <= 0)
3000 			break;
3001 		index++;
3002 	}
3003 	mutex_unlock(&block->lock);
3004 
3005 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3006 		tcf_block_refcnt_put(block, true);
3007 	cb->args[0] = index;
3008 
3009 out:
3010 	/* If we made no progress, the error (EMSGSIZE) is real */
3011 	if (skb->len == 0 && err)
3012 		return err;
3013 	return skb->len;
3014 }
3015 
3016 void tcf_exts_destroy(struct tcf_exts *exts)
3017 {
3018 #ifdef CONFIG_NET_CLS_ACT
3019 	if (exts->actions) {
3020 		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3021 		kfree(exts->actions);
3022 	}
3023 	exts->nr_actions = 0;
3024 #endif
3025 }
3026 EXPORT_SYMBOL(tcf_exts_destroy);
3027 
3028 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3029 		      struct nlattr *rate_tlv, struct tcf_exts *exts,
3030 		      u32 flags, struct netlink_ext_ack *extack)
3031 {
3032 #ifdef CONFIG_NET_CLS_ACT
3033 	{
3034 		int init_res[TCA_ACT_MAX_PRIO] = {};
3035 		struct tc_action *act;
3036 		size_t attr_size = 0;
3037 
3038 		if (exts->police && tb[exts->police]) {
3039 			struct tc_action_ops *a_o;
3040 
3041 			a_o = tc_action_load_ops(tb[exts->police], true,
3042 						 !(flags & TCA_ACT_FLAGS_NO_RTNL),
3043 						 extack);
3044 			if (IS_ERR(a_o))
3045 				return PTR_ERR(a_o);
3046 			flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3047 			act = tcf_action_init_1(net, tp, tb[exts->police],
3048 						rate_tlv, a_o, init_res, flags,
3049 						extack);
3050 			module_put(a_o->owner);
3051 			if (IS_ERR(act))
3052 				return PTR_ERR(act);
3053 
3054 			act->type = exts->type = TCA_OLD_COMPAT;
3055 			exts->actions[0] = act;
3056 			exts->nr_actions = 1;
3057 			tcf_idr_insert_many(exts->actions);
3058 		} else if (exts->action && tb[exts->action]) {
3059 			int err;
3060 
3061 			flags |= TCA_ACT_FLAGS_BIND;
3062 			err = tcf_action_init(net, tp, tb[exts->action],
3063 					      rate_tlv, exts->actions, init_res,
3064 					      &attr_size, flags, extack);
3065 			if (err < 0)
3066 				return err;
3067 			exts->nr_actions = err;
3068 		}
3069 	}
3070 #else
3071 	if ((exts->action && tb[exts->action]) ||
3072 	    (exts->police && tb[exts->police])) {
3073 		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3074 		return -EOPNOTSUPP;
3075 	}
3076 #endif
3077 
3078 	return 0;
3079 }
3080 EXPORT_SYMBOL(tcf_exts_validate);
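
/* A minimal usage sketch for tcf_exts_validate(), modeled on the in-tree
 * classifiers; "foo", TCA_FOO_ACT and TCA_FOO_POLICE are hypothetical names
 * used only for illustration:
 *
 *	static int foo_set_parms(struct net *net, struct tcf_proto *tp,
 *				 struct foo_filter *f, struct nlattr **tb,
 *				 struct nlattr *est, u32 flags,
 *				 struct netlink_ext_ack *extack)
 *	{
 *		int err;
 *
 *		err = tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *		if (err < 0)
 *			return err;
 *
 *		err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags,
 *					extack);
 *		if (err < 0)
 *			tcf_exts_destroy(&f->exts);
 *		return err;
 *	}
 */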
3081 
3082 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3083 {
3084 #ifdef CONFIG_NET_CLS_ACT
3085 	struct tcf_exts old = *dst;
3086 
3087 	*dst = *src;
3088 	tcf_exts_destroy(&old);
3089 #endif
3090 }
3091 EXPORT_SYMBOL(tcf_exts_change);
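
/* Note that tcf_exts_change() releases the old actions of @dst through
 * tcf_exts_destroy() on a stack copy. Callers therefore validate the new
 * extensions into a temporary struct tcf_exts first and only swap it in
 * once the rest of the filter update can no longer fail.
 */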
3092 
3093 #ifdef CONFIG_NET_CLS_ACT
3094 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3095 {
3096 	if (exts->nr_actions == 0)
3097 		return NULL;
3098 	else
3099 		return exts->actions[0];
3100 }
3101 #endif
3102 
3103 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3104 {
3105 #ifdef CONFIG_NET_CLS_ACT
3106 	struct nlattr *nest;
3107 
3108 	if (exts->action && tcf_exts_has_actions(exts)) {
3109 		/*
3110 		 * Again for backward-compatible mode: we want to work
3111 		 * with both the old and new modes of entering tc data,
3112 		 * even if iproute2 is newer - jhs
3113 		 */
3114 		if (exts->type != TCA_OLD_COMPAT) {
3115 			nest = nla_nest_start_noflag(skb, exts->action);
3116 			if (nest == NULL)
3117 				goto nla_put_failure;
3118 
3119 			if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3120 			    < 0)
3121 				goto nla_put_failure;
3122 			nla_nest_end(skb, nest);
3123 		} else if (exts->police) {
3124 			struct tc_action *act = tcf_exts_first_act(exts);
3125 			struct tc_action *act = tcf_exts_first_act(exts);

3126 			nest = nla_nest_start_noflag(skb, exts->police);
3127 				goto nla_put_failure;
3128 			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3129 				goto nla_put_failure;
3130 			nla_nest_end(skb, nest);
3131 		}
3132 	}
3133 	return 0;
3134 
3135 nla_put_failure:
3136 	nla_nest_cancel(skb, nest);
3137 	return -1;
3138 #else
3139 	return 0;
3140 #endif
3141 }
3142 EXPORT_SYMBOL(tcf_exts_dump);
3143 
3144 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3145 {
3146 #ifdef CONFIG_NET_CLS_ACT
3147 	struct nlattr *nest;
3148 
3149 	if (!exts->action || !tcf_exts_has_actions(exts))
3150 		return 0;
3151 
3152 	nest = nla_nest_start_noflag(skb, exts->action);
3153 	if (!nest)
3154 		goto nla_put_failure;
3155 
3156 	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3157 		goto nla_put_failure;
3158 	nla_nest_end(skb, nest);
3159 	return 0;
3160 
3161 nla_put_failure:
3162 	nla_nest_cancel(skb, nest);
3163 	return -1;
3164 #else
3165 	return 0;
3166 #endif
3167 }
3168 EXPORT_SYMBOL(tcf_exts_terse_dump);
3169 
3170 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3171 {
3172 #ifdef CONFIG_NET_CLS_ACT
3173 	struct tc_action *a = tcf_exts_first_act(exts);

3174 	if (a && tcf_action_copy_stats(skb, a, 1) < 0)
3175 		return -1;
3176 #endif
3177 	return 0;
3178 }
3179 EXPORT_SYMBOL(tcf_exts_dump_stats);
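
/* A minimal dump-path sketch for the two helpers above, using the same
 * hypothetical "foo" classifier as earlier: tcf_exts_dump() emits the
 * action attributes inside the classifier's options nest, while
 * tcf_exts_dump_stats() is emitted after the nest is closed:
 *
 *	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 *	if (!nest)
 *		goto nla_put_failure;
 *	if (tcf_exts_dump(skb, &f->exts) < 0)
 *		goto nla_put_failure;
 *	nla_nest_end(skb, nest);
 *	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
 *		goto nla_put_failure;
 */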
3180 
3181 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3182 {
3183 	if (*flags & TCA_CLS_FLAGS_IN_HW)
3184 		return;
3185 	*flags |= TCA_CLS_FLAGS_IN_HW;
3186 	atomic_inc(&block->offloadcnt);
3187 }
3188 
3189 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3190 {
3191 	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3192 		return;
3193 	*flags &= ~TCA_CLS_FLAGS_IN_HW;
3194 	atomic_dec(&block->offloadcnt);
3195 }
3196 
3197 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3198 				      struct tcf_proto *tp, u32 *cnt,
3199 				      u32 *flags, u32 diff, bool add)
3200 {
3201 	lockdep_assert_held(&block->cb_lock);
3202 
3203 	spin_lock(&tp->lock);
3204 	if (add) {
3205 		if (!*cnt)
3206 			tcf_block_offload_inc(block, flags);
3207 		*cnt += diff;
3208 	} else {
3209 		*cnt -= diff;
3210 		if (!*cnt)
3211 			tcf_block_offload_dec(block, flags);
3212 	}
3213 	spin_unlock(&tp->lock);
3214 }
3215 
3216 static void
3217 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3218 			 u32 *cnt, u32 *flags)
3219 {
3220 	lockdep_assert_held(&block->cb_lock);
3221 
3222 	spin_lock(&tp->lock);
3223 	tcf_block_offload_dec(block, flags);
3224 	*cnt = 0;
3225 	spin_unlock(&tp->lock);
3226 }
3227 
3228 static int
3229 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3230 		   void *type_data, bool err_stop)
3231 {
3232 	struct flow_block_cb *block_cb;
3233 	int ok_count = 0;
3234 	int err;
3235 
3236 	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3237 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
3238 		if (err) {
3239 			if (err_stop)
3240 				return err;
3241 		} else {
3242 			ok_count++;
3243 		}
3244 	}
3245 	return ok_count;
3246 }
3247 
3248 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3249 		     void *type_data, bool err_stop, bool rtnl_held)
3250 {
3251 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3252 	int ok_count;
3253 
3254 retry:
3255 	if (take_rtnl)
3256 		rtnl_lock();
3257 	down_read(&block->cb_lock);
3258 	/* Need to obtain the rtnl lock if the block is bound to devs that
3259 	 * require it. In the block bind code, cb_lock is obtained while holding
3260 	 * rtnl, so we must obtain the locks in the same order here.
3261 	 */
3262 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3263 		up_read(&block->cb_lock);
3264 		take_rtnl = true;
3265 		goto retry;
3266 	}
3267 
3268 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3269 
3270 	up_read(&block->cb_lock);
3271 	if (take_rtnl)
3272 		rtnl_unlock();
3273 	return ok_count;
3274 }
3275 EXPORT_SYMBOL(tc_setup_cb_call);
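
/* A stats-query sketch for tc_setup_cb_call(), modeled on the flower
 * classifier's hardware stats update; "f" stands for a hypothetical
 * filter struct:
 *
 *	struct flow_cls_offload cls_flower = {};
 *
 *	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 *	cls_flower.command = FLOW_CLS_STATS;
 *	cls_flower.cookie = (unsigned long)f;
 *
 *	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
 *			 rtnl_held);
 */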
3276 
3277 /* Non-destructive filter add. If a filter that wasn't already in hardware is
3278  * successfully offloaded, increment the block offloads counter. On failure,
3279  * a previously offloaded filter is considered to be intact and the offloads
3280  * counter is not decremented.
3281  */
3282 
3283 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3284 		    enum tc_setup_type type, void *type_data, bool err_stop,
3285 		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3286 {
3287 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3288 	int ok_count;
3289 
3290 retry:
3291 	if (take_rtnl)
3292 		rtnl_lock();
3293 	down_read(&block->cb_lock);
3294 	/* Need to obtain the rtnl lock if the block is bound to devs that
3295 	 * require it. In the block bind code, cb_lock is obtained while holding
3296 	 * rtnl, so we must obtain the locks in the same order here.
3297 	 */
3298 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3299 		up_read(&block->cb_lock);
3300 		take_rtnl = true;
3301 		goto retry;
3302 	}
3303 
3304 	/* Make sure all netdevs sharing this block are offload-capable. */
3305 	if (block->nooffloaddevcnt && err_stop) {
3306 		ok_count = -EOPNOTSUPP;
3307 		goto err_unlock;
3308 	}
3309 
3310 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3311 	if (ok_count < 0)
3312 		goto err_unlock;
3313 
3314 	if (tp->ops->hw_add)
3315 		tp->ops->hw_add(tp, type_data);
3316 	if (ok_count > 0)
3317 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3318 					  ok_count, true);
3319 err_unlock:
3320 	up_read(&block->cb_lock);
3321 	if (take_rtnl)
3322 		rtnl_unlock();
3323 	return ok_count < 0 ? ok_count : 0;
3324 }
3325 EXPORT_SYMBOL(tc_setup_cb_add);
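
/* An offload-add sketch for tc_setup_cb_add(), again modeled on flower.
 * With skip_sw set, a hardware failure must fail the whole insert, hence
 * err_stop is set to true ("f" is a hypothetical filter, error handling
 * elided):
 *
 *	bool skip_sw = tc_skip_sw(f->flags);
 *	struct flow_cls_offload cls_flower = {};
 *	int err;
 *
 *	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 *	cls_flower.command = FLOW_CLS_REPLACE;
 *	cls_flower.cookie = (unsigned long)f;
 *	cls_flower.rule = ...;	(flow_rule carrying the match and actions)
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 *	if (!err && skip_sw && !tc_in_hw(f->flags))
 *		err = -EINVAL;	(mandatory offload did not happen)
 */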
3326 
3327 /* Destructive filter replace. If a filter that wasn't already in hardware is
3328  * successfully offloaded, increment the block offload counter. On failure,
3329  * a previously offloaded filter is considered to be destroyed and the offload
3330  * counter is decremented.
3331  */
3332 
3333 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3334 			enum tc_setup_type type, void *type_data, bool err_stop,
3335 			u32 *old_flags, unsigned int *old_in_hw_count,
3336 			u32 *new_flags, unsigned int *new_in_hw_count,
3337 			bool rtnl_held)
3338 {
3339 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3340 	int ok_count;
3341 
3342 retry:
3343 	if (take_rtnl)
3344 		rtnl_lock();
3345 	down_read(&block->cb_lock);
3346 	/* Need to obtain the rtnl lock if the block is bound to devs that
3347 	 * require it. In the block bind code, cb_lock is obtained while holding
3348 	 * rtnl, so we must obtain the locks in the same order here.
3349 	 */
3350 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3351 		up_read(&block->cb_lock);
3352 		take_rtnl = true;
3353 		goto retry;
3354 	}
3355 
3356 	/* Make sure all netdevs sharing this block are offload-capable. */
3357 	if (block->nooffloaddevcnt && err_stop) {
3358 		ok_count = -EOPNOTSUPP;
3359 		goto err_unlock;
3360 	}
3361 
3362 	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3363 	if (tp->ops->hw_del)
3364 		tp->ops->hw_del(tp, type_data);
3365 
3366 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3367 	if (ok_count < 0)
3368 		goto err_unlock;
3369 
3370 	if (tp->ops->hw_add)
3371 		tp->ops->hw_add(tp, type_data);
3372 	if (ok_count > 0)
3373 		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3374 					  new_flags, ok_count, true);
3375 err_unlock:
3376 	up_read(&block->cb_lock);
3377 	if (take_rtnl)
3378 		rtnl_unlock();
3379 	return ok_count < 0 ? ok_count : 0;
3380 }
3381 EXPORT_SYMBOL(tc_setup_cb_replace);
3382 
3383 /* Destroy the filter and decrement the block offload counter, if the filter
3384  * was previously offloaded.
3385  */
3386 
3387 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3388 			enum tc_setup_type type, void *type_data, bool err_stop,
3389 			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3390 {
3391 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3392 	int ok_count;
3393 
3394 retry:
3395 	if (take_rtnl)
3396 		rtnl_lock();
3397 	down_read(&block->cb_lock);
3398 	/* Need to obtain the rtnl lock if the block is bound to devs that
3399 	 * require it. In the block bind code, cb_lock is obtained while holding
3400 	 * rtnl, so we must obtain the locks in the same order here.
3401 	 */
3402 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3403 		up_read(&block->cb_lock);
3404 		take_rtnl = true;
3405 		goto retry;
3406 	}
3407 
3408 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3409 
3410 	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3411 	if (tp->ops->hw_del)
3412 		tp->ops->hw_del(tp, type_data);
3413 
3414 	up_read(&block->cb_lock);
3415 	if (take_rtnl)
3416 		rtnl_unlock();
3417 	return ok_count < 0 ? ok_count : 0;
3418 }
3419 EXPORT_SYMBOL(tc_setup_cb_destroy);
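
/* The matching teardown sketch for tc_setup_cb_destroy(), flower-style:
 *
 *	struct flow_cls_offload cls_flower = {};
 *
 *	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 *	cls_flower.command = FLOW_CLS_DESTROY;
 *	cls_flower.cookie = (unsigned long)f;
 *
 *	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
 *			    &f->flags, &f->in_hw_count, rtnl_held);
 */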
3420 
3421 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3422 			  bool add, flow_setup_cb_t *cb,
3423 			  enum tc_setup_type type, void *type_data,
3424 			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3425 {
3426 	int err = cb(type, type_data, cb_priv);
3427 
3428 	if (err) {
3429 		if (add && tc_skip_sw(*flags))
3430 			return err;
3431 	} else {
3432 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3433 					  add);
3434 	}
3435 
3436 	return 0;
3437 }
3438 EXPORT_SYMBOL(tc_setup_cb_reoffload);
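
/* tc_setup_cb_reoffload() is meant to be called from a classifier's
 * ->reoffload() callback, once per filter, when a single flow block
 * callback is being registered or unregistered. A sketch, with "f"
 * iterating over the filters of a hypothetical classifier:
 *
 *	cls_flower.command = add ? FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
 *	cls_flower.cookie = (unsigned long)f;
 *
 *	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSFLOWER,
 *				    &cls_flower, cb_priv, &f->flags,
 *				    &f->in_hw_count);
 */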
3439 
3440 static int tcf_act_get_cookie(struct flow_action_entry *entry,
3441 			      const struct tc_action *act)
3442 {
3443 	struct tc_cookie *cookie;
3444 	int err = 0;
3445 
3446 	rcu_read_lock();
3447 	cookie = rcu_dereference(act->act_cookie);
3448 	if (cookie) {
3449 		entry->cookie = flow_action_cookie_create(cookie->data,
3450 							  cookie->len,
3451 							  GFP_ATOMIC);
3452 		if (!entry->cookie)
3453 			err = -ENOMEM;
3454 	}
3455 	rcu_read_unlock();
3456 	return err;
3457 }
3458 
3459 static void tcf_act_put_cookie(struct flow_action_entry *entry)
3460 {
3461 	flow_action_cookie_destroy(entry->cookie);
3462 }
3463 
3464 void tc_cleanup_flow_action(struct flow_action *flow_action)
3465 {
3466 	struct flow_action_entry *entry;
3467 	int i;
3468 
3469 	flow_action_for_each(i, entry, flow_action) {
3470 		tcf_act_put_cookie(entry);
3471 		if (entry->destructor)
3472 			entry->destructor(entry->destructor_priv);
3473 	}
3474 }
3475 EXPORT_SYMBOL(tc_cleanup_flow_action);
3476 
3477 static void tcf_mirred_get_dev(struct flow_action_entry *entry,
3478 			       const struct tc_action *act)
3479 {
3480 #ifdef CONFIG_NET_CLS_ACT
3481 	entry->dev = act->ops->get_dev(act, &entry->destructor);
3482 	if (!entry->dev)
3483 		return;
3484 	entry->destructor_priv = entry->dev;
3485 #endif
3486 }
3487 
3488 static void tcf_tunnel_encap_put_tunnel(void *priv)
3489 {
3490 	struct ip_tunnel_info *tunnel = priv;
3491 
3492 	kfree(tunnel);
3493 }
3494 
3495 static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
3496 				       const struct tc_action *act)
3497 {
3498 	entry->tunnel = tcf_tunnel_info_copy(act);
3499 	if (!entry->tunnel)
3500 		return -ENOMEM;
3501 	entry->destructor = tcf_tunnel_encap_put_tunnel;
3502 	entry->destructor_priv = entry->tunnel;
3503 	return 0;
3504 }
3505 
3506 static void tcf_sample_get_group(struct flow_action_entry *entry,
3507 				 const struct tc_action *act)
3508 {
3509 #ifdef CONFIG_NET_CLS_ACT
3510 	entry->sample.psample_group =
3511 		act->ops->get_psample_group(act, &entry->destructor);
3512 	entry->destructor_priv = entry->sample.psample_group;
3513 #endif
3514 }
3515 
3516 static void tcf_gate_entry_destructor(void *priv)
3517 {
3518 	struct action_gate_entry *oe = priv;
3519 
3520 	kfree(oe);
3521 }
3522 
3523 static int tcf_gate_get_entries(struct flow_action_entry *entry,
3524 				const struct tc_action *act)
3525 {
3526 	entry->gate.entries = tcf_gate_get_list(act);
3527 
3528 	if (!entry->gate.entries)
3529 		return -EINVAL;
3530 
3531 	entry->destructor = tcf_gate_entry_destructor;
3532 	entry->destructor_priv = entry->gate.entries;
3533 
3534 	return 0;
3535 }
3536 
3537 static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
3538 {
3539 	if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
3540 		return FLOW_ACTION_HW_STATS_DONT_CARE;
3541 	else if (!hw_stats)
3542 		return FLOW_ACTION_HW_STATS_DISABLED;
3543 
3544 	return hw_stats;
3545 }
3546 
3547 int tc_setup_flow_action(struct flow_action *flow_action,
3548 			 const struct tcf_exts *exts)
3549 {
3550 	struct tc_action *act;
3551 	int i, j, k, err = 0;
3552 
3553 	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3554 	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3555 	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3556 
3557 	if (!exts)
3558 		return 0;
3559 
3560 	j = 0;
3561 	tcf_exts_for_each_action(i, act, exts) {
3562 		struct flow_action_entry *entry;
3563 
3564 		entry = &flow_action->entries[j];
3565 		spin_lock_bh(&act->tcfa_lock);
3566 		err = tcf_act_get_cookie(entry, act);
3567 		if (err)
3568 			goto err_out_locked;
3569 
3570 		entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3571 
3572 		if (is_tcf_gact_ok(act)) {
3573 			entry->id = FLOW_ACTION_ACCEPT;
3574 		} else if (is_tcf_gact_shot(act)) {
3575 			entry->id = FLOW_ACTION_DROP;
3576 		} else if (is_tcf_gact_trap(act)) {
3577 			entry->id = FLOW_ACTION_TRAP;
3578 		} else if (is_tcf_gact_goto_chain(act)) {
3579 			entry->id = FLOW_ACTION_GOTO;
3580 			entry->chain_index = tcf_gact_goto_chain_index(act);
3581 		} else if (is_tcf_mirred_egress_redirect(act)) {
3582 			entry->id = FLOW_ACTION_REDIRECT;
3583 			tcf_mirred_get_dev(entry, act);
3584 		} else if (is_tcf_mirred_egress_mirror(act)) {
3585 			entry->id = FLOW_ACTION_MIRRED;
3586 			tcf_mirred_get_dev(entry, act);
3587 		} else if (is_tcf_mirred_ingress_redirect(act)) {
3588 			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
3589 			tcf_mirred_get_dev(entry, act);
3590 		} else if (is_tcf_mirred_ingress_mirror(act)) {
3591 			entry->id = FLOW_ACTION_MIRRED_INGRESS;
3592 			tcf_mirred_get_dev(entry, act);
3593 		} else if (is_tcf_vlan(act)) {
3594 			switch (tcf_vlan_action(act)) {
3595 			case TCA_VLAN_ACT_PUSH:
3596 				entry->id = FLOW_ACTION_VLAN_PUSH;
3597 				entry->vlan.vid = tcf_vlan_push_vid(act);
3598 				entry->vlan.proto = tcf_vlan_push_proto(act);
3599 				entry->vlan.prio = tcf_vlan_push_prio(act);
3600 				break;
3601 			case TCA_VLAN_ACT_POP:
3602 				entry->id = FLOW_ACTION_VLAN_POP;
3603 				break;
3604 			case TCA_VLAN_ACT_MODIFY:
3605 				entry->id = FLOW_ACTION_VLAN_MANGLE;
3606 				entry->vlan.vid = tcf_vlan_push_vid(act);
3607 				entry->vlan.proto = tcf_vlan_push_proto(act);
3608 				entry->vlan.prio = tcf_vlan_push_prio(act);
3609 				break;
3610 			default:
3611 				err = -EOPNOTSUPP;
3612 				goto err_out_locked;
3613 			}
3614 		} else if (is_tcf_tunnel_set(act)) {
3615 			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3616 			err = tcf_tunnel_encap_get_tunnel(entry, act);
3617 			if (err)
3618 				goto err_out_locked;
3619 		} else if (is_tcf_tunnel_release(act)) {
3620 			entry->id = FLOW_ACTION_TUNNEL_DECAP;
3621 		} else if (is_tcf_pedit(act)) {
3622 			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3623 				switch (tcf_pedit_cmd(act, k)) {
3624 				case TCA_PEDIT_KEY_EX_CMD_SET:
3625 					entry->id = FLOW_ACTION_MANGLE;
3626 					break;
3627 				case TCA_PEDIT_KEY_EX_CMD_ADD:
3628 					entry->id = FLOW_ACTION_ADD;
3629 					break;
3630 				default:
3631 					err = -EOPNOTSUPP;
3632 					goto err_out_locked;
3633 				}
3634 				entry->mangle.htype = tcf_pedit_htype(act, k);
3635 				entry->mangle.mask = tcf_pedit_mask(act, k);
3636 				entry->mangle.val = tcf_pedit_val(act, k);
3637 				entry->mangle.offset = tcf_pedit_offset(act, k);
3638 				entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3639 				entry = &flow_action->entries[++j];
3640 			}
3641 		} else if (is_tcf_csum(act)) {
3642 			entry->id = FLOW_ACTION_CSUM;
3643 			entry->csum_flags = tcf_csum_update_flags(act);
3644 		} else if (is_tcf_skbedit_mark(act)) {
3645 			entry->id = FLOW_ACTION_MARK;
3646 			entry->mark = tcf_skbedit_mark(act);
3647 		} else if (is_tcf_sample(act)) {
3648 			entry->id = FLOW_ACTION_SAMPLE;
3649 			entry->sample.trunc_size = tcf_sample_trunc_size(act);
3650 			entry->sample.truncate = tcf_sample_truncate(act);
3651 			entry->sample.rate = tcf_sample_rate(act);
3652 			tcf_sample_get_group(entry, act);
3653 		} else if (is_tcf_police(act)) {
3654 			entry->id = FLOW_ACTION_POLICE;
3655 			entry->police.burst = tcf_police_burst(act);
3656 			entry->police.rate_bytes_ps =
3657 				tcf_police_rate_bytes_ps(act);
3658 			entry->police.burst_pkt = tcf_police_burst_pkt(act);
3659 			entry->police.rate_pkt_ps =
3660 				tcf_police_rate_pkt_ps(act);
3661 			entry->police.mtu = tcf_police_tcfp_mtu(act);
3662 			entry->police.index = act->tcfa_index;
3663 		} else if (is_tcf_ct(act)) {
3664 			entry->id = FLOW_ACTION_CT;
3665 			entry->ct.action = tcf_ct_action(act);
3666 			entry->ct.zone = tcf_ct_zone(act);
3667 			entry->ct.flow_table = tcf_ct_ft(act);
3668 		} else if (is_tcf_mpls(act)) {
3669 			switch (tcf_mpls_action(act)) {
3670 			case TCA_MPLS_ACT_PUSH:
3671 				entry->id = FLOW_ACTION_MPLS_PUSH;
3672 				entry->mpls_push.proto = tcf_mpls_proto(act);
3673 				entry->mpls_push.label = tcf_mpls_label(act);
3674 				entry->mpls_push.tc = tcf_mpls_tc(act);
3675 				entry->mpls_push.bos = tcf_mpls_bos(act);
3676 				entry->mpls_push.ttl = tcf_mpls_ttl(act);
3677 				break;
3678 			case TCA_MPLS_ACT_POP:
3679 				entry->id = FLOW_ACTION_MPLS_POP;
3680 				entry->mpls_pop.proto = tcf_mpls_proto(act);
3681 				break;
3682 			case TCA_MPLS_ACT_MODIFY:
3683 				entry->id = FLOW_ACTION_MPLS_MANGLE;
3684 				entry->mpls_mangle.label = tcf_mpls_label(act);
3685 				entry->mpls_mangle.tc = tcf_mpls_tc(act);
3686 				entry->mpls_mangle.bos = tcf_mpls_bos(act);
3687 				entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3688 				break;
3689 			default:
				err = -EOPNOTSUPP;
3690 				goto err_out_locked;
3691 			}
3692 		} else if (is_tcf_skbedit_ptype(act)) {
3693 			entry->id = FLOW_ACTION_PTYPE;
3694 			entry->ptype = tcf_skbedit_ptype(act);
3695 		} else if (is_tcf_skbedit_priority(act)) {
3696 			entry->id = FLOW_ACTION_PRIORITY;
3697 			entry->priority = tcf_skbedit_priority(act);
3698 		} else if (is_tcf_gate(act)) {
3699 			entry->id = FLOW_ACTION_GATE;
3700 			entry->gate.index = tcf_gate_index(act);
3701 			entry->gate.prio = tcf_gate_prio(act);
3702 			entry->gate.basetime = tcf_gate_basetime(act);
3703 			entry->gate.cycletime = tcf_gate_cycletime(act);
3704 			entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
3705 			entry->gate.num_entries = tcf_gate_num_entries(act);
3706 			err = tcf_gate_get_entries(entry, act);
3707 			if (err)
3708 				goto err_out_locked;
3709 		} else {
3710 			err = -EOPNOTSUPP;
3711 			goto err_out_locked;
3712 		}
3713 		spin_unlock_bh(&act->tcfa_lock);
3714 
3715 		if (!is_tcf_pedit(act))
3716 			j++;
3717 	}
3718 
3719 err_out:
3720 	if (err)
3721 		tc_cleanup_flow_action(flow_action);
3722 
3723 	return err;
3724 err_out_locked:
3725 	spin_unlock_bh(&act->tcfa_lock);
3726 	goto err_out;
3727 }
3728 EXPORT_SYMBOL(tc_setup_flow_action);
3729 
3730 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3731 {
3732 	unsigned int num_acts = 0;
3733 	struct tc_action *act;
3734 	int i;
3735 
3736 	tcf_exts_for_each_action(i, act, exts) {
3737 		if (is_tcf_pedit(act))
3738 			num_acts += tcf_pedit_nkeys(act);
3739 		else
3740 			num_acts++;
3741 	}
3742 	return num_acts;
3743 }
3744 EXPORT_SYMBOL(tcf_exts_num_actions);
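
/* tcf_exts_num_actions() pairs with tc_setup_flow_action(): it sizes the
 * flow_action array that the translation then fills. A minimal sketch
 * (tc_setup_flow_action() already cleans up after itself on failure):
 *
 *	struct flow_rule *rule;
 *	int err;
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(exts));
 *	if (!rule)
 *		return -ENOMEM;
 *
 *	err = tc_setup_flow_action(&rule->action, exts);
 *	if (!err) {
 *		(hand "rule" to the driver via a flow_cls_offload command)
 *		tc_cleanup_flow_action(&rule->action);
 *	}
 *	kfree(rule);
 *	return err;
 */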
3745 
3746 #ifdef CONFIG_NET_CLS_ACT
3747 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3748 					u32 *p_block_index,
3749 					struct netlink_ext_ack *extack)
3750 {
3751 	*p_block_index = nla_get_u32(block_index_attr);
3752 	if (!*p_block_index) {
3753 		NL_SET_ERR_MSG(extack, "Block number may not be zero");
3754 		return -EINVAL;
3755 	}
3756 
3757 	return 0;
3758 }
3759 
3760 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3761 		    enum flow_block_binder_type binder_type,
3762 		    struct nlattr *block_index_attr,
3763 		    struct netlink_ext_ack *extack)
3764 {
3765 	u32 block_index;
3766 	int err;
3767 
3768 	if (!block_index_attr)
3769 		return 0;
3770 
3771 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3772 	if (err)
3773 		return err;
3774 
3775 	if (!block_index)
3776 		return 0;
3777 
3778 	qe->info.binder_type = binder_type;
3779 	qe->info.chain_head_change = tcf_chain_head_change_dflt;
3780 	qe->info.chain_head_change_priv = &qe->filter_chain;
3781 	qe->info.block_index = block_index;
3782 
3783 	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3784 }
3785 EXPORT_SYMBOL(tcf_qevent_init);
3786 
3787 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3788 {
3789 	if (qe->info.block_index)
3790 		tcf_block_put_ext(qe->block, sch, &qe->info);
3791 }
3792 EXPORT_SYMBOL(tcf_qevent_destroy);
3793 
3794 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3795 			       struct netlink_ext_ack *extack)
3796 {
3797 	u32 block_index;
3798 	int err;
3799 
3800 	if (!block_index_attr)
3801 		return 0;
3802 
3803 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3804 	if (err)
3805 		return err;
3806 
3807 	/* Bounce a newly configured block or a change of block. */
3808 	if (block_index != qe->info.block_index) {
3809 		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3810 		return -EINVAL;
3811 	}
3812 
3813 	return 0;
3814 }
3815 EXPORT_SYMBOL(tcf_qevent_validate_change);
3816 
3817 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3818 				  struct sk_buff **to_free, int *ret)
3819 {
3820 	struct tcf_result cl_res;
3821 	struct tcf_proto *fl;
3822 
3823 	if (!qe->info.block_index)
3824 		return skb;
3825 
3826 	fl = rcu_dereference_bh(qe->filter_chain);
3827 
3828 	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
3829 	case TC_ACT_SHOT:
3830 		qdisc_qstats_drop(sch);
3831 		__qdisc_drop(skb, to_free);
3832 		*ret = __NET_XMIT_BYPASS;
3833 		return NULL;
3834 	case TC_ACT_STOLEN:
3835 	case TC_ACT_QUEUED:
3836 	case TC_ACT_TRAP:
3837 		__qdisc_drop(skb, to_free);
3838 		*ret = __NET_XMIT_STOLEN;
3839 		return NULL;
3840 	case TC_ACT_REDIRECT:
3841 		skb_do_redirect(skb);
3842 		*ret = __NET_XMIT_STOLEN;
3843 		return NULL;
3844 	}
3845 
3846 	return skb;
3847 }
3848 EXPORT_SYMBOL(tcf_qevent_handle);
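
/* A datapath sketch for tcf_qevent_handle(), modeled on sch_red's enqueue
 * path: the qevent block classifies the skb and may consume it, in which
 * case NULL is returned and *ret carries the verdict:
 *
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return NET_XMIT_CN | ret;
 */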
3849 
3850 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3851 {
3852 	if (!qe->info.block_index)
3853 		return 0;
3854 	return nla_put_u32(skb, attr_name, qe->info.block_index);
3855 }
3856 EXPORT_SYMBOL(tcf_qevent_dump);
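
/* Lifecycle of a qevent block, modeled on sch_red, currently the in-tree
 * user of this API:
 *
 *	(init)    tcf_qevent_init(&q->qe_early_drop, sch,
 *				  FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *				  tb[TCA_RED_EARLY_DROP_BLOCK], extack);
 *	(change)  tcf_qevent_validate_change(&q->qe_early_drop,
 *					     tb[TCA_RED_EARLY_DROP_BLOCK],
 *					     extack);
 *	(dump)    tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK,
 *				  &q->qe_early_drop);
 *	(destroy) tcf_qevent_destroy(&q->qe_early_drop, sch);
 */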
3857 #endif
3858 
3859 static __net_init int tcf_net_init(struct net *net)
3860 {
3861 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3862 
3863 	spin_lock_init(&tn->idr_lock);
3864 	idr_init(&tn->idr);
3865 	return 0;
3866 }
3867 
3868 static void __net_exit tcf_net_exit(struct net *net)
3869 {
3870 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3871 
3872 	idr_destroy(&tn->idr);
3873 }
3874 
3875 static struct pernet_operations tcf_net_ops = {
3876 	.init = tcf_net_init,
3877 	.exit = tcf_net_exit,
3878 	.id   = &tcf_net_id,
3879 	.size = sizeof(struct tcf_net),
3880 };
3881 
3882 static int __init tc_filter_init(void)
3883 {
3884 	int err;
3885 
3886 	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3887 	if (!tc_filter_wq)
3888 		return -ENOMEM;
3889 
3890 	err = register_pernet_subsys(&tcf_net_ops);
3891 	if (err)
3892 		goto err_register_pernet_subsys;
3893 
3894 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3895 		      RTNL_FLAG_DOIT_UNLOCKED);
3896 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3897 		      RTNL_FLAG_DOIT_UNLOCKED);
3898 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3899 		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3900 	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3901 	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3902 	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3903 		      tc_dump_chain, 0);
3904 
3905 	return 0;
3906 
3907 err_register_pernet_subsys:
3908 	destroy_workqueue(tc_filter_wq);
3909 	return err;
3910 }
3911 
3912 subsys_initcall(tc_filter_init);
3913