1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_api.c	Packet classifier API.
4  *
5  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  *
7  * Changes:
8  *
9  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
10  */
11 
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/skbuff.h>
19 #include <linux/init.h>
20 #include <linux/kmod.h>
21 #include <linux/slab.h>
22 #include <linux/idr.h>
23 #include <linux/jhash.h>
24 #include <linux/rculist.h>
25 #include <net/net_namespace.h>
26 #include <net/sock.h>
27 #include <net/netlink.h>
28 #include <net/pkt_sched.h>
29 #include <net/pkt_cls.h>
30 #include <net/tc_act/tc_pedit.h>
31 #include <net/tc_act/tc_mirred.h>
32 #include <net/tc_act/tc_vlan.h>
33 #include <net/tc_act/tc_tunnel_key.h>
34 #include <net/tc_act/tc_csum.h>
35 #include <net/tc_act/tc_gact.h>
36 #include <net/tc_act/tc_police.h>
37 #include <net/tc_act/tc_sample.h>
38 #include <net/tc_act/tc_skbedit.h>
39 #include <net/tc_act/tc_ct.h>
40 #include <net/tc_act/tc_mpls.h>
41 #include <net/tc_act/tc_gate.h>
42 #include <net/flow_offload.h>
43 #include <net/tc_wrapper.h>
44 
45 extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
46 
47 /* The list of all installed classifier types */
48 static LIST_HEAD(tcf_proto_base);
49 
50 /* Protects the list of registered TC modules. It is a pure SMP lock. */
51 static DEFINE_RWLOCK(cls_mod_lock);
52 
53 #ifdef CONFIG_NET_CLS_ACT
54 DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
55 EXPORT_SYMBOL(tc_skb_ext_tc);
56 
57 void tc_skb_ext_tc_enable(void)
58 {
59 	static_branch_inc(&tc_skb_ext_tc);
60 }
61 EXPORT_SYMBOL(tc_skb_ext_tc_enable);
62 
63 void tc_skb_ext_tc_disable(void)
64 {
65 	static_branch_dec(&tc_skb_ext_tc);
66 }
67 EXPORT_SYMBOL(tc_skb_ext_tc_disable);
68 #endif
69 
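/* Illustrative sketch (editorial, not part of the original file): users of
 * the tc_skb_ext_tc static key pair the enable/disable helpers around their
 * own lifetime, so the branch consulted via tc_skb_ext_tc_enabled() in
 * tcf_classify() below only fires while at least one user exists. The names
 * are hypothetical.
 *
 *	static int __init foo_init(void)
 *	{
 *		tc_skb_ext_tc_enable();
 *		return 0;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		tc_skb_ext_tc_disable();
 *	}
 */
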
70 static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
71 {
72 	return jhash_3words(tp->chain->index, tp->prio,
73 			    (__force __u32)tp->protocol, 0);
74 }
75 
76 static void tcf_proto_signal_destroying(struct tcf_chain *chain,
77 					struct tcf_proto *tp)
78 {
79 	struct tcf_block *block = chain->block;
80 
81 	mutex_lock(&block->proto_destroy_lock);
82 	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
83 		     destroy_obj_hashfn(tp));
84 	mutex_unlock(&block->proto_destroy_lock);
85 }
86 
87 static bool tcf_proto_cmp(const struct tcf_proto *tp1,
88 			  const struct tcf_proto *tp2)
89 {
90 	return tp1->chain->index == tp2->chain->index &&
91 	       tp1->prio == tp2->prio &&
92 	       tp1->protocol == tp2->protocol;
93 }
94 
95 static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
96 					struct tcf_proto *tp)
97 {
98 	u32 hash = destroy_obj_hashfn(tp);
99 	struct tcf_proto *iter;
100 	bool found = false;
101 
102 	rcu_read_lock();
103 	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
104 				   destroy_ht_node, hash) {
105 		if (tcf_proto_cmp(tp, iter)) {
106 			found = true;
107 			break;
108 		}
109 	}
110 	rcu_read_unlock();
111 
112 	return found;
113 }
114 
115 static void
116 tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
117 {
118 	struct tcf_block *block = chain->block;
119 
120 	mutex_lock(&block->proto_destroy_lock);
121 	if (hash_hashed(&tp->destroy_ht_node))
122 		hash_del_rcu(&tp->destroy_ht_node);
123 	mutex_unlock(&block->proto_destroy_lock);
124 }
125 
126 /* Find classifier type by string name */
127 
128 static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
129 {
130 	const struct tcf_proto_ops *t, *res = NULL;
131 
132 	if (kind) {
133 		read_lock(&cls_mod_lock);
134 		list_for_each_entry(t, &tcf_proto_base, head) {
135 			if (strcmp(kind, t->kind) == 0) {
136 				if (try_module_get(t->owner))
137 					res = t;
138 				break;
139 			}
140 		}
141 		read_unlock(&cls_mod_lock);
142 	}
143 	return res;
144 }
145 
146 static const struct tcf_proto_ops *
147 tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
148 		     struct netlink_ext_ack *extack)
149 {
150 	const struct tcf_proto_ops *ops;
151 
152 	ops = __tcf_proto_lookup_ops(kind);
153 	if (ops)
154 		return ops;
155 #ifdef CONFIG_MODULES
156 	if (rtnl_held)
157 		rtnl_unlock();
158 	request_module("cls_%s", kind);
159 	if (rtnl_held)
160 		rtnl_lock();
161 	ops = __tcf_proto_lookup_ops(kind);
162 	/* We dropped the RTNL semaphore in order to perform
163 	 * the module load. So, even if we succeeded in loading
164 	 * the module we have to replay the request. We indicate
165 	 * this using -EAGAIN.
166 	 */
167 	if (ops) {
168 		module_put(ops->owner);
169 		return ERR_PTR(-EAGAIN);
170 	}
171 #endif
172 	NL_SET_ERR_MSG(extack, "TC classifier not found");
173 	return ERR_PTR(-ENOENT);
174 }
175 
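/* Illustrative sketch (editorial): because tcf_proto_lookup_ops() may drop
 * the RTNL lock to load a module and then return -EAGAIN, callers are
 * expected to replay the whole request (re-parsing it first), as
 * tc_new_tfilter() does further below with its "replay:" label. A minimal
 * caller-side shape:
 *
 *	replay:
 *		ops = tcf_proto_lookup_ops(kind, true, extack);
 *		if (IS_ERR(ops)) {
 *			if (PTR_ERR(ops) == -EAGAIN)
 *				goto replay;
 *			return PTR_ERR(ops);
 *		}
 */
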
176 /* Register (unregister) a new classifier type */
177 
178 int register_tcf_proto_ops(struct tcf_proto_ops *ops)
179 {
180 	struct tcf_proto_ops *t;
181 	int rc = -EEXIST;
182 
183 	write_lock(&cls_mod_lock);
184 	list_for_each_entry(t, &tcf_proto_base, head)
185 		if (!strcmp(ops->kind, t->kind))
186 			goto out;
187 
188 	list_add_tail(&ops->head, &tcf_proto_base);
189 	rc = 0;
190 out:
191 	write_unlock(&cls_mod_lock);
192 	return rc;
193 }
194 EXPORT_SYMBOL(register_tcf_proto_ops);
195 
196 static struct workqueue_struct *tc_filter_wq;
197 
198 void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
199 {
200 	struct tcf_proto_ops *t;
201 	int rc = -ENOENT;
202 
203 	/* Wait for outstanding call_rcu()s, if any, from a
204 	 * tcf_proto_ops's destroy() handler.
205 	 */
206 	rcu_barrier();
207 	flush_workqueue(tc_filter_wq);
208 
209 	write_lock(&cls_mod_lock);
210 	list_for_each_entry(t, &tcf_proto_base, head) {
211 		if (t == ops) {
212 			list_del(&t->head);
213 			rc = 0;
214 			break;
215 		}
216 	}
217 	write_unlock(&cls_mod_lock);
218 
219 	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
220 }
221 EXPORT_SYMBOL(unregister_tcf_proto_ops);
222 
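/* Illustrative sketch (editorial): a classifier module registers its ops on
 * load and unregisters them on unload. "foo" and its callbacks are
 * hypothetical; real classifiers such as cls_basic follow this pattern.
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init init_foo(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit exit_foo(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	module_init(init_foo);
 *	module_exit(exit_foo);
 */
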
223 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
224 {
225 	INIT_RCU_WORK(rwork, func);
226 	return queue_rcu_work(tc_filter_wq, rwork);
227 }
228 EXPORT_SYMBOL(tcf_queue_work);
229 
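/* Illustrative sketch (editorial): classifiers embed a struct rcu_work in
 * their filter representation and use tcf_queue_work() so the filter is
 * freed on tc_filter_wq only after an RCU grace period has elapsed.
 * foo_filter and foo_delete_filter_work() are hypothetical; cls_flower and
 * others follow this shape.
 *
 *	struct foo_filter {
 *		struct rcu_work rwork;
 *	};
 *
 *	static void foo_delete_filter_work(struct work_struct *work)
 *	{
 *		struct foo_filter *f = container_of(to_rcu_work(work),
 *						    struct foo_filter, rwork);
 *
 *		kfree(f);
 *	}
 *
 * and then, from the classifier's delete path:
 *
 *	tcf_queue_work(&f->rwork, foo_delete_filter_work);
 */
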
230 /* Select a new prio value from the range managed by the kernel. */
231 
232 static inline u32 tcf_auto_prio(struct tcf_proto *tp)
233 {
234 	u32 first = TC_H_MAKE(0xC0000000U, 0U);
235 
236 	if (tp)
237 		first = tp->prio - 1;
238 
239 	return TC_H_MAJ(first);
240 }
241 
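/* Worked example (editorial): with an empty chain, the first auto-allocated
 * prio is TC_H_MAJ(0xC0000000) = 0xC0000000. If the current head has prio
 * 0xC0000000, the next auto prio is TC_H_MAJ(0xC0000000 - 1) = 0xBFFF0000,
 * i.e. kernel-chosen priorities count down from the top of the range.
 */
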
242 static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
243 {
244 	if (kind)
245 		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
246 	memset(name, 0, IFNAMSIZ);
247 	return false;
248 }
249 
250 static bool tcf_proto_is_unlocked(const char *kind)
251 {
252 	const struct tcf_proto_ops *ops;
253 	bool ret;
254 
255 	if (strlen(kind) == 0)
256 		return false;
257 
258 	ops = tcf_proto_lookup_ops(kind, false, NULL);
259 	/* On error, return false to take the rtnl lock. The proto lookup/create
260 	 * functions will perform the lookup again and handle errors properly.
261 	 */
262 	if (IS_ERR(ops))
263 		return false;
264 
265 	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
266 	module_put(ops->owner);
267 	return ret;
268 }
269 
270 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
271 					  u32 prio, struct tcf_chain *chain,
272 					  bool rtnl_held,
273 					  struct netlink_ext_ack *extack)
274 {
275 	struct tcf_proto *tp;
276 	int err;
277 
278 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
279 	if (!tp)
280 		return ERR_PTR(-ENOBUFS);
281 
282 	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
283 	if (IS_ERR(tp->ops)) {
284 		err = PTR_ERR(tp->ops);
285 		goto errout;
286 	}
287 	tp->classify = tp->ops->classify;
288 	tp->protocol = protocol;
289 	tp->prio = prio;
290 	tp->chain = chain;
291 	spin_lock_init(&tp->lock);
292 	refcount_set(&tp->refcnt, 1);
293 
294 	err = tp->ops->init(tp);
295 	if (err) {
296 		module_put(tp->ops->owner);
297 		goto errout;
298 	}
299 	return tp;
300 
301 errout:
302 	kfree(tp);
303 	return ERR_PTR(err);
304 }
305 
306 static void tcf_proto_get(struct tcf_proto *tp)
307 {
308 	refcount_inc(&tp->refcnt);
309 }
310 
311 static void tcf_chain_put(struct tcf_chain *chain);
312 
313 static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
314 			      bool sig_destroy, struct netlink_ext_ack *extack)
315 {
316 	tp->ops->destroy(tp, rtnl_held, extack);
317 	if (sig_destroy)
318 		tcf_proto_signal_destroyed(tp->chain, tp);
319 	tcf_chain_put(tp->chain);
320 	module_put(tp->ops->owner);
321 	kfree_rcu(tp, rcu);
322 }
323 
324 static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
325 			  struct netlink_ext_ack *extack)
326 {
327 	if (refcount_dec_and_test(&tp->refcnt))
328 		tcf_proto_destroy(tp, rtnl_held, true, extack);
329 }
330 
331 static bool tcf_proto_check_delete(struct tcf_proto *tp)
332 {
333 	if (tp->ops->delete_empty)
334 		return tp->ops->delete_empty(tp);
335 
336 	tp->deleting = true;
337 	return tp->deleting;
338 }
339 
340 static void tcf_proto_mark_delete(struct tcf_proto *tp)
341 {
342 	spin_lock(&tp->lock);
343 	tp->deleting = true;
344 	spin_unlock(&tp->lock);
345 }
346 
347 static bool tcf_proto_is_deleting(struct tcf_proto *tp)
348 {
349 	bool deleting;
350 
351 	spin_lock(&tp->lock);
352 	deleting = tp->deleting;
353 	spin_unlock(&tp->lock);
354 
355 	return deleting;
356 }
357 
358 #define ASSERT_BLOCK_LOCKED(block)					\
359 	lockdep_assert_held(&(block)->lock)
360 
361 struct tcf_filter_chain_list_item {
362 	struct list_head list;
363 	tcf_chain_head_change_t *chain_head_change;
364 	void *chain_head_change_priv;
365 };
366 
367 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
368 					  u32 chain_index)
369 {
370 	struct tcf_chain *chain;
371 
372 	ASSERT_BLOCK_LOCKED(block);
373 
374 	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
375 	if (!chain)
376 		return NULL;
377 	list_add_tail_rcu(&chain->list, &block->chain_list);
378 	mutex_init(&chain->filter_chain_lock);
379 	chain->block = block;
380 	chain->index = chain_index;
381 	chain->refcnt = 1;
382 	if (!chain->index)
383 		block->chain0.chain = chain;
384 	return chain;
385 }
386 
387 static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
388 				       struct tcf_proto *tp_head)
389 {
390 	if (item->chain_head_change)
391 		item->chain_head_change(tp_head, item->chain_head_change_priv);
392 }
393 
394 static void tcf_chain0_head_change(struct tcf_chain *chain,
395 				   struct tcf_proto *tp_head)
396 {
397 	struct tcf_filter_chain_list_item *item;
398 	struct tcf_block *block = chain->block;
399 
400 	if (chain->index)
401 		return;
402 
403 	mutex_lock(&block->lock);
404 	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
405 		tcf_chain_head_change_item(item, tp_head);
406 	mutex_unlock(&block->lock);
407 }
408 
409 /* Returns true if block can be safely freed. */
410 
411 static bool tcf_chain_detach(struct tcf_chain *chain)
412 {
413 	struct tcf_block *block = chain->block;
414 
415 	ASSERT_BLOCK_LOCKED(block);
416 
417 	list_del_rcu(&chain->list);
418 	if (!chain->index)
419 		block->chain0.chain = NULL;
420 
421 	if (list_empty(&block->chain_list) &&
422 	    refcount_read(&block->refcnt) == 0)
423 		return true;
424 
425 	return false;
426 }
427 
428 static void tcf_block_destroy(struct tcf_block *block)
429 {
430 	mutex_destroy(&block->lock);
431 	mutex_destroy(&block->proto_destroy_lock);
432 	kfree_rcu(block, rcu);
433 }
434 
435 static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
436 {
437 	struct tcf_block *block = chain->block;
438 
439 	mutex_destroy(&chain->filter_chain_lock);
440 	kfree_rcu(chain, rcu);
441 	if (free_block)
442 		tcf_block_destroy(block);
443 }
444 
445 static void tcf_chain_hold(struct tcf_chain *chain)
446 {
447 	ASSERT_BLOCK_LOCKED(chain->block);
448 
449 	++chain->refcnt;
450 }
451 
452 static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
453 {
454 	ASSERT_BLOCK_LOCKED(chain->block);
455 
456 	/* In case all the references are action references, this
457 	 * chain should not be shown to the user.
458 	 */
459 	return chain->refcnt == chain->action_refcnt;
460 }
461 
462 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
463 					  u32 chain_index)
464 {
465 	struct tcf_chain *chain;
466 
467 	ASSERT_BLOCK_LOCKED(block);
468 
469 	list_for_each_entry(chain, &block->chain_list, list) {
470 		if (chain->index == chain_index)
471 			return chain;
472 	}
473 	return NULL;
474 }
475 
476 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
477 static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
478 					      u32 chain_index)
479 {
480 	struct tcf_chain *chain;
481 
482 	list_for_each_entry_rcu(chain, &block->chain_list, list) {
483 		if (chain->index == chain_index)
484 			return chain;
485 	}
486 	return NULL;
487 }
488 #endif
489 
490 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
491 			   u32 seq, u16 flags, int event, bool unicast,
492 			   struct netlink_ext_ack *extack);
493 
494 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
495 					 u32 chain_index, bool create,
496 					 bool by_act)
497 {
498 	struct tcf_chain *chain = NULL;
499 	bool is_first_reference;
500 
501 	mutex_lock(&block->lock);
502 	chain = tcf_chain_lookup(block, chain_index);
503 	if (chain) {
504 		tcf_chain_hold(chain);
505 	} else {
506 		if (!create)
507 			goto errout;
508 		chain = tcf_chain_create(block, chain_index);
509 		if (!chain)
510 			goto errout;
511 	}
512 
513 	if (by_act)
514 		++chain->action_refcnt;
515 	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
516 	mutex_unlock(&block->lock);
517 
518 	/* Send a notification only when we get the first
519 	 * non-action reference. Until then, the chain acts only as
520 	 * a placeholder for actions pointing to it, and the user ought
521 	 * not to know about it.
522 	 */
523 	if (is_first_reference && !by_act)
524 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
525 				RTM_NEWCHAIN, false, NULL);
526 
527 	return chain;
528 
529 errout:
530 	mutex_unlock(&block->lock);
531 	return chain;
532 }
533 
534 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
535 				       bool create)
536 {
537 	return __tcf_chain_get(block, chain_index, create, false);
538 }
539 
540 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
541 {
542 	return __tcf_chain_get(block, chain_index, true, true);
543 }
544 EXPORT_SYMBOL(tcf_chain_get_by_act);
545 
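/* Illustrative sketch (editorial): actions that "goto chain" hold a
 * long-lived chain reference. Condensed from the act_api goto-chain
 * handling: the reference is taken when the action is created and dropped
 * with tcf_chain_put_by_act() on teardown.
 *
 *	chain = tcf_chain_get_by_act(block, chain_index);
 *	if (!chain)
 *		return -ENOMEM;
 *	...
 *	tcf_chain_put_by_act(chain);
 */
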
546 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
547 			       void *tmplt_priv);
548 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
549 				  void *tmplt_priv, u32 chain_index,
550 				  struct tcf_block *block, struct sk_buff *oskb,
551 				  u32 seq, u16 flags, bool unicast);
552 
553 static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
554 			    bool explicitly_created)
555 {
556 	struct tcf_block *block = chain->block;
557 	const struct tcf_proto_ops *tmplt_ops;
558 	bool free_block = false;
559 	unsigned int refcnt;
560 	void *tmplt_priv;
561 
562 	mutex_lock(&block->lock);
563 	if (explicitly_created) {
564 		if (!chain->explicitly_created) {
565 			mutex_unlock(&block->lock);
566 			return;
567 		}
568 		chain->explicitly_created = false;
569 	}
570 
571 	if (by_act)
572 		chain->action_refcnt--;
573 
574 	/* tc_chain_notify_delete() can't be called while holding the block lock.
575 	 * However, once the block is unlocked the chain can change concurrently,
576 	 * so save these fields to temporary variables.
577 	 */
578 	refcnt = --chain->refcnt;
579 	tmplt_ops = chain->tmplt_ops;
580 	tmplt_priv = chain->tmplt_priv;
581 
582 	/* The last dropped non-action reference will trigger notification. */
583 	if (refcnt - chain->action_refcnt == 0 && !by_act) {
584 		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
585 				       block, NULL, 0, 0, false);
586 		/* Last reference to chain, no need to lock. */
587 		chain->flushing = false;
588 	}
589 
590 	if (refcnt == 0)
591 		free_block = tcf_chain_detach(chain);
592 	mutex_unlock(&block->lock);
593 
594 	if (refcnt == 0) {
595 		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
596 		tcf_chain_destroy(chain, free_block);
597 	}
598 }
599 
600 static void tcf_chain_put(struct tcf_chain *chain)
601 {
602 	__tcf_chain_put(chain, false, false);
603 }
604 
605 void tcf_chain_put_by_act(struct tcf_chain *chain)
606 {
607 	__tcf_chain_put(chain, true, false);
608 }
609 EXPORT_SYMBOL(tcf_chain_put_by_act);
610 
611 static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
612 {
613 	__tcf_chain_put(chain, false, true);
614 }
615 
616 static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
617 {
618 	struct tcf_proto *tp, *tp_next;
619 
620 	mutex_lock(&chain->filter_chain_lock);
621 	tp = tcf_chain_dereference(chain->filter_chain, chain);
622 	while (tp) {
623 		tp_next = rcu_dereference_protected(tp->next, 1);
624 		tcf_proto_signal_destroying(chain, tp);
625 		tp = tp_next;
626 	}
627 	tp = tcf_chain_dereference(chain->filter_chain, chain);
628 	RCU_INIT_POINTER(chain->filter_chain, NULL);
629 	tcf_chain0_head_change(chain, NULL);
630 	chain->flushing = true;
631 	mutex_unlock(&chain->filter_chain_lock);
632 
633 	while (tp) {
634 		tp_next = rcu_dereference_protected(tp->next, 1);
635 		tcf_proto_put(tp, rtnl_held, NULL);
636 		tp = tp_next;
637 	}
638 }
639 
640 static int tcf_block_setup(struct tcf_block *block,
641 			   struct flow_block_offload *bo);
642 
643 static void tcf_block_offload_init(struct flow_block_offload *bo,
644 				   struct net_device *dev, struct Qdisc *sch,
645 				   enum flow_block_command command,
646 				   enum flow_block_binder_type binder_type,
647 				   struct flow_block *flow_block,
648 				   bool shared, struct netlink_ext_ack *extack)
649 {
650 	bo->net = dev_net(dev);
651 	bo->command = command;
652 	bo->binder_type = binder_type;
653 	bo->block = flow_block;
654 	bo->block_shared = shared;
655 	bo->extack = extack;
656 	bo->sch = sch;
657 	bo->cb_list_head = &flow_block->cb_list;
658 	INIT_LIST_HEAD(&bo->cb_list);
659 }
660 
661 static void tcf_block_unbind(struct tcf_block *block,
662 			     struct flow_block_offload *bo);
663 
664 static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
665 {
666 	struct tcf_block *block = block_cb->indr.data;
667 	struct net_device *dev = block_cb->indr.dev;
668 	struct Qdisc *sch = block_cb->indr.sch;
669 	struct netlink_ext_ack extack = {};
670 	struct flow_block_offload bo = {};
671 
672 	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
673 			       block_cb->indr.binder_type,
674 			       &block->flow_block, tcf_block_shared(block),
675 			       &extack);
676 	rtnl_lock();
677 	down_write(&block->cb_lock);
678 	list_del(&block_cb->driver_list);
679 	list_move(&block_cb->list, &bo.cb_list);
680 	tcf_block_unbind(block, &bo);
681 	up_write(&block->cb_lock);
682 	rtnl_unlock();
683 }
684 
685 static bool tcf_block_offload_in_use(struct tcf_block *block)
686 {
687 	return atomic_read(&block->offloadcnt);
688 }
689 
690 static int tcf_block_offload_cmd(struct tcf_block *block,
691 				 struct net_device *dev, struct Qdisc *sch,
692 				 struct tcf_block_ext_info *ei,
693 				 enum flow_block_command command,
694 				 struct netlink_ext_ack *extack)
695 {
696 	struct flow_block_offload bo = {};
697 
698 	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
699 			       &block->flow_block, tcf_block_shared(block),
700 			       extack);
701 
702 	if (dev->netdev_ops->ndo_setup_tc) {
703 		int err;
704 
705 		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
706 		if (err < 0) {
707 			if (err != -EOPNOTSUPP)
708 				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
709 			return err;
710 		}
711 
712 		return tcf_block_setup(block, &bo);
713 	}
714 
715 	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
716 				    tc_block_indr_cleanup);
717 	tcf_block_setup(block, &bo);
718 
719 	return -EOPNOTSUPP;
720 }
721 
722 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
723 				  struct tcf_block_ext_info *ei,
724 				  struct netlink_ext_ack *extack)
725 {
726 	struct net_device *dev = q->dev_queue->dev;
727 	int err;
728 
729 	down_write(&block->cb_lock);
730 
731 	/* If the tc offload feature is disabled and the block we try to bind
732 	 * to already has some offloaded filters, refuse the bind.
733 	 */
734 	if (dev->netdev_ops->ndo_setup_tc &&
735 	    !tc_can_offload(dev) &&
736 	    tcf_block_offload_in_use(block)) {
737 		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
738 		err = -EOPNOTSUPP;
739 		goto err_unlock;
740 	}
741 
742 	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
743 	if (err == -EOPNOTSUPP)
744 		goto no_offload_dev_inc;
745 	if (err)
746 		goto err_unlock;
747 
748 	up_write(&block->cb_lock);
749 	return 0;
750 
751 no_offload_dev_inc:
752 	if (tcf_block_offload_in_use(block))
753 		goto err_unlock;
754 
755 	err = 0;
756 	block->nooffloaddevcnt++;
757 err_unlock:
758 	up_write(&block->cb_lock);
759 	return err;
760 }
761 
762 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
763 				     struct tcf_block_ext_info *ei)
764 {
765 	struct net_device *dev = q->dev_queue->dev;
766 	int err;
767 
768 	down_write(&block->cb_lock);
769 	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
770 	if (err == -EOPNOTSUPP)
771 		goto no_offload_dev_dec;
772 	up_write(&block->cb_lock);
773 	return;
774 
775 no_offload_dev_dec:
776 	WARN_ON(block->nooffloaddevcnt-- == 0);
777 	up_write(&block->cb_lock);
778 }
779 
780 static int
781 tcf_chain0_head_change_cb_add(struct tcf_block *block,
782 			      struct tcf_block_ext_info *ei,
783 			      struct netlink_ext_ack *extack)
784 {
785 	struct tcf_filter_chain_list_item *item;
786 	struct tcf_chain *chain0;
787 
788 	item = kmalloc(sizeof(*item), GFP_KERNEL);
789 	if (!item) {
790 		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
791 		return -ENOMEM;
792 	}
793 	item->chain_head_change = ei->chain_head_change;
794 	item->chain_head_change_priv = ei->chain_head_change_priv;
795 
796 	mutex_lock(&block->lock);
797 	chain0 = block->chain0.chain;
798 	if (chain0)
799 		tcf_chain_hold(chain0);
800 	else
801 		list_add(&item->list, &block->chain0.filter_chain_list);
802 	mutex_unlock(&block->lock);
803 
804 	if (chain0) {
805 		struct tcf_proto *tp_head;
806 
807 		mutex_lock(&chain0->filter_chain_lock);
808 
809 		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
810 		if (tp_head)
811 			tcf_chain_head_change_item(item, tp_head);
812 
813 		mutex_lock(&block->lock);
814 		list_add(&item->list, &block->chain0.filter_chain_list);
815 		mutex_unlock(&block->lock);
816 
817 		mutex_unlock(&chain0->filter_chain_lock);
818 		tcf_chain_put(chain0);
819 	}
820 
821 	return 0;
822 }
823 
824 static void
825 tcf_chain0_head_change_cb_del(struct tcf_block *block,
826 			      struct tcf_block_ext_info *ei)
827 {
828 	struct tcf_filter_chain_list_item *item;
829 
830 	mutex_lock(&block->lock);
831 	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
832 		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
833 		    (item->chain_head_change == ei->chain_head_change &&
834 		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
835 			if (block->chain0.chain)
836 				tcf_chain_head_change_item(item, NULL);
837 			list_del(&item->list);
838 			mutex_unlock(&block->lock);
839 
840 			kfree(item);
841 			return;
842 		}
843 	}
844 	mutex_unlock(&block->lock);
845 	WARN_ON(1);
846 }
847 
848 struct tcf_net {
849 	spinlock_t idr_lock; /* Protects idr */
850 	struct idr idr;
851 };
852 
853 static unsigned int tcf_net_id;
854 
855 static int tcf_block_insert(struct tcf_block *block, struct net *net,
856 			    struct netlink_ext_ack *extack)
857 {
858 	struct tcf_net *tn = net_generic(net, tcf_net_id);
859 	int err;
860 
861 	idr_preload(GFP_KERNEL);
862 	spin_lock(&tn->idr_lock);
863 	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
864 			    GFP_NOWAIT);
865 	spin_unlock(&tn->idr_lock);
866 	idr_preload_end();
867 
868 	return err;
869 }
870 
871 static void tcf_block_remove(struct tcf_block *block, struct net *net)
872 {
873 	struct tcf_net *tn = net_generic(net, tcf_net_id);
874 
875 	spin_lock(&tn->idr_lock);
876 	idr_remove(&tn->idr, block->index);
877 	spin_unlock(&tn->idr_lock);
878 }
879 
880 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
881 					  u32 block_index,
882 					  struct netlink_ext_ack *extack)
883 {
884 	struct tcf_block *block;
885 
886 	block = kzalloc(sizeof(*block), GFP_KERNEL);
887 	if (!block) {
888 		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
889 		return ERR_PTR(-ENOMEM);
890 	}
891 	mutex_init(&block->lock);
892 	mutex_init(&block->proto_destroy_lock);
893 	init_rwsem(&block->cb_lock);
894 	flow_block_init(&block->flow_block);
895 	INIT_LIST_HEAD(&block->chain_list);
896 	INIT_LIST_HEAD(&block->owner_list);
897 	INIT_LIST_HEAD(&block->chain0.filter_chain_list);
898 
899 	refcount_set(&block->refcnt, 1);
900 	block->net = net;
901 	block->index = block_index;
902 
903 	/* Don't store q pointer for blocks which are shared */
904 	if (!tcf_block_shared(block))
905 		block->q = q;
906 	return block;
907 }
908 
909 static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
910 {
911 	struct tcf_net *tn = net_generic(net, tcf_net_id);
912 
913 	return idr_find(&tn->idr, block_index);
914 }
915 
916 static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
917 {
918 	struct tcf_block *block;
919 
920 	rcu_read_lock();
921 	block = tcf_block_lookup(net, block_index);
922 	if (block && !refcount_inc_not_zero(&block->refcnt))
923 		block = NULL;
924 	rcu_read_unlock();
925 
926 	return block;
927 }
928 
929 static struct tcf_chain *
930 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
931 {
932 	mutex_lock(&block->lock);
933 	if (chain)
934 		chain = list_is_last(&chain->list, &block->chain_list) ?
935 			NULL : list_next_entry(chain, list);
936 	else
937 		chain = list_first_entry_or_null(&block->chain_list,
938 						 struct tcf_chain, list);
939 
940 	/* skip all action-only chains */
941 	while (chain && tcf_chain_held_by_acts_only(chain))
942 		chain = list_is_last(&chain->list, &block->chain_list) ?
943 			NULL : list_next_entry(chain, list);
944 
945 	if (chain)
946 		tcf_chain_hold(chain);
947 	mutex_unlock(&block->lock);
948 
949 	return chain;
950 }
951 
952 /* Function to be used by all clients that want to iterate over all chains on
953  * a block. It properly obtains block->lock and takes a reference to the chain
954  * before returning it. Users of this function must be tolerant of concurrent
955  * chain insertion/deletion or ensure that no concurrent chain modification is
956  * possible. Note that netlink dump callbacks cannot guarantee a consistent
957  * dump because the rtnl lock is released each time the skb is filled with
958  * data and sent to user space.
959  */
960 
961 struct tcf_chain *
962 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
963 {
964 	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);
965 
966 	if (chain)
967 		tcf_chain_put(chain);
968 
969 	return chain_next;
970 }
971 EXPORT_SYMBOL(tcf_get_next_chain);
972 
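/* Illustrative usage sketch (editorial): walking every chain on a block.
 * Passing the previous chain back in releases its reference, so breaking out
 * of the loop early requires an explicit tcf_chain_put(). See
 * tcf_block_flush_all_chains() below for an in-tree user of this pattern;
 * do_something() is hypothetical.
 *
 *	struct tcf_chain *chain;
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		do_something(chain);
 */
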
973 static struct tcf_proto *
974 __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
975 {
976 	u32 prio = 0;
977 
978 	ASSERT_RTNL();
979 	mutex_lock(&chain->filter_chain_lock);
980 
981 	if (!tp) {
982 		tp = tcf_chain_dereference(chain->filter_chain, chain);
983 	} else if (tcf_proto_is_deleting(tp)) {
984 		/* 'deleting' flag is set and chain->filter_chain_lock was
985 		 * unlocked, which means next pointer could be invalid. Restart
986 		 * search.
987 		 */
988 		prio = tp->prio + 1;
989 		tp = tcf_chain_dereference(chain->filter_chain, chain);
990 
991 		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
992 			if (!tp->deleting && tp->prio >= prio)
993 				break;
994 	} else {
995 		tp = tcf_chain_dereference(tp->next, chain);
996 	}
997 
998 	if (tp)
999 		tcf_proto_get(tp);
1000 
1001 	mutex_unlock(&chain->filter_chain_lock);
1002 
1003 	return tp;
1004 }
1005 
1006 /* Function to be used by all clients that want to iterate over all tp's on a
1007  * chain. Users of this function must be tolerant of concurrent tp
1008  * insertion/deletion or ensure that no concurrent chain modification is
1009  * possible. Note that netlink dump callbacks cannot guarantee a consistent
1010  * dump because the rtnl lock is released each time the skb is filled with
1011  * data and sent to user space.
1012  */
1013 
1014 struct tcf_proto *
1015 tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
1016 {
1017 	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
1018 
1019 	if (tp)
1020 		tcf_proto_put(tp, true, NULL);
1021 
1022 	return tp_next;
1023 }
1024 EXPORT_SYMBOL(tcf_get_next_proto);
1025 
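/* Illustrative usage sketch (editorial): iterating all classifiers on a
 * chain under RTNL; visit() is hypothetical. The same release rule as for
 * tcf_get_next_chain() applies: bailing out early requires a manual
 * tcf_proto_put().
 *
 *	struct tcf_proto *tp;
 *
 *	for (tp = tcf_get_next_proto(chain, NULL);
 *	     tp;
 *	     tp = tcf_get_next_proto(chain, tp))
 *		visit(tp);
 */
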
1026 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
1027 {
1028 	struct tcf_chain *chain;
1029 
1030 	/* Last reference to block. At this point chains cannot be added or
1031 	 * removed concurrently.
1032 	 */
1033 	for (chain = tcf_get_next_chain(block, NULL);
1034 	     chain;
1035 	     chain = tcf_get_next_chain(block, chain)) {
1036 		tcf_chain_put_explicitly_created(chain);
1037 		tcf_chain_flush(chain, rtnl_held);
1038 	}
1039 }
1040 
1041 /* Look up the Qdisc and increment its reference counter.
1042  * Set parent, if necessary.
1043  */
1044 
1045 static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
1046 			    u32 *parent, int ifindex, bool rtnl_held,
1047 			    struct netlink_ext_ack *extack)
1048 {
1049 	const struct Qdisc_class_ops *cops;
1050 	struct net_device *dev;
1051 	int err = 0;
1052 
1053 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1054 		return 0;
1055 
1056 	rcu_read_lock();
1057 
1058 	/* Find link */
1059 	dev = dev_get_by_index_rcu(net, ifindex);
1060 	if (!dev) {
1061 		rcu_read_unlock();
1062 		return -ENODEV;
1063 	}
1064 
1065 	/* Find qdisc */
1066 	if (!*parent) {
1067 		*q = rcu_dereference(dev->qdisc);
1068 		*parent = (*q)->handle;
1069 	} else {
1070 		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
1071 		if (!*q) {
1072 			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1073 			err = -EINVAL;
1074 			goto errout_rcu;
1075 		}
1076 	}
1077 
1078 	*q = qdisc_refcount_inc_nz(*q);
1079 	if (!*q) {
1080 		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1081 		err = -EINVAL;
1082 		goto errout_rcu;
1083 	}
1084 
1085 	/* Is it classful? */
1086 	cops = (*q)->ops->cl_ops;
1087 	if (!cops) {
1088 		NL_SET_ERR_MSG(extack, "Qdisc not classful");
1089 		err = -EINVAL;
1090 		goto errout_qdisc;
1091 	}
1092 
1093 	if (!cops->tcf_block) {
1094 		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
1095 		err = -EOPNOTSUPP;
1096 		goto errout_qdisc;
1097 	}
1098 
1099 errout_rcu:
1100 	/* At this point we know that qdisc is not noop_qdisc,
1101 	 * which means that qdisc holds a reference to net_device
1102 	 * and we hold a reference to qdisc, so it is safe to release
1103 	 * rcu read lock.
1104 	 */
1105 	rcu_read_unlock();
1106 	return err;
1107 
1108 errout_qdisc:
1109 	rcu_read_unlock();
1110 
1111 	if (rtnl_held)
1112 		qdisc_put(*q);
1113 	else
1114 		qdisc_put_unlocked(*q);
1115 	*q = NULL;
1116 
1117 	return err;
1118 }
1119 
1120 static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
1121 			       int ifindex, struct netlink_ext_ack *extack)
1122 {
1123 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1124 		return 0;
1125 
1126 	/* Do we search for a filter attached to a class? */
1127 	if (TC_H_MIN(parent)) {
1128 		const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1129 
1130 		*cl = cops->find(q, parent);
1131 		if (*cl == 0) {
1132 			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
1133 			return -ENOENT;
1134 		}
1135 	}
1136 
1137 	return 0;
1138 }
1139 
1140 static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
1141 					  unsigned long cl, int ifindex,
1142 					  u32 block_index,
1143 					  struct netlink_ext_ack *extack)
1144 {
1145 	struct tcf_block *block;
1146 
1147 	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
1148 		block = tcf_block_refcnt_get(net, block_index);
1149 		if (!block) {
1150 			NL_SET_ERR_MSG(extack, "Block of given index was not found");
1151 			return ERR_PTR(-EINVAL);
1152 		}
1153 	} else {
1154 		const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1155 
1156 		block = cops->tcf_block(q, cl, extack);
1157 		if (!block)
1158 			return ERR_PTR(-EINVAL);
1159 
1160 		if (tcf_block_shared(block)) {
1161 			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
1162 			return ERR_PTR(-EOPNOTSUPP);
1163 		}
1164 
1165 		/* Always take a reference to the block in order to support
1166 		 * executing the cls API rules update path without the rtnl
1167 		 * lock. The caller must release the block when finished using
1168 		 * it. The 'if' branch of this conditional obtains its
1169 		 * reference by calling tcf_block_refcnt_get().
1170 		 */
1171 		refcount_inc(&block->refcnt);
1172 	}
1173 
1174 	return block;
1175 }
1176 
1177 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
1178 			    struct tcf_block_ext_info *ei, bool rtnl_held)
1179 {
1180 	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
1181 		/* Flushing/putting all chains will cause the block to be
1182 		 * deallocated when the last chain is freed. However, if chain_list
1183 		 * is empty, the block has to be deallocated manually. Once the
1184 		 * block reference counter has reached 0, it is no longer possible
1185 		 * to increment it or add new chains to the block.
1186 		 */
1187 		bool free_block = list_empty(&block->chain_list);
1188 
1189 		mutex_unlock(&block->lock);
1190 		if (tcf_block_shared(block))
1191 			tcf_block_remove(block, block->net);
1192 
1193 		if (q)
1194 			tcf_block_offload_unbind(block, q, ei);
1195 
1196 		if (free_block)
1197 			tcf_block_destroy(block);
1198 		else
1199 			tcf_block_flush_all_chains(block, rtnl_held);
1200 	} else if (q) {
1201 		tcf_block_offload_unbind(block, q, ei);
1202 	}
1203 }
1204 
1205 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
1206 {
1207 	__tcf_block_put(block, NULL, NULL, rtnl_held);
1208 }
1209 
1210 /* Find tcf block.
1211  * Set q, parent, cl when appropriate.
1212  */
1213 
1214 static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
1215 					u32 *parent, unsigned long *cl,
1216 					int ifindex, u32 block_index,
1217 					struct netlink_ext_ack *extack)
1218 {
1219 	struct tcf_block *block;
1220 	int err = 0;
1221 
1222 	ASSERT_RTNL();
1223 
1224 	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
1225 	if (err)
1226 		goto errout;
1227 
1228 	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
1229 	if (err)
1230 		goto errout_qdisc;
1231 
1232 	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
1233 	if (IS_ERR(block)) {
1234 		err = PTR_ERR(block);
1235 		goto errout_qdisc;
1236 	}
1237 
1238 	return block;
1239 
1240 errout_qdisc:
1241 	if (*q)
1242 		qdisc_put(*q);
1243 errout:
1244 	*q = NULL;
1245 	return ERR_PTR(err);
1246 }
1247 
1248 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
1249 			      bool rtnl_held)
1250 {
1251 	if (!IS_ERR_OR_NULL(block))
1252 		tcf_block_refcnt_put(block, rtnl_held);
1253 
1254 	if (q) {
1255 		if (rtnl_held)
1256 			qdisc_put(q);
1257 		else
1258 			qdisc_put_unlocked(q);
1259 	}
1260 }
1261 
1262 struct tcf_block_owner_item {
1263 	struct list_head list;
1264 	struct Qdisc *q;
1265 	enum flow_block_binder_type binder_type;
1266 };
1267 
1268 static void
1269 tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1270 			       struct Qdisc *q,
1271 			       enum flow_block_binder_type binder_type)
1272 {
1273 	if (block->keep_dst &&
1274 	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1275 	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1276 		netif_keep_dst(qdisc_dev(q));
1277 }
1278 
1279 void tcf_block_netif_keep_dst(struct tcf_block *block)
1280 {
1281 	struct tcf_block_owner_item *item;
1282 
1283 	block->keep_dst = true;
1284 	list_for_each_entry(item, &block->owner_list, list)
1285 		tcf_block_owner_netif_keep_dst(block, item->q,
1286 					       item->binder_type);
1287 }
1288 EXPORT_SYMBOL(tcf_block_netif_keep_dst);
1289 
1290 static int tcf_block_owner_add(struct tcf_block *block,
1291 			       struct Qdisc *q,
1292 			       enum flow_block_binder_type binder_type)
1293 {
1294 	struct tcf_block_owner_item *item;
1295 
1296 	item = kmalloc(sizeof(*item), GFP_KERNEL);
1297 	if (!item)
1298 		return -ENOMEM;
1299 	item->q = q;
1300 	item->binder_type = binder_type;
1301 	list_add(&item->list, &block->owner_list);
1302 	return 0;
1303 }
1304 
1305 static void tcf_block_owner_del(struct tcf_block *block,
1306 				struct Qdisc *q,
1307 				enum flow_block_binder_type binder_type)
1308 {
1309 	struct tcf_block_owner_item *item;
1310 
1311 	list_for_each_entry(item, &block->owner_list, list) {
1312 		if (item->q == q && item->binder_type == binder_type) {
1313 			list_del(&item->list);
1314 			kfree(item);
1315 			return;
1316 		}
1317 	}
1318 	WARN_ON(1);
1319 }
1320 
1321 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1322 		      struct tcf_block_ext_info *ei,
1323 		      struct netlink_ext_ack *extack)
1324 {
1325 	struct net *net = qdisc_net(q);
1326 	struct tcf_block *block = NULL;
1327 	int err;
1328 
1329 	if (ei->block_index)
1330 		/* block_index not 0 means the shared block is requested */
1331 		block = tcf_block_refcnt_get(net, ei->block_index);
1332 
1333 	if (!block) {
1334 		block = tcf_block_create(net, q, ei->block_index, extack);
1335 		if (IS_ERR(block))
1336 			return PTR_ERR(block);
1337 		if (tcf_block_shared(block)) {
1338 			err = tcf_block_insert(block, net, extack);
1339 			if (err)
1340 				goto err_block_insert;
1341 		}
1342 	}
1343 
1344 	err = tcf_block_owner_add(block, q, ei->binder_type);
1345 	if (err)
1346 		goto err_block_owner_add;
1347 
1348 	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1349 
1350 	err = tcf_chain0_head_change_cb_add(block, ei, extack);
1351 	if (err)
1352 		goto err_chain0_head_change_cb_add;
1353 
1354 	err = tcf_block_offload_bind(block, q, ei, extack);
1355 	if (err)
1356 		goto err_block_offload_bind;
1357 
1358 	*p_block = block;
1359 	return 0;
1360 
1361 err_block_offload_bind:
1362 	tcf_chain0_head_change_cb_del(block, ei);
1363 err_chain0_head_change_cb_add:
1364 	tcf_block_owner_del(block, q, ei->binder_type);
1365 err_block_owner_add:
1366 err_block_insert:
1367 	tcf_block_refcnt_put(block, true);
1368 	return err;
1369 }
1370 EXPORT_SYMBOL(tcf_block_get_ext);
1371 
1372 static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1373 {
1374 	struct tcf_proto __rcu **p_filter_chain = priv;
1375 
1376 	rcu_assign_pointer(*p_filter_chain, tp_head);
1377 }
1378 
1379 int tcf_block_get(struct tcf_block **p_block,
1380 		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1381 		  struct netlink_ext_ack *extack)
1382 {
1383 	struct tcf_block_ext_info ei = {
1384 		.chain_head_change = tcf_chain_head_change_dflt,
1385 		.chain_head_change_priv = p_filter_chain,
1386 	};
1387 
1388 	WARN_ON(!p_filter_chain);
1389 	return tcf_block_get_ext(p_block, q, &ei, extack);
1390 }
1391 EXPORT_SYMBOL(tcf_block_get);
1392 
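/* Illustrative sketch (editorial): a classful qdisc typically obtains its
 * block in ->init() and releases it in ->destroy(). foo_sched_data and the
 * function names are hypothetical; in-tree qdiscs such as prio and multiq
 * follow this shape.
 *
 *	static int foo_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
 *				  struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void foo_qdisc_destroy(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */
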
1393 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
1394  * actions should all be removed after flushing.
1395  */
1396 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1397 		       struct tcf_block_ext_info *ei)
1398 {
1399 	if (!block)
1400 		return;
1401 	tcf_chain0_head_change_cb_del(block, ei);
1402 	tcf_block_owner_del(block, q, ei->binder_type);
1403 
1404 	__tcf_block_put(block, q, ei, true);
1405 }
1406 EXPORT_SYMBOL(tcf_block_put_ext);
1407 
1408 void tcf_block_put(struct tcf_block *block)
1409 {
1410 	struct tcf_block_ext_info ei = {0, };
1411 
1412 	if (!block)
1413 		return;
1414 	tcf_block_put_ext(block, block->q, &ei);
1415 }
1416 
1417 EXPORT_SYMBOL(tcf_block_put);
1418 
1419 static int
1420 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1421 			    void *cb_priv, bool add, bool offload_in_use,
1422 			    struct netlink_ext_ack *extack)
1423 {
1424 	struct tcf_chain *chain, *chain_prev;
1425 	struct tcf_proto *tp, *tp_prev;
1426 	int err;
1427 
1428 	lockdep_assert_held(&block->cb_lock);
1429 
1430 	for (chain = __tcf_get_next_chain(block, NULL);
1431 	     chain;
1432 	     chain_prev = chain,
1433 		     chain = __tcf_get_next_chain(block, chain),
1434 		     tcf_chain_put(chain_prev)) {
1435 		for (tp = __tcf_get_next_proto(chain, NULL); tp;
1436 		     tp_prev = tp,
1437 			     tp = __tcf_get_next_proto(chain, tp),
1438 			     tcf_proto_put(tp_prev, true, NULL)) {
1439 			if (tp->ops->reoffload) {
1440 				err = tp->ops->reoffload(tp, add, cb, cb_priv,
1441 							 extack);
1442 				if (err && add)
1443 					goto err_playback_remove;
1444 			} else if (add && offload_in_use) {
1445 				err = -EOPNOTSUPP;
1446 				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1447 				goto err_playback_remove;
1448 			}
1449 		}
1450 	}
1451 
1452 	return 0;
1453 
1454 err_playback_remove:
1455 	tcf_proto_put(tp, true, NULL);
1456 	tcf_chain_put(chain);
1457 	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1458 				    extack);
1459 	return err;
1460 }
1461 
1462 static int tcf_block_bind(struct tcf_block *block,
1463 			  struct flow_block_offload *bo)
1464 {
1465 	struct flow_block_cb *block_cb, *next;
1466 	int err, i = 0;
1467 
1468 	lockdep_assert_held(&block->cb_lock);
1469 
1470 	list_for_each_entry(block_cb, &bo->cb_list, list) {
1471 		err = tcf_block_playback_offloads(block, block_cb->cb,
1472 						  block_cb->cb_priv, true,
1473 						  tcf_block_offload_in_use(block),
1474 						  bo->extack);
1475 		if (err)
1476 			goto err_unroll;
1477 		if (!bo->unlocked_driver_cb)
1478 			block->lockeddevcnt++;
1479 
1480 		i++;
1481 	}
1482 	list_splice(&bo->cb_list, &block->flow_block.cb_list);
1483 
1484 	return 0;
1485 
1486 err_unroll:
1487 	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1488 		if (i-- > 0) {
1489 			list_del(&block_cb->list);
1490 			tcf_block_playback_offloads(block, block_cb->cb,
1491 						    block_cb->cb_priv, false,
1492 						    tcf_block_offload_in_use(block),
1493 						    NULL);
1494 			if (!bo->unlocked_driver_cb)
1495 				block->lockeddevcnt--;
1496 		}
1497 		flow_block_cb_free(block_cb);
1498 	}
1499 
1500 	return err;
1501 }
1502 
1503 static void tcf_block_unbind(struct tcf_block *block,
1504 			     struct flow_block_offload *bo)
1505 {
1506 	struct flow_block_cb *block_cb, *next;
1507 
1508 	lockdep_assert_held(&block->cb_lock);
1509 
1510 	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1511 		tcf_block_playback_offloads(block, block_cb->cb,
1512 					    block_cb->cb_priv, false,
1513 					    tcf_block_offload_in_use(block),
1514 					    NULL);
1515 		list_del(&block_cb->list);
1516 		flow_block_cb_free(block_cb);
1517 		if (!bo->unlocked_driver_cb)
1518 			block->lockeddevcnt--;
1519 	}
1520 }
1521 
1522 static int tcf_block_setup(struct tcf_block *block,
1523 			   struct flow_block_offload *bo)
1524 {
1525 	int err;
1526 
1527 	switch (bo->command) {
1528 	case FLOW_BLOCK_BIND:
1529 		err = tcf_block_bind(block, bo);
1530 		break;
1531 	case FLOW_BLOCK_UNBIND:
1532 		err = 0;
1533 		tcf_block_unbind(block, bo);
1534 		break;
1535 	default:
1536 		WARN_ON_ONCE(1);
1537 		err = -EOPNOTSUPP;
1538 	}
1539 
1540 	return err;
1541 }
1542 
1543 /* Main classifier routine: scans the classifier chain attached
1544  * to this qdisc, (optionally) tests for the protocol and invokes
1545  * the individual classifiers.
1546  */
1547 static inline int __tcf_classify(struct sk_buff *skb,
1548 				 const struct tcf_proto *tp,
1549 				 const struct tcf_proto *orig_tp,
1550 				 struct tcf_result *res,
1551 				 bool compat_mode,
1552 				 u32 *last_executed_chain)
1553 {
1554 #ifdef CONFIG_NET_CLS_ACT
1555 	const int max_reclassify_loop = 16;
1556 	const struct tcf_proto *first_tp;
1557 	int limit = 0;
1558 
1559 reclassify:
1560 #endif
1561 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
1562 		__be16 protocol = skb_protocol(skb, false);
1563 		int err;
1564 
1565 		if (tp->protocol != protocol &&
1566 		    tp->protocol != htons(ETH_P_ALL))
1567 			continue;
1568 
1569 		err = tc_classify(skb, tp, res);
1570 #ifdef CONFIG_NET_CLS_ACT
1571 		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1572 			first_tp = orig_tp;
1573 			*last_executed_chain = first_tp->chain->index;
1574 			goto reset;
1575 		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1576 			first_tp = res->goto_tp;
1577 			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1578 			goto reset;
1579 		}
1580 #endif
1581 		if (err >= 0)
1582 			return err;
1583 	}
1584 
1585 	return TC_ACT_UNSPEC; /* signal: continue lookup */
1586 #ifdef CONFIG_NET_CLS_ACT
1587 reset:
1588 	if (unlikely(limit++ >= max_reclassify_loop)) {
1589 		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1590 				       tp->chain->block->index,
1591 				       tp->prio & 0xffff,
1592 				       ntohs(tp->protocol));
1593 		return TC_ACT_SHOT;
1594 	}
1595 
1596 	tp = first_tp;
1597 	goto reclassify;
1598 #endif
1599 }
1600 
1601 int tcf_classify(struct sk_buff *skb,
1602 		 const struct tcf_block *block,
1603 		 const struct tcf_proto *tp,
1604 		 struct tcf_result *res, bool compat_mode)
1605 {
1606 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1607 	u32 last_executed_chain = 0;
1608 
1609 	return __tcf_classify(skb, tp, tp, res, compat_mode,
1610 			      &last_executed_chain);
1611 #else
1612 	u32 last_executed_chain = tp ? tp->chain->index : 0;
1613 	const struct tcf_proto *orig_tp = tp;
1614 	struct tc_skb_ext *ext;
1615 	int ret;
1616 
1617 	if (block) {
1618 		ext = skb_ext_find(skb, TC_SKB_EXT);
1619 
1620 		if (ext && ext->chain) {
1621 			struct tcf_chain *fchain;
1622 
1623 			fchain = tcf_chain_lookup_rcu(block, ext->chain);
1624 			if (!fchain)
1625 				return TC_ACT_SHOT;
1626 
1627 			/* Consume, so cloned/redirect skbs won't inherit ext */
1628 			skb_ext_del(skb, TC_SKB_EXT);
1629 
1630 			tp = rcu_dereference_bh(fchain->filter_chain);
1631 			last_executed_chain = fchain->index;
1632 		}
1633 	}
1634 
1635 	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
1636 			     &last_executed_chain);
1637 
1638 	if (tc_skb_ext_tc_enabled()) {
1639 		/* If we missed on some chain */
1640 		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1641 			struct tc_skb_cb *cb = tc_skb_cb(skb);
1642 
1643 			ext = tc_skb_ext_alloc(skb);
1644 			if (WARN_ON_ONCE(!ext))
1645 				return TC_ACT_SHOT;
1646 			ext->chain = last_executed_chain;
1647 			ext->mru = cb->mru;
1648 			ext->post_ct = cb->post_ct;
1649 			ext->post_ct_snat = cb->post_ct_snat;
1650 			ext->post_ct_dnat = cb->post_ct_dnat;
1651 			ext->zone = cb->zone;
1652 		}
1653 	}
1654 
1655 	return ret;
1656 #endif
1657 }
1658 EXPORT_SYMBOL(tcf_classify);
1659 
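/* Illustrative sketch (editorial): how a classful qdisc's classify helper
 * typically consumes tcf_classify(). TC_ACT_SHOT means drop, while
 * STOLEN/QUEUED/TRAP mean the packet was consumed by an action. Condensed
 * from the pattern used by in-tree qdiscs; q and foo_find_class() are
 * hypothetical, error handling and the unmatched (TC_ACT_UNSPEC) case are
 * elided, and real callers wrap the verdicts in #ifdef CONFIG_NET_CLS_ACT.
 *
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	struct tcf_result res;
 *
 *	switch (tcf_classify(skb, NULL, fl, &res, false)) {
 *	case TC_ACT_SHOT:
 *		kfree_skb(skb);
 *		return NULL;
 *	case TC_ACT_STOLEN:
 *	case TC_ACT_QUEUED:
 *	case TC_ACT_TRAP:
 *		return NULL;
 *	default:
 *		return foo_find_class(q, res.classid);
 *	}
 */
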
1660 struct tcf_chain_info {
1661 	struct tcf_proto __rcu **pprev;
1662 	struct tcf_proto __rcu *next;
1663 };
1664 
1665 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1666 					   struct tcf_chain_info *chain_info)
1667 {
1668 	return tcf_chain_dereference(*chain_info->pprev, chain);
1669 }
1670 
1671 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1672 			       struct tcf_chain_info *chain_info,
1673 			       struct tcf_proto *tp)
1674 {
1675 	if (chain->flushing)
1676 		return -EAGAIN;
1677 
1678 	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1679 	if (*chain_info->pprev == chain->filter_chain)
1680 		tcf_chain0_head_change(chain, tp);
1681 	tcf_proto_get(tp);
1682 	rcu_assign_pointer(*chain_info->pprev, tp);
1683 
1684 	return 0;
1685 }
1686 
1687 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1688 				struct tcf_chain_info *chain_info,
1689 				struct tcf_proto *tp)
1690 {
1691 	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1692 
1693 	tcf_proto_mark_delete(tp);
1694 	if (tp == chain->filter_chain)
1695 		tcf_chain0_head_change(chain, next);
1696 	RCU_INIT_POINTER(*chain_info->pprev, next);
1697 }
1698 
1699 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1700 					   struct tcf_chain_info *chain_info,
1701 					   u32 protocol, u32 prio,
1702 					   bool prio_allocate);
1703 
1704 /* Try to insert new proto.
1705  * If proto with specified priority already exists, free new proto
1706  * and return existing one.
1707  */
1708 
1709 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1710 						    struct tcf_proto *tp_new,
1711 						    u32 protocol, u32 prio,
1712 						    bool rtnl_held)
1713 {
1714 	struct tcf_chain_info chain_info;
1715 	struct tcf_proto *tp;
1716 	int err = 0;
1717 
1718 	mutex_lock(&chain->filter_chain_lock);
1719 
1720 	if (tcf_proto_exists_destroying(chain, tp_new)) {
1721 		mutex_unlock(&chain->filter_chain_lock);
1722 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1723 		return ERR_PTR(-EAGAIN);
1724 	}
1725 
1726 	tp = tcf_chain_tp_find(chain, &chain_info,
1727 			       protocol, prio, false);
1728 	if (!tp)
1729 		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1730 	mutex_unlock(&chain->filter_chain_lock);
1731 
1732 	if (tp) {
1733 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1734 		tp_new = tp;
1735 	} else if (err) {
1736 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1737 		tp_new = ERR_PTR(err);
1738 	}
1739 
1740 	return tp_new;
1741 }
1742 
1743 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1744 				      struct tcf_proto *tp, bool rtnl_held,
1745 				      struct netlink_ext_ack *extack)
1746 {
1747 	struct tcf_chain_info chain_info;
1748 	struct tcf_proto *tp_iter;
1749 	struct tcf_proto **pprev;
1750 	struct tcf_proto *next;
1751 
1752 	mutex_lock(&chain->filter_chain_lock);
1753 
1754 	/* Atomically find and remove tp from chain. */
1755 	for (pprev = &chain->filter_chain;
1756 	     (tp_iter = tcf_chain_dereference(*pprev, chain));
1757 	     pprev = &tp_iter->next) {
1758 		if (tp_iter == tp) {
1759 			chain_info.pprev = pprev;
1760 			chain_info.next = tp_iter->next;
1761 			WARN_ON(tp_iter->deleting);
1762 			break;
1763 		}
1764 	}
1765 	/* Verify that tp still exists and no new filters were inserted
1766 	 * concurrently.
1767 	 * Mark tp for deletion if it is empty.
1768 	 */
1769 	if (!tp_iter || !tcf_proto_check_delete(tp)) {
1770 		mutex_unlock(&chain->filter_chain_lock);
1771 		return;
1772 	}
1773 
1774 	tcf_proto_signal_destroying(chain, tp);
1775 	next = tcf_chain_dereference(chain_info.next, chain);
1776 	if (tp == chain->filter_chain)
1777 		tcf_chain0_head_change(chain, next);
1778 	RCU_INIT_POINTER(*chain_info.pprev, next);
1779 	mutex_unlock(&chain->filter_chain_lock);
1780 
1781 	tcf_proto_put(tp, rtnl_held, extack);
1782 }
1783 
1784 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1785 					   struct tcf_chain_info *chain_info,
1786 					   u32 protocol, u32 prio,
1787 					   bool prio_allocate)
1788 {
1789 	struct tcf_proto **pprev;
1790 	struct tcf_proto *tp;
1791 
1792 	/* Check the chain for existence of proto-tcf with this priority */
1793 	for (pprev = &chain->filter_chain;
1794 	     (tp = tcf_chain_dereference(*pprev, chain));
1795 	     pprev = &tp->next) {
1796 		if (tp->prio >= prio) {
1797 			if (tp->prio == prio) {
1798 				if (prio_allocate ||
1799 				    (tp->protocol != protocol && protocol))
1800 					return ERR_PTR(-EINVAL);
1801 			} else {
1802 				tp = NULL;
1803 			}
1804 			break;
1805 		}
1806 	}
1807 	chain_info->pprev = pprev;
1808 	if (tp) {
1809 		chain_info->next = tp->next;
1810 		tcf_proto_get(tp);
1811 	} else {
1812 		chain_info->next = NULL;
1813 	}
1814 	return tp;
1815 }
1816 
1817 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1818 			 struct tcf_proto *tp, struct tcf_block *block,
1819 			 struct Qdisc *q, u32 parent, void *fh,
1820 			 u32 portid, u32 seq, u16 flags, int event,
1821 			 bool terse_dump, bool rtnl_held,
1822 			 struct netlink_ext_ack *extack)
1823 {
1824 	struct tcmsg *tcm;
1825 	struct nlmsghdr  *nlh;
1826 	unsigned char *b = skb_tail_pointer(skb);
1827 
1828 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1829 	if (!nlh)
1830 		goto out_nlmsg_trim;
1831 	tcm = nlmsg_data(nlh);
1832 	tcm->tcm_family = AF_UNSPEC;
1833 	tcm->tcm__pad1 = 0;
1834 	tcm->tcm__pad2 = 0;
1835 	if (q) {
1836 		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1837 		tcm->tcm_parent = parent;
1838 	} else {
1839 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1840 		tcm->tcm_block_index = block->index;
1841 	}
1842 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1843 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1844 		goto nla_put_failure;
1845 	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1846 		goto nla_put_failure;
1847 	if (!fh) {
1848 		tcm->tcm_handle = 0;
1849 	} else if (terse_dump) {
1850 		if (tp->ops->terse_dump) {
1851 			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
1852 						rtnl_held) < 0)
1853 				goto nla_put_failure;
1854 		} else {
1855 			goto cls_op_not_supp;
1856 		}
1857 	} else {
1858 		if (tp->ops->dump &&
1859 		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1860 			goto nla_put_failure;
1861 	}
1862 
1863 	if (extack && extack->_msg &&
1864 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
1865 		goto nla_put_failure;
1866 
1867 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1868 
1869 	return skb->len;
1870 
1871 out_nlmsg_trim:
1872 nla_put_failure:
1873 cls_op_not_supp:
1874 	nlmsg_trim(skb, b);
1875 	return -1;
1876 }
1877 
1878 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1879 			  struct nlmsghdr *n, struct tcf_proto *tp,
1880 			  struct tcf_block *block, struct Qdisc *q,
1881 			  u32 parent, void *fh, int event, bool unicast,
1882 			  bool rtnl_held, struct netlink_ext_ack *extack)
1883 {
1884 	struct sk_buff *skb;
1885 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1886 	int err = 0;
1887 
1888 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1889 	if (!skb)
1890 		return -ENOBUFS;
1891 
1892 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1893 			  n->nlmsg_seq, n->nlmsg_flags, event,
1894 			  false, rtnl_held, extack) <= 0) {
1895 		kfree_skb(skb);
1896 		return -EINVAL;
1897 	}
1898 
1899 	if (unicast)
1900 		err = rtnl_unicast(skb, net, portid);
1901 	else
1902 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1903 				     n->nlmsg_flags & NLM_F_ECHO);
1904 	return err;
1905 }
1906 
1907 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1908 			      struct nlmsghdr *n, struct tcf_proto *tp,
1909 			      struct tcf_block *block, struct Qdisc *q,
1910 			      u32 parent, void *fh, bool unicast, bool *last,
1911 			      bool rtnl_held, struct netlink_ext_ack *extack)
1912 {
1913 	struct sk_buff *skb;
1914 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1915 	int err;
1916 
1917 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1918 	if (!skb)
1919 		return -ENOBUFS;
1920 
1921 	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1922 			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1923 			  false, rtnl_held, extack) <= 0) {
1924 		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1925 		kfree_skb(skb);
1926 		return -EINVAL;
1927 	}
1928 
1929 	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1930 	if (err) {
1931 		kfree_skb(skb);
1932 		return err;
1933 	}
1934 
1935 	if (unicast)
1936 		err = rtnl_unicast(skb, net, portid);
1937 	else
1938 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1939 				     n->nlmsg_flags & NLM_F_ECHO);
1940 	if (err < 0)
1941 		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1942 
1943 	return err;
1944 }
1945 
1946 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1947 				 struct tcf_block *block, struct Qdisc *q,
1948 				 u32 parent, struct nlmsghdr *n,
1949 				 struct tcf_chain *chain, int event,
1950 				 struct netlink_ext_ack *extack)
1951 {
1952 	struct tcf_proto *tp;
1953 
1954 	for (tp = tcf_get_next_proto(chain, NULL);
1955 	     tp; tp = tcf_get_next_proto(chain, tp))
1956 		tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
1957 			       event, false, true, extack);
1958 }
1959 
1960 static void tfilter_put(struct tcf_proto *tp, void *fh)
1961 {
1962 	if (tp->ops->put && fh)
1963 		tp->ops->put(tp, fh);
1964 }
1965 
1966 static bool is_qdisc_ingress(__u32 classid)
1967 {
1968 	return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
1969 }
1970 
1971 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1972 			  struct netlink_ext_ack *extack)
1973 {
1974 	struct net *net = sock_net(skb->sk);
1975 	struct nlattr *tca[TCA_MAX + 1];
1976 	char name[IFNAMSIZ];
1977 	struct tcmsg *t;
1978 	u32 protocol;
1979 	u32 prio;
1980 	bool prio_allocate;
1981 	u32 parent;
1982 	u32 chain_index;
1983 	struct Qdisc *q;
1984 	struct tcf_chain_info chain_info;
1985 	struct tcf_chain *chain;
1986 	struct tcf_block *block;
1987 	struct tcf_proto *tp;
1988 	unsigned long cl;
1989 	void *fh;
1990 	int err;
1991 	int tp_created;
1992 	bool rtnl_held = false;
1993 	u32 flags;
1994 
1995 replay:
1996 	tp_created = 0;
1997 
1998 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1999 				     rtm_tca_policy, extack);
2000 	if (err < 0)
2001 		return err;
2002 
2003 	t = nlmsg_data(n);
2004 	protocol = TC_H_MIN(t->tcm_info);
2005 	prio = TC_H_MAJ(t->tcm_info);
2006 	prio_allocate = false;
2007 	parent = t->tcm_parent;
2008 	tp = NULL;
2009 	cl = 0;
2010 	block = NULL;
2011 	q = NULL;
2012 	chain = NULL;
2013 	flags = 0;
2014 
2015 	if (prio == 0) {
2016 		/* If no priority is provided by the user,
2017 		 * we allocate one.
2018 		 */
2019 		if (n->nlmsg_flags & NLM_F_CREATE) {
2020 			prio = TC_H_MAKE(0x80000000U, 0U);
2021 			prio_allocate = true;
2022 		} else {
2023 			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2024 			return -ENOENT;
2025 		}
2026 	}
2027 
2028 	/* Find head of filter chain. */
2029 
2030 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2031 	if (err)
2032 		return err;
2033 
2034 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2035 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2036 		err = -EINVAL;
2037 		goto errout;
2038 	}
2039 
2040 	/* Take the rtnl mutex if rtnl_held was set on a previous iteration, the
2041 	 * block is shared (no qdisc found), the qdisc is not unlocked, the
2042 	 * classifier type is not specified, or the classifier is not unlocked.
2043 	 */
2044 	if (rtnl_held ||
2045 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2046 	    !tcf_proto_is_unlocked(name)) {
2047 		rtnl_held = true;
2048 		rtnl_lock();
2049 	}
2050 
2051 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2052 	if (err)
2053 		goto errout;
2054 
2055 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2056 				 extack);
2057 	if (IS_ERR(block)) {
2058 		err = PTR_ERR(block);
2059 		goto errout;
2060 	}
2061 	block->classid = parent;
2062 
2063 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2064 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2065 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2066 		err = -EINVAL;
2067 		goto errout;
2068 	}
2069 	chain = tcf_chain_get(block, chain_index, true);
2070 	if (!chain) {
2071 		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2072 		err = -ENOMEM;
2073 		goto errout;
2074 	}
2075 
2076 	mutex_lock(&chain->filter_chain_lock);
2077 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2078 			       prio, prio_allocate);
2079 	if (IS_ERR(tp)) {
2080 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2081 		err = PTR_ERR(tp);
2082 		goto errout_locked;
2083 	}
2084 
2085 	if (tp == NULL) {
2086 		struct tcf_proto *tp_new = NULL;
2087 
2088 		if (chain->flushing) {
2089 			err = -EAGAIN;
2090 			goto errout_locked;
2091 		}
2092 
2093 		/* Proto-tcf does not exist, create new one */
2094 
2095 		if (tca[TCA_KIND] == NULL || !protocol) {
2096 			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2097 			err = -EINVAL;
2098 			goto errout_locked;
2099 		}
2100 
2101 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2102 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2103 			err = -ENOENT;
2104 			goto errout_locked;
2105 		}
2106 
2107 		if (prio_allocate)
2108 			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2109 							       &chain_info));
2110 
2111 		mutex_unlock(&chain->filter_chain_lock);
2112 		tp_new = tcf_proto_create(name, protocol, prio, chain,
2113 					  rtnl_held, extack);
2114 		if (IS_ERR(tp_new)) {
2115 			err = PTR_ERR(tp_new);
2116 			goto errout_tp;
2117 		}
2118 
2119 		tp_created = 1;
2120 		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2121 						rtnl_held);
2122 		if (IS_ERR(tp)) {
2123 			err = PTR_ERR(tp);
2124 			goto errout_tp;
2125 		}
2126 	} else {
2127 		mutex_unlock(&chain->filter_chain_lock);
2128 	}
2129 
2130 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2131 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2132 		err = -EINVAL;
2133 		goto errout;
2134 	}
2135 
2136 	fh = tp->ops->get(tp, t->tcm_handle);
2137 
2138 	if (!fh) {
2139 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2140 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2141 			err = -ENOENT;
2142 			goto errout;
2143 		}
2144 	} else if (n->nlmsg_flags & NLM_F_EXCL) {
2145 		tfilter_put(tp, fh);
2146 		NL_SET_ERR_MSG(extack, "Filter already exists");
2147 		err = -EEXIST;
2148 		goto errout;
2149 	}
2150 
2151 	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2152 		tfilter_put(tp, fh);
2153 		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2154 		err = -EINVAL;
2155 		goto errout;
2156 	}
2157 
2158 	if (!(n->nlmsg_flags & NLM_F_CREATE))
2159 		flags |= TCA_ACT_FLAGS_REPLACE;
2160 	if (!rtnl_held)
2161 		flags |= TCA_ACT_FLAGS_NO_RTNL;
2162 	if (is_qdisc_ingress(parent))
2163 		flags |= TCA_ACT_FLAGS_AT_INGRESS;
2164 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2165 			      flags, extack);
2166 	if (err == 0) {
2167 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2168 			       RTM_NEWTFILTER, false, rtnl_held, extack);
2169 		tfilter_put(tp, fh);
2170 		/* q pointer is NULL for shared blocks */
2171 		if (q)
2172 			q->flags &= ~TCQ_F_CAN_BYPASS;
2173 	}
2174 
2175 errout:
2176 	if (err && tp_created)
2177 		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2178 errout_tp:
2179 	if (chain) {
2180 		if (tp && !IS_ERR(tp))
2181 			tcf_proto_put(tp, rtnl_held, NULL);
2182 		if (!tp_created)
2183 			tcf_chain_put(chain);
2184 	}
2185 	tcf_block_release(q, block, rtnl_held);
2186 
2187 	if (rtnl_held)
2188 		rtnl_unlock();
2189 
2190 	if (err == -EAGAIN) {
2191 		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
2192 		 * of target chain.
2193 		 */
2194 		rtnl_held = true;
2195 		/* Replay the request. */
2196 		goto replay;
2197 	}
2198 	return err;
2199 
2200 errout_locked:
2201 	mutex_unlock(&chain->filter_chain_lock);
2202 	goto errout;
2203 }
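
/* For reference, a minimal sketch of how userspace encodes a filter-add
 * request that lands in tc_new_tfilter() above. TC_H_MAKE() and the TC_H_*
 * constants are the real uapi macros; the values shown and the omitted
 * netlink socket plumbing are illustrative only:
 *
 *	struct tcmsg t = {
 *		.tcm_family  = AF_UNSPEC,
 *		.tcm_ifindex = ifindex,
 *		.tcm_parent  = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS),
 *		// prio in the upper 16 bits, protocol in the lower 16
 *		.tcm_info    = TC_H_MAKE(prio << 16, htons(ETH_P_ALL)),
 *	};
 *
 * The message carries nlmsg_type == RTM_NEWTFILTER with NLM_F_CREATE set
 * (required to create; prio == 0 additionally asks the kernel to
 * auto-allocate a priority) and a TCA_KIND string attribute naming the
 * classifier, e.g. "flower".
 */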
2204 
2205 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2206 			  struct netlink_ext_ack *extack)
2207 {
2208 	struct net *net = sock_net(skb->sk);
2209 	struct nlattr *tca[TCA_MAX + 1];
2210 	char name[IFNAMSIZ];
2211 	struct tcmsg *t;
2212 	u32 protocol;
2213 	u32 prio;
2214 	u32 parent;
2215 	u32 chain_index;
2216 	struct Qdisc *q = NULL;
2217 	struct tcf_chain_info chain_info;
2218 	struct tcf_chain *chain = NULL;
2219 	struct tcf_block *block = NULL;
2220 	struct tcf_proto *tp = NULL;
2221 	unsigned long cl = 0;
2222 	void *fh = NULL;
2223 	int err;
2224 	bool rtnl_held = false;
2225 
2226 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2227 				     rtm_tca_policy, extack);
2228 	if (err < 0)
2229 		return err;
2230 
2231 	t = nlmsg_data(n);
2232 	protocol = TC_H_MIN(t->tcm_info);
2233 	prio = TC_H_MAJ(t->tcm_info);
2234 	parent = t->tcm_parent;
2235 
2236 	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2237 		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2238 		return -ENOENT;
2239 	}
2240 
2241 	/* Find head of filter chain. */
2242 
2243 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2244 	if (err)
2245 		return err;
2246 
2247 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2248 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2249 		err = -EINVAL;
2250 		goto errout;
2251 	}
2252 	/* Take the rtnl mutex if flushing the whole chain, the block is shared
2253 	 * (no qdisc found), the qdisc is not unlocked, the classifier type is
2254 	 * not specified, or the classifier is not unlocked.
2255 	 */
2256 	if (!prio ||
2257 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2258 	    !tcf_proto_is_unlocked(name)) {
2259 		rtnl_held = true;
2260 		rtnl_lock();
2261 	}
2262 
2263 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2264 	if (err)
2265 		goto errout;
2266 
2267 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2268 				 extack);
2269 	if (IS_ERR(block)) {
2270 		err = PTR_ERR(block);
2271 		goto errout;
2272 	}
2273 
2274 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2275 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2276 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2277 		err = -EINVAL;
2278 		goto errout;
2279 	}
2280 	chain = tcf_chain_get(block, chain_index, false);
2281 	if (!chain) {
2282 		/* User requested flush on non-existent chain. Nothing to do,
2283 		 * so just return success.
2284 		 */
2285 		if (prio == 0) {
2286 			err = 0;
2287 			goto errout;
2288 		}
2289 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2290 		err = -ENOENT;
2291 		goto errout;
2292 	}
2293 
2294 	if (prio == 0) {
2295 		tfilter_notify_chain(net, skb, block, q, parent, n,
2296 				     chain, RTM_DELTFILTER, extack);
2297 		tcf_chain_flush(chain, rtnl_held);
2298 		err = 0;
2299 		goto errout;
2300 	}
2301 
2302 	mutex_lock(&chain->filter_chain_lock);
2303 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2304 			       prio, false);
2305 	if (!tp || IS_ERR(tp)) {
2306 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2307 		err = tp ? PTR_ERR(tp) : -ENOENT;
2308 		goto errout_locked;
2309 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2310 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2311 		err = -EINVAL;
2312 		goto errout_locked;
2313 	} else if (t->tcm_handle == 0) {
2314 		tcf_proto_signal_destroying(chain, tp);
2315 		tcf_chain_tp_remove(chain, &chain_info, tp);
2316 		mutex_unlock(&chain->filter_chain_lock);
2317 
2318 		tcf_proto_put(tp, rtnl_held, NULL);
2319 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2320 			       RTM_DELTFILTER, false, rtnl_held, extack);
2321 		err = 0;
2322 		goto errout;
2323 	}
2324 	mutex_unlock(&chain->filter_chain_lock);
2325 
2326 	fh = tp->ops->get(tp, t->tcm_handle);
2327 
2328 	if (!fh) {
2329 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2330 		err = -ENOENT;
2331 	} else {
2332 		bool last;
2333 
2334 		err = tfilter_del_notify(net, skb, n, tp, block,
2335 					 q, parent, fh, false, &last,
2336 					 rtnl_held, extack);
2337 
2338 		if (err)
2339 			goto errout;
2340 		if (last)
2341 			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2342 	}
2343 
2344 errout:
2345 	if (chain) {
2346 		if (tp && !IS_ERR(tp))
2347 			tcf_proto_put(tp, rtnl_held, NULL);
2348 		tcf_chain_put(chain);
2349 	}
2350 	tcf_block_release(q, block, rtnl_held);
2351 
2352 	if (rtnl_held)
2353 		rtnl_unlock();
2354 
2355 	return err;
2356 
2357 errout_locked:
2358 	mutex_unlock(&chain->filter_chain_lock);
2359 	goto errout;
2360 }
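
/* Quick reference for the RTM_DELTFILTER cases handled above (this summary
 * is derived from the code, not from additional semantics): prio == 0
 * flushes the whole chain and forbids protocol/handle/kind; a non-zero prio
 * with tcm_handle == 0 removes the entire tcf_proto instance; a non-zero
 * handle deletes a single filter, and the tcf_proto itself is removed
 * afterwards only if its ->delete() op reports via 'last' that it is empty.
 */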
2361 
2362 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2363 			  struct netlink_ext_ack *extack)
2364 {
2365 	struct net *net = sock_net(skb->sk);
2366 	struct nlattr *tca[TCA_MAX + 1];
2367 	char name[IFNAMSIZ];
2368 	struct tcmsg *t;
2369 	u32 protocol;
2370 	u32 prio;
2371 	u32 parent;
2372 	u32 chain_index;
2373 	struct Qdisc *q = NULL;
2374 	struct tcf_chain_info chain_info;
2375 	struct tcf_chain *chain = NULL;
2376 	struct tcf_block *block = NULL;
2377 	struct tcf_proto *tp = NULL;
2378 	unsigned long cl = 0;
2379 	void *fh = NULL;
2380 	int err;
2381 	bool rtnl_held = false;
2382 
2383 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2384 				     rtm_tca_policy, extack);
2385 	if (err < 0)
2386 		return err;
2387 
2388 	t = nlmsg_data(n);
2389 	protocol = TC_H_MIN(t->tcm_info);
2390 	prio = TC_H_MAJ(t->tcm_info);
2391 	parent = t->tcm_parent;
2392 
2393 	if (prio == 0) {
2394 		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2395 		return -ENOENT;
2396 	}
2397 
2398 	/* Find head of filter chain. */
2399 
2400 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2401 	if (err)
2402 		return err;
2403 
2404 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2405 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2406 		err = -EINVAL;
2407 		goto errout;
2408 	}
2409 	/* Take the rtnl mutex if the block is shared (no qdisc found), the
2410 	 * qdisc is not unlocked, the classifier type is not specified, or the
2411 	 * classifier is not unlocked.
2412 	 */
2413 	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2414 	    !tcf_proto_is_unlocked(name)) {
2415 		rtnl_held = true;
2416 		rtnl_lock();
2417 	}
2418 
2419 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2420 	if (err)
2421 		goto errout;
2422 
2423 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2424 				 extack);
2425 	if (IS_ERR(block)) {
2426 		err = PTR_ERR(block);
2427 		goto errout;
2428 	}
2429 
2430 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2431 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2432 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2433 		err = -EINVAL;
2434 		goto errout;
2435 	}
2436 	chain = tcf_chain_get(block, chain_index, false);
2437 	if (!chain) {
2438 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2439 		err = -EINVAL;
2440 		goto errout;
2441 	}
2442 
2443 	mutex_lock(&chain->filter_chain_lock);
2444 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2445 			       prio, false);
2446 	mutex_unlock(&chain->filter_chain_lock);
2447 	if (!tp || IS_ERR(tp)) {
2448 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2449 		err = tp ? PTR_ERR(tp) : -ENOENT;
2450 		goto errout;
2451 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2452 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2453 		err = -EINVAL;
2454 		goto errout;
2455 	}
2456 
2457 	fh = tp->ops->get(tp, t->tcm_handle);
2458 
2459 	if (!fh) {
2460 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2461 		err = -ENOENT;
2462 	} else {
2463 		err = tfilter_notify(net, skb, n, tp, block, q, parent,
2464 				     fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
2465 		if (err < 0)
2466 			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2467 	}
2468 
2469 	tfilter_put(tp, fh);
2470 errout:
2471 	if (chain) {
2472 		if (tp && !IS_ERR(tp))
2473 			tcf_proto_put(tp, rtnl_held, NULL);
2474 		tcf_chain_put(chain);
2475 	}
2476 	tcf_block_release(q, block, rtnl_held);
2477 
2478 	if (rtnl_held)
2479 		rtnl_unlock();
2480 
2481 	return err;
2482 }
2483 
2484 struct tcf_dump_args {
2485 	struct tcf_walker w;
2486 	struct sk_buff *skb;
2487 	struct netlink_callback *cb;
2488 	struct tcf_block *block;
2489 	struct Qdisc *q;
2490 	u32 parent;
2491 	bool terse_dump;
2492 };
2493 
2494 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2495 {
2496 	struct tcf_dump_args *a = (void *)arg;
2497 	struct net *net = sock_net(a->skb->sk);
2498 
2499 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2500 			     n, NETLINK_CB(a->cb->skb).portid,
2501 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2502 			     RTM_NEWTFILTER, a->terse_dump, true, NULL);
2503 }
2504 
2505 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2506 			   struct sk_buff *skb, struct netlink_callback *cb,
2507 			   long index_start, long *p_index, bool terse)
2508 {
2509 	struct net *net = sock_net(skb->sk);
2510 	struct tcf_block *block = chain->block;
2511 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2512 	struct tcf_proto *tp, *tp_prev;
2513 	struct tcf_dump_args arg;
2514 
2515 	for (tp = __tcf_get_next_proto(chain, NULL);
2516 	     tp;
2517 	     tp_prev = tp,
2518 		     tp = __tcf_get_next_proto(chain, tp),
2519 		     tcf_proto_put(tp_prev, true, NULL),
2520 		     (*p_index)++) {
2521 		if (*p_index < index_start)
2522 			continue;
2523 		if (TC_H_MAJ(tcm->tcm_info) &&
2524 		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
2525 			continue;
2526 		if (TC_H_MIN(tcm->tcm_info) &&
2527 		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
2528 			continue;
2529 		if (*p_index > index_start)
2530 			memset(&cb->args[1], 0,
2531 			       sizeof(cb->args) - sizeof(cb->args[0]));
2532 		if (cb->args[1] == 0) {
2533 			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2534 					  NETLINK_CB(cb->skb).portid,
2535 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
2536 					  RTM_NEWTFILTER, false, true, NULL) <= 0)
2537 				goto errout;
2538 			cb->args[1] = 1;
2539 		}
2540 		if (!tp->ops->walk)
2541 			continue;
2542 		arg.w.fn = tcf_node_dump;
2543 		arg.skb = skb;
2544 		arg.cb = cb;
2545 		arg.block = block;
2546 		arg.q = q;
2547 		arg.parent = parent;
2548 		arg.w.stop = 0;
2549 		arg.w.skip = cb->args[1] - 1;
2550 		arg.w.count = 0;
2551 		arg.w.cookie = cb->args[2];
2552 		arg.terse_dump = terse;
2553 		tp->ops->walk(tp, &arg.w, true);
2554 		cb->args[2] = arg.w.cookie;
2555 		cb->args[1] = arg.w.count + 1;
2556 		if (arg.w.stop)
2557 			goto errout;
2558 	}
2559 	return true;
2560 
2561 errout:
2562 	tcf_proto_put(tp, true, NULL);
2563 	return false;
2564 }
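
/* Dump resume state shared by tcf_chain_dump() and tc_dump_tfilter(),
 * documented here for convenience (the layout follows directly from the
 * code above and below): cb->args[0] holds the flat proto index to resume
 * from, cb->args[1] holds 1 + the number of filters already dumped from the
 * current tcf_proto (0 means its header has not been emitted yet), and
 * cb->args[2] carries the classifier walker's opaque resume cookie.
 */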
2565 
2566 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2567 	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2568 };
2569 
2570 /* called with RTNL */
2571 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2572 {
2573 	struct tcf_chain *chain, *chain_prev;
2574 	struct net *net = sock_net(skb->sk);
2575 	struct nlattr *tca[TCA_MAX + 1];
2576 	struct Qdisc *q = NULL;
2577 	struct tcf_block *block;
2578 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2579 	bool terse_dump = false;
2580 	long index_start;
2581 	long index;
2582 	u32 parent;
2583 	int err;
2584 
2585 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2586 		return skb->len;
2587 
2588 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2589 				     tcf_tfilter_dump_policy, cb->extack);
2590 	if (err)
2591 		return err;
2592 
2593 	if (tca[TCA_DUMP_FLAGS]) {
2594 		struct nla_bitfield32 flags =
2595 			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2596 
2597 		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2598 	}
2599 
2600 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2601 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2602 		if (!block)
2603 			goto out;
2604 		/* If we work with a block index, q is NULL and the parent value
2605 		 * will never be used in the following code. The check
2606 		 * in tcf_fill_node prevents it. However, the compiler does not
2607 		 * see that far, so set parent to zero to silence the warning
2608 		 * about parent being uninitialized.
2609 		 */
2610 		parent = 0;
2611 	} else {
2612 		const struct Qdisc_class_ops *cops;
2613 		struct net_device *dev;
2614 		unsigned long cl = 0;
2615 
2616 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2617 		if (!dev)
2618 			return skb->len;
2619 
2620 		parent = tcm->tcm_parent;
2621 		if (!parent)
2622 			q = rtnl_dereference(dev->qdisc);
2623 		else
2624 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2625 		if (!q)
2626 			goto out;
2627 		cops = q->ops->cl_ops;
2628 		if (!cops)
2629 			goto out;
2630 		if (!cops->tcf_block)
2631 			goto out;
2632 		if (TC_H_MIN(tcm->tcm_parent)) {
2633 			cl = cops->find(q, tcm->tcm_parent);
2634 			if (cl == 0)
2635 				goto out;
2636 		}
2637 		block = cops->tcf_block(q, cl, NULL);
2638 		if (!block)
2639 			goto out;
2640 		parent = block->classid;
2641 		if (tcf_block_shared(block))
2642 			q = NULL;
2643 	}
2644 
2645 	index_start = cb->args[0];
2646 	index = 0;
2647 
2648 	for (chain = __tcf_get_next_chain(block, NULL);
2649 	     chain;
2650 	     chain_prev = chain,
2651 		     chain = __tcf_get_next_chain(block, chain),
2652 		     tcf_chain_put(chain_prev)) {
2653 		if (tca[TCA_CHAIN] &&
2654 		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2655 			continue;
2656 		if (!tcf_chain_dump(chain, q, parent, skb, cb,
2657 				    index_start, &index, terse_dump)) {
2658 			tcf_chain_put(chain);
2659 			err = -EMSGSIZE;
2660 			break;
2661 		}
2662 	}
2663 
2664 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2665 		tcf_block_refcnt_put(block, true);
2666 	cb->args[0] = index;
2667 
2668 out:
2669 	/* If we made no progress, the error (EMSGSIZE) is real */
2670 	if (skb->len == 0 && err)
2671 		return err;
2672 	return skb->len;
2673 }
2674 
2675 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2676 			      void *tmplt_priv, u32 chain_index,
2677 			      struct net *net, struct sk_buff *skb,
2678 			      struct tcf_block *block,
2679 			      u32 portid, u32 seq, u16 flags, int event,
2680 			      struct netlink_ext_ack *extack)
2681 {
2682 	unsigned char *b = skb_tail_pointer(skb);
2683 	const struct tcf_proto_ops *ops;
2684 	struct nlmsghdr *nlh;
2685 	struct tcmsg *tcm;
2686 	void *priv;
2687 
2688 	ops = tmplt_ops;
2689 	priv = tmplt_priv;
2690 
2691 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2692 	if (!nlh)
2693 		goto out_nlmsg_trim;
2694 	tcm = nlmsg_data(nlh);
2695 	tcm->tcm_family = AF_UNSPEC;
2696 	tcm->tcm__pad1 = 0;
2697 	tcm->tcm__pad2 = 0;
2698 	tcm->tcm_handle = 0;
2699 	if (block->q) {
2700 		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2701 		tcm->tcm_parent = block->q->handle;
2702 	} else {
2703 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2704 		tcm->tcm_block_index = block->index;
2705 	}
2706 
2707 	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2708 		goto nla_put_failure;
2709 
2710 	if (ops) {
2711 		if (nla_put_string(skb, TCA_KIND, ops->kind))
2712 			goto nla_put_failure;
2713 		if (ops->tmplt_dump(skb, net, priv) < 0)
2714 			goto nla_put_failure;
2715 	}
2716 
2717 	if (extack && extack->_msg &&
2718 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2719 		goto out_nlmsg_trim;
2720 
2721 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2722 
2723 	return skb->len;
2724 
2725 out_nlmsg_trim:
2726 nla_put_failure:
2727 	nlmsg_trim(skb, b);
2728 	return -EMSGSIZE;
2729 }
2730 
2731 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2732 			   u32 seq, u16 flags, int event, bool unicast,
2733 			   struct netlink_ext_ack *extack)
2734 {
2735 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2736 	struct tcf_block *block = chain->block;
2737 	struct net *net = block->net;
2738 	struct sk_buff *skb;
2739 	int err = 0;
2740 
2741 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2742 	if (!skb)
2743 		return -ENOBUFS;
2744 
2745 	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2746 			       chain->index, net, skb, block, portid,
2747 			       seq, flags, event, extack) <= 0) {
2748 		kfree_skb(skb);
2749 		return -EINVAL;
2750 	}
2751 
2752 	if (unicast)
2753 		err = rtnl_unicast(skb, net, portid);
2754 	else
2755 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2756 				     flags & NLM_F_ECHO);
2757 
2758 	return err;
2759 }
2760 
2761 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2762 				  void *tmplt_priv, u32 chain_index,
2763 				  struct tcf_block *block, struct sk_buff *oskb,
2764 				  u32 seq, u16 flags, bool unicast)
2765 {
2766 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2767 	struct net *net = block->net;
2768 	struct sk_buff *skb;
2769 
2770 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2771 	if (!skb)
2772 		return -ENOBUFS;
2773 
2774 	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2775 			       block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
2776 		kfree_skb(skb);
2777 		return -EINVAL;
2778 	}
2779 
2780 	if (unicast)
2781 		return rtnl_unicast(skb, net, portid);
2782 
2783 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2784 }
2785 
2786 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2787 			      struct nlattr **tca,
2788 			      struct netlink_ext_ack *extack)
2789 {
2790 	const struct tcf_proto_ops *ops;
2791 	char name[IFNAMSIZ];
2792 	void *tmplt_priv;
2793 
2794 	/* If kind is not set, the user did not specify a template. */
2795 	if (!tca[TCA_KIND])
2796 		return 0;
2797 
2798 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2799 		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2800 		return -EINVAL;
2801 	}
2802 
2803 	ops = tcf_proto_lookup_ops(name, true, extack);
2804 	if (IS_ERR(ops))
2805 		return PTR_ERR(ops);
2806 	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2807 		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2808 		return -EOPNOTSUPP;
2809 	}
2810 
2811 	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2812 	if (IS_ERR(tmplt_priv)) {
2813 		module_put(ops->owner);
2814 		return PTR_ERR(tmplt_priv);
2815 	}
2816 	chain->tmplt_ops = ops;
2817 	chain->tmplt_priv = tmplt_priv;
2818 	return 0;
2819 }
2820 
2821 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2822 			       void *tmplt_priv)
2823 {
2824 	/* If template ops are not set, there is no work to do for us. */
2825 	if (!tmplt_ops)
2826 		return;
2827 
2828 	tmplt_ops->tmplt_destroy(tmplt_priv);
2829 	module_put(tmplt_ops->owner);
2830 }
2831 
2832 /* Add/delete/get a chain */
2833 
2834 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2835 			struct netlink_ext_ack *extack)
2836 {
2837 	struct net *net = sock_net(skb->sk);
2838 	struct nlattr *tca[TCA_MAX + 1];
2839 	struct tcmsg *t;
2840 	u32 parent;
2841 	u32 chain_index;
2842 	struct Qdisc *q;
2843 	struct tcf_chain *chain;
2844 	struct tcf_block *block;
2845 	unsigned long cl;
2846 	int err;
2847 
2848 replay:
2849 	q = NULL;
2850 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2851 				     rtm_tca_policy, extack);
2852 	if (err < 0)
2853 		return err;
2854 
2855 	t = nlmsg_data(n);
2856 	parent = t->tcm_parent;
2857 	cl = 0;
2858 
2859 	block = tcf_block_find(net, &q, &parent, &cl,
2860 			       t->tcm_ifindex, t->tcm_block_index, extack);
2861 	if (IS_ERR(block))
2862 		return PTR_ERR(block);
2863 
2864 	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2865 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2866 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2867 		err = -EINVAL;
2868 		goto errout_block;
2869 	}
2870 
2871 	mutex_lock(&block->lock);
2872 	chain = tcf_chain_lookup(block, chain_index);
2873 	if (n->nlmsg_type == RTM_NEWCHAIN) {
2874 		if (chain) {
2875 			if (tcf_chain_held_by_acts_only(chain)) {
2876 				/* The chain exists only because there is
2877 				 * some action referencing it.
2878 				 */
2879 				tcf_chain_hold(chain);
2880 			} else {
2881 				NL_SET_ERR_MSG(extack, "Filter chain already exists");
2882 				err = -EEXIST;
2883 				goto errout_block_locked;
2884 			}
2885 		} else {
2886 			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2887 				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2888 				err = -ENOENT;
2889 				goto errout_block_locked;
2890 			}
2891 			chain = tcf_chain_create(block, chain_index);
2892 			if (!chain) {
2893 				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2894 				err = -ENOMEM;
2895 				goto errout_block_locked;
2896 			}
2897 		}
2898 	} else {
2899 		if (!chain || tcf_chain_held_by_acts_only(chain)) {
2900 			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2901 			err = -EINVAL;
2902 			goto errout_block_locked;
2903 		}
2904 		tcf_chain_hold(chain);
2905 	}
2906 
2907 	if (n->nlmsg_type == RTM_NEWCHAIN) {
2908 		/* Modifying chain requires holding parent block lock. In case
2909 		 * the chain was successfully added, take a reference to the
2910 		 * chain. This ensures that an empty chain does not disappear at
2911 		 * the end of this function.
2912 		 */
2913 		tcf_chain_hold(chain);
2914 		chain->explicitly_created = true;
2915 	}
2916 	mutex_unlock(&block->lock);
2917 
2918 	switch (n->nlmsg_type) {
2919 	case RTM_NEWCHAIN:
2920 		err = tc_chain_tmplt_add(chain, net, tca, extack);
2921 		if (err) {
2922 			tcf_chain_put_explicitly_created(chain);
2923 			goto errout;
2924 		}
2925 
2926 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2927 				RTM_NEWCHAIN, false, extack);
2928 		break;
2929 	case RTM_DELCHAIN:
2930 		tfilter_notify_chain(net, skb, block, q, parent, n,
2931 				     chain, RTM_DELTFILTER, extack);
2932 		/* Flush the chain first as the user requested chain removal. */
2933 		tcf_chain_flush(chain, true);
2934 		/* In case the chain was successfully deleted, put a reference
2935 		 * to the chain previously taken during addition.
2936 		 */
2937 		tcf_chain_put_explicitly_created(chain);
2938 		break;
2939 	case RTM_GETCHAIN:
2940 		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2941 				      n->nlmsg_flags, n->nlmsg_type, true, extack);
2942 		if (err < 0)
2943 			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2944 		break;
2945 	default:
2946 		err = -EOPNOTSUPP;
2947 		NL_SET_ERR_MSG(extack, "Unsupported message type");
2948 		goto errout;
2949 	}
2950 
2951 errout:
2952 	tcf_chain_put(chain);
2953 errout_block:
2954 	tcf_block_release(q, block, true);
2955 	if (err == -EAGAIN)
2956 		/* Replay the request. */
2957 		goto replay;
2958 	return err;
2959 
2960 errout_block_locked:
2961 	mutex_unlock(&block->lock);
2962 	goto errout_block;
2963 }
2964 
2965 /* called with RTNL */
2966 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2967 {
2968 	struct net *net = sock_net(skb->sk);
2969 	struct nlattr *tca[TCA_MAX + 1];
2970 	struct Qdisc *q = NULL;
2971 	struct tcf_block *block;
2972 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2973 	struct tcf_chain *chain;
2974 	long index_start;
2975 	long index;
2976 	int err;
2977 
2978 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2979 		return skb->len;
2980 
2981 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2982 				     rtm_tca_policy, cb->extack);
2983 	if (err)
2984 		return err;
2985 
2986 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2987 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2988 		if (!block)
2989 			goto out;
2990 	} else {
2991 		const struct Qdisc_class_ops *cops;
2992 		struct net_device *dev;
2993 		unsigned long cl = 0;
2994 
2995 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2996 		if (!dev)
2997 			return skb->len;
2998 
2999 		if (!tcm->tcm_parent)
3000 			q = rtnl_dereference(dev->qdisc);
3001 		else
3002 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3003 
3004 		if (!q)
3005 			goto out;
3006 		cops = q->ops->cl_ops;
3007 		if (!cops)
3008 			goto out;
3009 		if (!cops->tcf_block)
3010 			goto out;
3011 		if (TC_H_MIN(tcm->tcm_parent)) {
3012 			cl = cops->find(q, tcm->tcm_parent);
3013 			if (cl == 0)
3014 				goto out;
3015 		}
3016 		block = cops->tcf_block(q, cl, NULL);
3017 		if (!block)
3018 			goto out;
3019 		if (tcf_block_shared(block))
3020 			q = NULL;
3021 	}
3022 
3023 	index_start = cb->args[0];
3024 	index = 0;
3025 
3026 	mutex_lock(&block->lock);
3027 	list_for_each_entry(chain, &block->chain_list, list) {
3028 		if ((tca[TCA_CHAIN] &&
3029 		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3030 			continue;
3031 		if (index < index_start) {
3032 			index++;
3033 			continue;
3034 		}
3035 		if (tcf_chain_held_by_acts_only(chain))
3036 			continue;
3037 		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3038 					 chain->index, net, skb, block,
3039 					 NETLINK_CB(cb->skb).portid,
3040 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3041 					 RTM_NEWCHAIN, NULL);
3042 		if (err <= 0)
3043 			break;
3044 		index++;
3045 	}
3046 	mutex_unlock(&block->lock);
3047 
3048 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3049 		tcf_block_refcnt_put(block, true);
3050 	cb->args[0] = index;
3051 
3052 out:
3053 	/* If we made no progress, the error (EMSGSIZE) is real */
3054 	if (skb->len == 0 && err)
3055 		return err;
3056 	return skb->len;
3057 }
3058 
3059 void tcf_exts_destroy(struct tcf_exts *exts)
3060 {
3061 #ifdef CONFIG_NET_CLS_ACT
3062 	if (exts->actions) {
3063 		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3064 		kfree(exts->actions);
3065 	}
3066 	exts->nr_actions = 0;
3067 #endif
3068 }
3069 EXPORT_SYMBOL(tcf_exts_destroy);
3070 
3071 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3072 			 struct nlattr *rate_tlv, struct tcf_exts *exts,
3073 			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3074 {
3075 #ifdef CONFIG_NET_CLS_ACT
3076 	{
3077 		int init_res[TCA_ACT_MAX_PRIO] = {};
3078 		struct tc_action *act;
3079 		size_t attr_size = 0;
3080 
3081 		if (exts->police && tb[exts->police]) {
3082 			struct tc_action_ops *a_o;
3083 
3084 			a_o = tc_action_load_ops(tb[exts->police], true,
3085 						 !(flags & TCA_ACT_FLAGS_NO_RTNL),
3086 						 extack);
3087 			if (IS_ERR(a_o))
3088 				return PTR_ERR(a_o);
3089 			flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3090 			act = tcf_action_init_1(net, tp, tb[exts->police],
3091 						rate_tlv, a_o, init_res, flags,
3092 						extack);
3093 			module_put(a_o->owner);
3094 			if (IS_ERR(act))
3095 				return PTR_ERR(act);
3096 
3097 			act->type = exts->type = TCA_OLD_COMPAT;
3098 			exts->actions[0] = act;
3099 			exts->nr_actions = 1;
3100 			tcf_idr_insert_many(exts->actions);
3101 		} else if (exts->action && tb[exts->action]) {
3102 			int err;
3103 
3104 			flags |= TCA_ACT_FLAGS_BIND;
3105 			err = tcf_action_init(net, tp, tb[exts->action],
3106 					      rate_tlv, exts->actions, init_res,
3107 					      &attr_size, flags, fl_flags,
3108 					      extack);
3109 			if (err < 0)
3110 				return err;
3111 			exts->nr_actions = err;
3112 		}
3113 	}
3114 #else
3115 	if ((exts->action && tb[exts->action]) ||
3116 	    (exts->police && tb[exts->police])) {
3117 		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3118 		return -EOPNOTSUPP;
3119 	}
3120 #endif
3121 
3122 	return 0;
3123 }
3124 EXPORT_SYMBOL(tcf_exts_validate_ex);
3125 
3126 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3127 		      struct nlattr *rate_tlv, struct tcf_exts *exts,
3128 		      u32 flags, struct netlink_ext_ack *extack)
3129 {
3130 	return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3131 				    flags, 0, extack);
3132 }
3133 EXPORT_SYMBOL(tcf_exts_validate);
3134 
3135 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3136 {
3137 #ifdef CONFIG_NET_CLS_ACT
3138 	struct tcf_exts old = *dst;
3139 
3140 	*dst = *src;
3141 	tcf_exts_destroy(&old);
3142 #endif
3143 }
3144 EXPORT_SYMBOL(tcf_exts_change);
3145 
3146 #ifdef CONFIG_NET_CLS_ACT
3147 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3148 {
3149 	if (exts->nr_actions == 0)
3150 		return NULL;
3151 	else
3152 		return exts->actions[0];
3153 }
3154 #endif
3155 
3156 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3157 {
3158 #ifdef CONFIG_NET_CLS_ACT
3159 	struct nlattr *nest;
3160 
3161 	if (exts->action && tcf_exts_has_actions(exts)) {
3162 		/*
3163 		 * Again for backward-compatible mode: we want to work
3164 		 * with both the old and new modes of entering tc data,
3165 		 * even if iproute2 was newer - jhs
3166 		 */
3167 		if (exts->type != TCA_OLD_COMPAT) {
3168 			nest = nla_nest_start_noflag(skb, exts->action);
3169 			if (nest == NULL)
3170 				goto nla_put_failure;
3171 
3172 			if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3173 			    < 0)
3174 				goto nla_put_failure;
3175 			nla_nest_end(skb, nest);
3176 		} else if (exts->police) {
3177 			struct tc_action *act = tcf_exts_first_act(exts);
3178 			nest = nla_nest_start_noflag(skb, exts->police);
3179 			if (nest == NULL || !act)
3180 				goto nla_put_failure;
3181 			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3182 				goto nla_put_failure;
3183 			nla_nest_end(skb, nest);
3184 		}
3185 	}
3186 	return 0;
3187 
3188 nla_put_failure:
3189 	nla_nest_cancel(skb, nest);
3190 	return -1;
3191 #else
3192 	return 0;
3193 #endif
3194 }
3195 EXPORT_SYMBOL(tcf_exts_dump);
3196 
3197 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3198 {
3199 #ifdef CONFIG_NET_CLS_ACT
3200 	struct nlattr *nest;
3201 
3202 	if (!exts->action || !tcf_exts_has_actions(exts))
3203 		return 0;
3204 
3205 	nest = nla_nest_start_noflag(skb, exts->action);
3206 	if (!nest)
3207 		goto nla_put_failure;
3208 
3209 	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3210 		goto nla_put_failure;
3211 	nla_nest_end(skb, nest);
3212 	return 0;
3213 
3214 nla_put_failure:
3215 	nla_nest_cancel(skb, nest);
3216 	return -1;
3217 #else
3218 	return 0;
3219 #endif
3220 }
3221 EXPORT_SYMBOL(tcf_exts_terse_dump);
3222 
3223 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3224 {
3225 #ifdef CONFIG_NET_CLS_ACT
3226 	struct tc_action *a = tcf_exts_first_act(exts);
3227 	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3228 		return -1;
3229 #endif
3230 	return 0;
3231 }
3232 EXPORT_SYMBOL(tcf_exts_dump_stats);
3233 
3234 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3235 {
3236 	if (*flags & TCA_CLS_FLAGS_IN_HW)
3237 		return;
3238 	*flags |= TCA_CLS_FLAGS_IN_HW;
3239 	atomic_inc(&block->offloadcnt);
3240 }
3241 
3242 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3243 {
3244 	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3245 		return;
3246 	*flags &= ~TCA_CLS_FLAGS_IN_HW;
3247 	atomic_dec(&block->offloadcnt);
3248 }
3249 
3250 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3251 				      struct tcf_proto *tp, u32 *cnt,
3252 				      u32 *flags, u32 diff, bool add)
3253 {
3254 	lockdep_assert_held(&block->cb_lock);
3255 
3256 	spin_lock(&tp->lock);
3257 	if (add) {
3258 		if (!*cnt)
3259 			tcf_block_offload_inc(block, flags);
3260 		*cnt += diff;
3261 	} else {
3262 		*cnt -= diff;
3263 		if (!*cnt)
3264 			tcf_block_offload_dec(block, flags);
3265 	}
3266 	spin_unlock(&tp->lock);
3267 }
3268 
3269 static void
3270 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3271 			 u32 *cnt, u32 *flags)
3272 {
3273 	lockdep_assert_held(&block->cb_lock);
3274 
3275 	spin_lock(&tp->lock);
3276 	tcf_block_offload_dec(block, flags);
3277 	*cnt = 0;
3278 	spin_unlock(&tp->lock);
3279 }
3280 
3281 static int
3282 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3283 		   void *type_data, bool err_stop)
3284 {
3285 	struct flow_block_cb *block_cb;
3286 	int ok_count = 0;
3287 	int err;
3288 
3289 	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3290 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
3291 		if (err) {
3292 			if (err_stop)
3293 				return err;
3294 		} else {
3295 			ok_count++;
3296 		}
3297 	}
3298 	return ok_count;
3299 }
3300 
3301 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3302 		     void *type_data, bool err_stop, bool rtnl_held)
3303 {
3304 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3305 	int ok_count;
3306 
3307 retry:
3308 	if (take_rtnl)
3309 		rtnl_lock();
3310 	down_read(&block->cb_lock);
3311 	/* Need to obtain rtnl lock if block is bound to devs that require it.
3312 	 * In block bind code cb_lock is obtained while holding rtnl, so we must
3313 	 * obtain the locks in the same order here.
3314 	 */
3315 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3316 		up_read(&block->cb_lock);
3317 		take_rtnl = true;
3318 		goto retry;
3319 	}
3320 
3321 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3322 
3323 	up_read(&block->cb_lock);
3324 	if (take_rtnl)
3325 		rtnl_unlock();
3326 	return ok_count;
3327 }
3328 EXPORT_SYMBOL(tc_setup_cb_call);
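
/* A minimal sketch of the driver side that __tc_setup_cb_call() invokes.
 * Such a callback is registered with flow_block_cb_alloc() and
 * flow_block_cb_add() while handling TC_SETUP_BLOCK; everything named
 * mydrv_* is hypothetical:
 *
 *	static int mydrv_setup_tc_block_cb(enum tc_setup_type type,
 *					   void *type_data, void *cb_priv)
 *	{
 *		struct mydrv_priv *priv = cb_priv;
 *
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER:
 *			return mydrv_setup_flower(priv, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 * Each zero return counts toward ok_count; a negative return either aborts
 * the walk (err_stop) or is skipped.
 */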
3329 
3330 /* Non-destructive filter add. If a filter that wasn't already in hardware is
3331  * successfully offloaded, increment the block offloads counter. On failure,
3332  * the previously offloaded filter is considered intact and the offloads
3333  * counter is not decremented.
3334  */
3335 
3336 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3337 		    enum tc_setup_type type, void *type_data, bool err_stop,
3338 		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3339 {
3340 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3341 	int ok_count;
3342 
3343 retry:
3344 	if (take_rtnl)
3345 		rtnl_lock();
3346 	down_read(&block->cb_lock);
3347 	/* Need to obtain rtnl lock if block is bound to devs that require it.
3348 	 * In block bind code cb_lock is obtained while holding rtnl, so we must
3349 	 * obtain the locks in the same order here.
3350 	 */
3351 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3352 		up_read(&block->cb_lock);
3353 		take_rtnl = true;
3354 		goto retry;
3355 	}
3356 
3357 	/* Make sure all netdevs sharing this block are offload-capable. */
3358 	if (block->nooffloaddevcnt && err_stop) {
3359 		ok_count = -EOPNOTSUPP;
3360 		goto err_unlock;
3361 	}
3362 
3363 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3364 	if (ok_count < 0)
3365 		goto err_unlock;
3366 
3367 	if (tp->ops->hw_add)
3368 		tp->ops->hw_add(tp, type_data);
3369 	if (ok_count > 0)
3370 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3371 					  ok_count, true);
3372 err_unlock:
3373 	up_read(&block->cb_lock);
3374 	if (take_rtnl)
3375 		rtnl_unlock();
3376 	return min(ok_count, 0);
3377 }
3378 EXPORT_SYMBOL(tc_setup_cb_add);
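
/* A hedged sketch of how a classifier typically drives tc_setup_cb_add(),
 * loosely modeled on cls_flower; the fields and labels below are
 * illustrative rather than exact:
 *
 *	struct flow_cls_offload cls = { .command = FLOW_CLS_REPLACE, ... };
 *	bool skip_sw = tc_skip_sw(f->flags);
 *	int err;
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls, skip_sw,
 *			      &f->flags, &f->in_hw_count, rtnl_held);
 *	if (err)
 *		goto errout;			// nothing was offloaded
 *	if (skip_sw && !tc_in_hw(f->flags))
 *		err = -EINVAL;			// hardware offload was mandatory
 *
 * Note that err_stop is passed as skip_sw: when software fallback is
 * forbidden, the first failing callback must abort the whole add.
 */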
3379 
3380 /* Destructive filter replace. If a filter that wasn't already in hardware is
3381  * successfully offloaded, increment the block offload counter. On failure,
3382  * the previously offloaded filter is considered destroyed and the offload
3383  * counter is decremented.
3384  */
3385 
3386 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3387 			enum tc_setup_type type, void *type_data, bool err_stop,
3388 			u32 *old_flags, unsigned int *old_in_hw_count,
3389 			u32 *new_flags, unsigned int *new_in_hw_count,
3390 			bool rtnl_held)
3391 {
3392 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3393 	int ok_count;
3394 
3395 retry:
3396 	if (take_rtnl)
3397 		rtnl_lock();
3398 	down_read(&block->cb_lock);
3399 	/* Need to obtain rtnl lock if block is bound to devs that require it.
3400 	 * In block bind code cb_lock is obtained while holding rtnl, so we must
3401 	 * obtain the locks in the same order here.
3402 	 */
3403 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3404 		up_read(&block->cb_lock);
3405 		take_rtnl = true;
3406 		goto retry;
3407 	}
3408 
3409 	/* Make sure all netdevs sharing this block are offload-capable. */
3410 	if (block->nooffloaddevcnt && err_stop) {
3411 		ok_count = -EOPNOTSUPP;
3412 		goto err_unlock;
3413 	}
3414 
3415 	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3416 	if (tp->ops->hw_del)
3417 		tp->ops->hw_del(tp, type_data);
3418 
3419 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3420 	if (ok_count < 0)
3421 		goto err_unlock;
3422 
3423 	if (tp->ops->hw_add)
3424 		tp->ops->hw_add(tp, type_data);
3425 	if (ok_count > 0)
3426 		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3427 					  new_flags, ok_count, true);
3428 err_unlock:
3429 	up_read(&block->cb_lock);
3430 	if (take_rtnl)
3431 		rtnl_unlock();
3432 	return min(ok_count, 0);
3433 }
3434 EXPORT_SYMBOL(tc_setup_cb_replace);
3435 
3436 /* Destroy the filter and decrement the block offload counter if the filter
3437  * was previously offloaded.
3438  */
3439 
3440 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3441 			enum tc_setup_type type, void *type_data, bool err_stop,
3442 			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3443 {
3444 	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3445 	int ok_count;
3446 
3447 retry:
3448 	if (take_rtnl)
3449 		rtnl_lock();
3450 	down_read(&block->cb_lock);
3451 	/* Need to obtain rtnl lock if block is bound to devs that require it.
3452 	 * In block bind code cb_lock is obtained while holding rtnl, so we must
3453 	 * obtain the locks in the same order here.
3454 	 */
3455 	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3456 		up_read(&block->cb_lock);
3457 		take_rtnl = true;
3458 		goto retry;
3459 	}
3460 
3461 	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3462 
3463 	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3464 	if (tp->ops->hw_del)
3465 		tp->ops->hw_del(tp, type_data);
3466 
3467 	up_read(&block->cb_lock);
3468 	if (take_rtnl)
3469 		rtnl_unlock();
3470 	return min(ok_count, 0);
3471 }
3472 EXPORT_SYMBOL(tc_setup_cb_destroy);
3473 
3474 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3475 			  bool add, flow_setup_cb_t *cb,
3476 			  enum tc_setup_type type, void *type_data,
3477 			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3478 {
3479 	int err = cb(type, type_data, cb_priv);
3480 
3481 	if (err) {
3482 		if (add && tc_skip_sw(*flags))
3483 			return err;
3484 	} else {
3485 		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3486 					  add);
3487 	}
3488 
3489 	return 0;
3490 }
3491 EXPORT_SYMBOL(tc_setup_cb_reoffload);
3492 
3493 static int tcf_act_get_cookie(struct flow_action_entry *entry,
3494 			      const struct tc_action *act)
3495 {
3496 	struct tc_cookie *cookie;
3497 	int err = 0;
3498 
3499 	rcu_read_lock();
3500 	cookie = rcu_dereference(act->act_cookie);
3501 	if (cookie) {
3502 		entry->cookie = flow_action_cookie_create(cookie->data,
3503 							  cookie->len,
3504 							  GFP_ATOMIC);
3505 		if (!entry->cookie)
3506 			err = -ENOMEM;
3507 	}
3508 	rcu_read_unlock();
3509 	return err;
3510 }
3511 
3512 static void tcf_act_put_cookie(struct flow_action_entry *entry)
3513 {
3514 	flow_action_cookie_destroy(entry->cookie);
3515 }
3516 
3517 void tc_cleanup_offload_action(struct flow_action *flow_action)
3518 {
3519 	struct flow_action_entry *entry;
3520 	int i;
3521 
3522 	flow_action_for_each(i, entry, flow_action) {
3523 		tcf_act_put_cookie(entry);
3524 		if (entry->destructor)
3525 			entry->destructor(entry->destructor_priv);
3526 	}
3527 }
3528 EXPORT_SYMBOL(tc_cleanup_offload_action);
3529 
3530 static int tc_setup_offload_act(struct tc_action *act,
3531 				struct flow_action_entry *entry,
3532 				u32 *index_inc,
3533 				struct netlink_ext_ack *extack)
3534 {
3535 #ifdef CONFIG_NET_CLS_ACT
3536 	if (act->ops->offload_act_setup) {
3537 		return act->ops->offload_act_setup(act, entry, index_inc, true,
3538 						   extack);
3539 	} else {
3540 		NL_SET_ERR_MSG(extack, "Action does not support offload");
3541 		return -EOPNOTSUPP;
3542 	}
3543 #else
3544 	return 0;
3545 #endif
3546 }
3547 
3548 int tc_setup_action(struct flow_action *flow_action,
3549 		    struct tc_action *actions[],
3550 		    struct netlink_ext_ack *extack)
3551 {
3552 	int i, j, k, index, err = 0;
3553 	struct tc_action *act;
3554 
3555 	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3556 	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3557 	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3558 
3559 	if (!actions)
3560 		return 0;
3561 
3562 	j = 0;
3563 	tcf_act_for_each_action(i, act, actions) {
3564 		struct flow_action_entry *entry;
3565 
3566 		entry = &flow_action->entries[j];
3567 		spin_lock_bh(&act->tcfa_lock);
3568 		err = tcf_act_get_cookie(entry, act);
3569 		if (err)
3570 			goto err_out_locked;
3571 
3572 		index = 0;
3573 		err = tc_setup_offload_act(act, entry, &index, extack);
3574 		if (err)
3575 			goto err_out_locked;
3576 
3577 		for (k = 0; k < index; k++) {
3578 			entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3579 			entry[k].hw_index = act->tcfa_index;
3580 			entry[k].act_cookie = (unsigned long)act;
3581 		}
3582 
3583 		j += index;
3584 
3585 		spin_unlock_bh(&act->tcfa_lock);
3586 	}
3587 
3588 err_out:
3589 	if (err)
3590 		tc_cleanup_offload_action(flow_action);
3591 
3592 	return err;
3593 err_out_locked:
3594 	spin_unlock_bh(&act->tcfa_lock);
3595 	goto err_out;
3596 }
3597 
3598 int tc_setup_offload_action(struct flow_action *flow_action,
3599 			    const struct tcf_exts *exts,
3600 			    struct netlink_ext_ack *extack)
3601 {
3602 #ifdef CONFIG_NET_CLS_ACT
3603 	if (!exts)
3604 		return 0;
3605 
3606 	return tc_setup_action(flow_action, exts->actions, extack);
3607 #else
3608 	return 0;
3609 #endif
3610 }
3611 EXPORT_SYMBOL(tc_setup_offload_action);
3612 
3613 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3614 {
3615 	unsigned int num_acts = 0;
3616 	struct tc_action *act;
3617 	int i;
3618 
3619 	tcf_exts_for_each_action(i, act, exts) {
3620 		if (is_tcf_pedit(act))
3621 			num_acts += tcf_pedit_nkeys(act);
3622 		else
3623 			num_acts++;
3624 	}
3625 	return num_acts;
3626 }
3627 EXPORT_SYMBOL(tcf_exts_num_actions);
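
/* tcf_exts_num_actions() exists so callers can size a flow_action array
 * before translating actions for offload. A minimal sketch of the expected
 * pairing with tc_setup_offload_action(), using the real flow_rule_alloc()
 * helper and abbreviated error handling:
 *
 *	struct flow_rule *rule;
 *	int err;
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(exts));
 *	if (!rule)
 *		return -ENOMEM;
 *	err = tc_setup_offload_action(&rule->action, exts, extack);
 *
 * Pedit expands to one flow_action entry per key, which is why the count
 * above uses tcf_pedit_nkeys() instead of a plain action count.
 */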
3628 
3629 #ifdef CONFIG_NET_CLS_ACT
3630 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3631 					u32 *p_block_index,
3632 					struct netlink_ext_ack *extack)
3633 {
3634 	*p_block_index = nla_get_u32(block_index_attr);
3635 	if (!*p_block_index) {
3636 		NL_SET_ERR_MSG(extack, "Block number may not be zero");
3637 		return -EINVAL;
3638 	}
3639 
3640 	return 0;
3641 }
3642 
3643 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3644 		    enum flow_block_binder_type binder_type,
3645 		    struct nlattr *block_index_attr,
3646 		    struct netlink_ext_ack *extack)
3647 {
3648 	u32 block_index;
3649 	int err;
3650 
3651 	if (!block_index_attr)
3652 		return 0;
3653 
3654 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3655 	if (err)
3656 		return err;
3657 
3658 	qe->info.binder_type = binder_type;
3659 	qe->info.chain_head_change = tcf_chain_head_change_dflt;
3660 	qe->info.chain_head_change_priv = &qe->filter_chain;
3661 	qe->info.block_index = block_index;
3662 
3663 	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3664 }
3665 EXPORT_SYMBOL(tcf_qevent_init);
3666 
3667 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3668 {
3669 	if (qe->info.block_index)
3670 		tcf_block_put_ext(qe->block, sch, &qe->info);
3671 }
3672 EXPORT_SYMBOL(tcf_qevent_destroy);
3673 
3674 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3675 			       struct netlink_ext_ack *extack)
3676 {
3677 	u32 block_index;
3678 	int err;
3679 
3680 	if (!block_index_attr)
3681 		return 0;
3682 
3683 	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3684 	if (err)
3685 		return err;
3686 
3687 	/* Reject a newly configured block or a change of block. */
3688 	if (block_index != qe->info.block_index) {
3689 		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3690 		return -EINVAL;
3691 	}
3692 
3693 	return 0;
3694 }
3695 EXPORT_SYMBOL(tcf_qevent_validate_change);
3696 
3697 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3698 				  struct sk_buff **to_free, int *ret)
3699 {
3700 	struct tcf_result cl_res;
3701 	struct tcf_proto *fl;
3702 
3703 	if (!qe->info.block_index)
3704 		return skb;
3705 
3706 	fl = rcu_dereference_bh(qe->filter_chain);
3707 
3708 	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
3709 	case TC_ACT_SHOT:
3710 		qdisc_qstats_drop(sch);
3711 		__qdisc_drop(skb, to_free);
3712 		*ret = __NET_XMIT_BYPASS;
3713 		return NULL;
3714 	case TC_ACT_STOLEN:
3715 	case TC_ACT_QUEUED:
3716 	case TC_ACT_TRAP:
3717 		__qdisc_drop(skb, to_free);
3718 		*ret = __NET_XMIT_STOLEN;
3719 		return NULL;
3720 	case TC_ACT_REDIRECT:
3721 		skb_do_redirect(skb);
3722 		*ret = __NET_XMIT_STOLEN;
3723 		return NULL;
3724 	}
3725 
3726 	return skb;
3727 }
3728 EXPORT_SYMBOL(tcf_qevent_handle);
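
/* A sketch of the intended call site for tcf_qevent_handle(), modeled on
 * how sch_red hooks its qevents from the enqueue path; the qevent member
 * name is illustrative:
 *
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return NET_XMIT_CN | ret;	// as sch_red does
 *
 * A NULL return means the attached filters shot, stole or redirected the
 * packet and *ret was set accordingly; otherwise the caller continues with
 * the returned skb.
 */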
3729 
3730 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3731 {
3732 	if (!qe->info.block_index)
3733 		return 0;
3734 	return nla_put_u32(skb, attr_name, qe->info.block_index);
3735 }
3736 EXPORT_SYMBOL(tcf_qevent_dump);
3737 #endif
3738 
3739 static __net_init int tcf_net_init(struct net *net)
3740 {
3741 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3742 
3743 	spin_lock_init(&tn->idr_lock);
3744 	idr_init(&tn->idr);
3745 	return 0;
3746 }
3747 
3748 static void __net_exit tcf_net_exit(struct net *net)
3749 {
3750 	struct tcf_net *tn = net_generic(net, tcf_net_id);
3751 
3752 	idr_destroy(&tn->idr);
3753 }
3754 
3755 static struct pernet_operations tcf_net_ops = {
3756 	.init = tcf_net_init,
3757 	.exit = tcf_net_exit,
3758 	.id   = &tcf_net_id,
3759 	.size = sizeof(struct tcf_net),
3760 };
3761 
3762 static int __init tc_filter_init(void)
3763 {
3764 	int err;
3765 
3766 	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3767 	if (!tc_filter_wq)
3768 		return -ENOMEM;
3769 
3770 	err = register_pernet_subsys(&tcf_net_ops);
3771 	if (err)
3772 		goto err_register_pernet_subsys;
3773 
3774 	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3775 		      RTNL_FLAG_DOIT_UNLOCKED);
3776 	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3777 		      RTNL_FLAG_DOIT_UNLOCKED);
3778 	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3779 		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3780 	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3781 	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3782 	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3783 		      tc_dump_chain, 0);
3784 
3785 	return 0;
3786 
3787 err_register_pernet_subsys:
3788 	destroy_workqueue(tc_filter_wq);
3789 	return err;
3790 }
3791 
3792 subsys_initcall(tc_filter_init);
3793