xref: /openbmc/linux/net/sched/act_api.c (revision fca3aa16)
1 /*
2  * net/sched/act_api.c	Packet action API.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Author:	Jamal Hadi Salim
10  *
11  *
12  */
13 
14 #include <linux/types.h>
15 #include <linux/kernel.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/slab.h>
19 #include <linux/skbuff.h>
20 #include <linux/init.h>
21 #include <linux/kmod.h>
22 #include <linux/err.h>
23 #include <linux/module.h>
24 #include <linux/rhashtable.h>
25 #include <linux/list.h>
26 #include <net/net_namespace.h>
27 #include <net/sock.h>
28 #include <net/sch_generic.h>
29 #include <net/pkt_cls.h>
30 #include <net/act_api.h>
31 #include <net/netlink.h>
32 
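/* "goto chain" is carried as an extended verdict: the TC_ACT_GOTO_CHAIN opcode
 * sits in the upper bits of tcfa_action and the target chain index in the low
 * TC_ACT_EXT_VAL_MASK bits, i.e. roughly (TC_ACT_GOTO_CHAIN | chain_index).
 * The helper below resolves that index against the filter's block; note that
 * tcf_chain_get() is called with create == true, so the chain is created on
 * demand if it does not exist yet.
 */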
33 static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
34 {
35 	u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;
36 
37 	if (!tp)
38 		return -EINVAL;
39 	a->goto_chain = tcf_chain_get(tp->chain->block, chain_index, true);
40 	if (!a->goto_chain)
41 		return -ENOMEM;
42 	return 0;
43 }
44 
45 static void tcf_action_goto_chain_fini(struct tc_action *a)
46 {
47 	tcf_chain_put(a->goto_chain);
48 }
49 
50 static void tcf_action_goto_chain_exec(const struct tc_action *a,
51 				       struct tcf_result *res)
52 {
53 	const struct tcf_chain *chain = a->goto_chain;
54 
55 	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
56 }
57 
58 /* XXX: For standalone actions, we don't need an RCU grace period either, because
59  * actions are always connected to filters and filters are already destroyed in
60  * RCU callbacks, so after an RCU grace period actions are already disconnected
61  * from filters. Later readers cannot find us.
62  */
63 static void free_tcf(struct tc_action *p)
64 {
65 	free_percpu(p->cpu_bstats);
66 	free_percpu(p->cpu_qstats);
67 
68 	if (p->act_cookie) {
69 		kfree(p->act_cookie->data);
70 		kfree(p->act_cookie);
71 	}
72 	if (p->goto_chain)
73 		tcf_action_goto_chain_fini(p);
74 
75 	kfree(p);
76 }
77 
78 static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
79 {
80 	spin_lock_bh(&idrinfo->lock);
81 	idr_remove(&idrinfo->action_idr, p->tcfa_index);
82 	spin_unlock_bh(&idrinfo->lock);
83 	gen_kill_estimator(&p->tcfa_rate_est);
84 	free_tcf(p);
85 }
86 
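/* Drop a caller reference (and, if @bind, a bind reference) on @p.  Once both
 * counts reach zero the action is cleaned up, removed from the IDR and freed,
 * and ACT_P_DELETED is returned so the caller can drop its module reference.
 * With @strict set, releasing an action that still has bindings fails with
 * -EPERM instead.
 */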
87 int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
88 {
89 	int ret = 0;
90 
91 	ASSERT_RTNL();
92 
93 	if (p) {
94 		if (bind)
95 			p->tcfa_bindcnt--;
96 		else if (strict && p->tcfa_bindcnt > 0)
97 			return -EPERM;
98 
99 		p->tcfa_refcnt--;
100 		if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
101 			if (p->ops->cleanup)
102 				p->ops->cleanup(p);
103 			tcf_idr_remove(p->idrinfo, p);
104 			ret = ACT_P_DELETED;
105 		}
106 	}
107 
108 	return ret;
109 }
110 EXPORT_SYMBOL(__tcf_idr_release);
111 
112 static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
113 {
114 	u32 cookie_len = 0;
115 
116 	if (act->act_cookie)
117 		cookie_len = nla_total_size(act->act_cookie->len);
118 
119 	return  nla_total_size(0) /* action number nested */
120 		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
121 		+ cookie_len /* TCA_ACT_COOKIE */
122 		+ nla_total_size(0) /* TCA_ACT_STATS nested */
123 		/* TCA_STATS_BASIC */
124 		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
125 		/* TCA_STATS_QUEUE */
126 		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
127 		+ nla_total_size(0) /* TCA_OPTIONS nested */
128 		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
129 }
130 
131 static size_t tcf_action_full_attrs_size(size_t sz)
132 {
133 	return NLMSG_HDRLEN                     /* struct nlmsghdr */
134 		+ sizeof(struct tcamsg)
135 		+ nla_total_size(0)             /* TCA_ACT_TAB nested */
136 		+ sz;
137 }
138 
139 static size_t tcf_action_fill_size(const struct tc_action *act)
140 {
141 	size_t sz = tcf_action_shared_attrs_size(act);
142 
143 	if (act->ops->get_fill_size)
144 		return act->ops->get_fill_size(act) + sz;
145 	return sz;
146 }
147 
148 static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
149 			   struct netlink_callback *cb)
150 {
151 	int err = 0, index = -1, s_i = 0, n_i = 0;
152 	u32 act_flags = cb->args[2];
153 	unsigned long jiffy_since = cb->args[3];
154 	struct nlattr *nest;
155 	struct idr *idr = &idrinfo->action_idr;
156 	struct tc_action *p;
157 	unsigned long id = 1;
158 
159 	spin_lock_bh(&idrinfo->lock);
160 
161 	s_i = cb->args[0];
162 
163 	idr_for_each_entry_ul(idr, p, id) {
164 		index++;
165 		if (index < s_i)
166 			continue;
167 
168 		if (jiffy_since &&
169 		    time_after(jiffy_since,
170 			       (unsigned long)p->tcfa_tm.lastuse))
171 			continue;
172 
173 		nest = nla_nest_start(skb, n_i);
174 		if (!nest) {
175 			index--;
176 			goto nla_put_failure;
177 		}
178 		err = tcf_action_dump_1(skb, p, 0, 0);
179 		if (err < 0) {
180 			index--;
181 			nlmsg_trim(skb, nest);
182 			goto done;
183 		}
184 		nla_nest_end(skb, nest);
185 		n_i++;
186 		if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
187 		    n_i >= TCA_ACT_MAX_PRIO)
188 			goto done;
189 	}
190 done:
191 	if (index >= 0)
192 		cb->args[0] = index + 1;
193 
194 	spin_unlock_bh(&idrinfo->lock);
195 	if (n_i) {
196 		if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
197 			cb->args[1] = n_i;
198 	}
199 	return n_i;
200 
201 nla_put_failure:
202 	nla_nest_cancel(skb, nest);
203 	goto done;
204 }
205 
206 static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
207 			  const struct tc_action_ops *ops)
208 {
209 	struct nlattr *nest;
210 	int n_i = 0;
211 	int ret = -EINVAL;
212 	struct idr *idr = &idrinfo->action_idr;
213 	struct tc_action *p;
214 	unsigned long id = 1;
215 
216 	nest = nla_nest_start(skb, 0);
217 	if (nest == NULL)
218 		goto nla_put_failure;
219 	if (nla_put_string(skb, TCA_KIND, ops->kind))
220 		goto nla_put_failure;
221 
222 	idr_for_each_entry_ul(idr, p, id) {
223 		ret = __tcf_idr_release(p, false, true);
224 		if (ret == ACT_P_DELETED) {
225 			module_put(ops->owner);
226 			n_i++;
227 		} else if (ret < 0) {
228 			goto nla_put_failure;
229 		}
230 	}
231 	if (nla_put_u32(skb, TCA_FCNT, n_i))
232 		goto nla_put_failure;
233 	nla_nest_end(skb, nest);
234 
235 	return n_i;
236 nla_put_failure:
237 	nla_nest_cancel(skb, nest);
238 	return ret;
239 }
240 
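/* Common .walk implementation shared by the individual actions.  A minimal
 * sketch of how an action module would typically wire it up (assuming a
 * hypothetical "foo" action with its own foo_net_id):
 *
 *	static int tcf_foo_walker(struct net *net, struct sk_buff *skb,
 *				  struct netlink_callback *cb, int type,
 *				  const struct tc_action_ops *ops,
 *				  struct netlink_ext_ack *extack)
 *	{
 *		struct tc_action_net *tn = net_generic(net, foo_net_id);
 *
 *		return tcf_generic_walker(tn, skb, cb, type, ops, extack);
 *	}
 */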
241 int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
242 		       struct netlink_callback *cb, int type,
243 		       const struct tc_action_ops *ops,
244 		       struct netlink_ext_ack *extack)
245 {
246 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
247 
248 	if (type == RTM_DELACTION) {
249 		return tcf_del_walker(idrinfo, skb, ops);
250 	} else if (type == RTM_GETACTION) {
251 		return tcf_dump_walker(idrinfo, skb, cb);
252 	} else {
253 		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
254 		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
255 		return -EINVAL;
256 	}
257 }
258 EXPORT_SYMBOL(tcf_generic_walker);
259 
260 static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo)
261 {
262 	struct tc_action *p = NULL;
263 
264 	spin_lock_bh(&idrinfo->lock);
265 	p = idr_find(&idrinfo->action_idr, index);
266 	spin_unlock_bh(&idrinfo->lock);
267 
268 	return p;
269 }
270 
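/* Look up an action of this kind by index.  Action modules typically use this
 * directly from their .lookup operation; a minimal sketch (hypothetical "foo"
 * action and foo_net_id assumed):
 *
 *	static int tcf_foo_search(struct net *net, struct tc_action **a,
 *				  u32 index, struct netlink_ext_ack *extack)
 *	{
 *		struct tc_action_net *tn = net_generic(net, foo_net_id);
 *
 *		return tcf_idr_search(tn, a, index);
 *	}
 */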
271 int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
272 {
273 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
274 	struct tc_action *p = tcf_idr_lookup(index, idrinfo);
275 
276 	if (p) {
277 		*a = p;
278 		return 1;
279 	}
280 	return 0;
281 }
282 EXPORT_SYMBOL(tcf_idr_search);
283 
284 bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
285 		   int bind)
286 {
287 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
288 	struct tc_action *p = tcf_idr_lookup(index, idrinfo);
289 
290 	if (index && p) {
291 		if (bind)
292 			p->tcfa_bindcnt++;
293 		p->tcfa_refcnt++;
294 		*a = p;
295 		return true;
296 	}
297 	return false;
298 }
299 EXPORT_SYMBOL(tcf_idr_check);
300 
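/* Allocate a new action instance, reserve its index in the per-netns IDR and
 * optionally attach percpu stats and a rate estimator.  The usual pattern in
 * an action's .init callback is check-then-create-then-insert; a rough sketch
 * for a hypothetical "foo" action (error handling and the bind-only path
 * trimmed):
 *
 *	if (!tcf_idr_check(tn, index, a, bind)) {
 *		ret = tcf_idr_create(tn, index, est, a, &act_foo_ops,
 *				     bind, false);
 *		if (ret)
 *			return ret;
 *		ret = ACT_P_CREATED;
 *	} else if (!ovr) {
 *		tcf_idr_release(*a, bind);
 *		return -EEXIST;
 *	}
 *	... parse the kind-specific parameters and set up the action ...
 *	if (ret == ACT_P_CREATED)
 *		tcf_idr_insert(tn, *a);
 *	return ret;
 */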
301 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
302 		   struct tc_action **a, const struct tc_action_ops *ops,
303 		   int bind, bool cpustats)
304 {
305 	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
306 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
307 	struct idr *idr = &idrinfo->action_idr;
308 	int err = -ENOMEM;
309 
310 	if (unlikely(!p))
311 		return -ENOMEM;
312 	p->tcfa_refcnt = 1;
313 	if (bind)
314 		p->tcfa_bindcnt = 1;
315 
316 	if (cpustats) {
317 		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
318 		if (!p->cpu_bstats)
319 			goto err1;
320 		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
321 		if (!p->cpu_qstats)
322 			goto err2;
323 	}
324 	spin_lock_init(&p->tcfa_lock);
325 	idr_preload(GFP_KERNEL);
326 	spin_lock_bh(&idrinfo->lock);
327 	/* user did not specify an index; allocate one dynamically */
328 	if (!index) {
329 		index = 1;
330 		err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC);
331 	} else {
332 		err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC);
333 	}
334 	spin_unlock_bh(&idrinfo->lock);
335 	idr_preload_end();
336 	if (err)
337 		goto err3;
338 
339 	p->tcfa_index = index;
340 	p->tcfa_tm.install = jiffies;
341 	p->tcfa_tm.lastuse = jiffies;
342 	p->tcfa_tm.firstuse = 0;
343 	if (est) {
344 		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
345 					&p->tcfa_rate_est,
346 					&p->tcfa_lock, NULL, est);
347 		if (err)
348 			goto err4;
349 	}
350 
351 	p->idrinfo = idrinfo;
352 	p->ops = ops;
353 	INIT_LIST_HEAD(&p->list);
354 	*a = p;
355 	return 0;
356 err4:
357 	idr_remove(idr, index);
358 err3:
359 	free_percpu(p->cpu_qstats);
360 err2:
361 	free_percpu(p->cpu_bstats);
362 err1:
363 	kfree(p);
364 	return err;
365 }
366 EXPORT_SYMBOL(tcf_idr_create);
367 
368 void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
369 {
370 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
371 
372 	spin_lock_bh(&idrinfo->lock);
373 	idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
374 	spin_unlock_bh(&idrinfo->lock);
375 }
376 EXPORT_SYMBOL(tcf_idr_insert);
377 
378 void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
379 			 struct tcf_idrinfo *idrinfo)
380 {
381 	struct idr *idr = &idrinfo->action_idr;
382 	struct tc_action *p;
383 	int ret;
384 	unsigned long id = 1;
385 
386 	idr_for_each_entry_ul(idr, p, id) {
387 		ret = __tcf_idr_release(p, false, true);
388 		if (ret == ACT_P_DELETED)
389 			module_put(ops->owner);
390 		else if (ret < 0)
391 			return;
392 	}
393 	idr_destroy(&idrinfo->action_idr);
394 }
395 EXPORT_SYMBOL(tcf_idrinfo_destroy);
396 
397 static LIST_HEAD(act_base);
398 static DEFINE_RWLOCK(act_mod_lock);
399 
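/* Register a new action kind.  An action module normally does this from its
 * module_init(), passing both its tc_action_ops and its pernet_operations.
 * A minimal sketch for a hypothetical "foo" action (act_foo_ops, TCA_ACT_FOO,
 * the tcf_foo_* callbacks and foo_net_ops are all placeholders):
 *
 *	static struct tc_action_ops act_foo_ops = {
 *		.kind	= "foo",
 *		.type	= TCA_ACT_FOO,
 *		.owner	= THIS_MODULE,
 *		.act	= tcf_foo_act,
 *		.dump	= tcf_foo_dump,
 *		.init	= tcf_foo_init,
 *		.walk	= tcf_foo_walker,
 *		.lookup	= tcf_foo_search,
 *		.size	= sizeof(struct tcf_foo),
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return tcf_register_action(&act_foo_ops, &foo_net_ops);
 *	}
 *
 * All of .act, .dump, .init, .walk and .lookup are mandatory, as checked
 * below.
 */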
400 int tcf_register_action(struct tc_action_ops *act,
401 			struct pernet_operations *ops)
402 {
403 	struct tc_action_ops *a;
404 	int ret;
405 
406 	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
407 		return -EINVAL;
408 
409 	/* We have to register pernet ops before making the action ops visible,
410 	 * otherwise tcf_action_init_1() could get a partially initialized
411 	 * netns.
412 	 */
413 	ret = register_pernet_subsys(ops);
414 	if (ret)
415 		return ret;
416 
417 	write_lock(&act_mod_lock);
418 	list_for_each_entry(a, &act_base, head) {
419 		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
420 			write_unlock(&act_mod_lock);
421 			unregister_pernet_subsys(ops);
422 			return -EEXIST;
423 		}
424 	}
425 	list_add_tail(&act->head, &act_base);
426 	write_unlock(&act_mod_lock);
427 
428 	return 0;
429 }
430 EXPORT_SYMBOL(tcf_register_action);
431 
432 int tcf_unregister_action(struct tc_action_ops *act,
433 			  struct pernet_operations *ops)
434 {
435 	struct tc_action_ops *a;
436 	int err = -ENOENT;
437 
438 	write_lock(&act_mod_lock);
439 	list_for_each_entry(a, &act_base, head) {
440 		if (a == act) {
441 			list_del(&act->head);
442 			err = 0;
443 			break;
444 		}
445 	}
446 	write_unlock(&act_mod_lock);
447 	if (!err)
448 		unregister_pernet_subsys(ops);
449 	return err;
450 }
451 EXPORT_SYMBOL(tcf_unregister_action);
452 
453 /* lookup by name */
454 static struct tc_action_ops *tc_lookup_action_n(char *kind)
455 {
456 	struct tc_action_ops *a, *res = NULL;
457 
458 	if (kind) {
459 		read_lock(&act_mod_lock);
460 		list_for_each_entry(a, &act_base, head) {
461 			if (strcmp(kind, a->kind) == 0) {
462 				if (try_module_get(a->owner))
463 					res = a;
464 				break;
465 			}
466 		}
467 		read_unlock(&act_mod_lock);
468 	}
469 	return res;
470 }
471 
472 /* lookup by nlattr */
473 static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
474 {
475 	struct tc_action_ops *a, *res = NULL;
476 
477 	if (kind) {
478 		read_lock(&act_mod_lock);
479 		list_for_each_entry(a, &act_base, head) {
480 			if (nla_strcmp(kind, a->kind) == 0) {
481 				if (try_module_get(a->owner))
482 					res = a;
483 				break;
484 			}
485 		}
486 		read_unlock(&act_mod_lock);
487 	}
488 	return res;
489 }
490 
491 /* TCA_ACT_MAX_PRIO is 32; the jump count can therefore go up to 32 */
492 #define TCA_ACT_MAX_PRIO_MASK 0x1FF
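/* tcf_action_exec() runs the ordered list of actions attached to a filter.
 * TC_ACT_JUMP and TC_ACT_GOTO_CHAIN are "extended" verdicts: the opcode sits
 * in the bits above the TC_ACT_EXT_VAL_MASK value part, while the low bits
 * carry the operand - the jump count (masked with TCA_ACT_MAX_PRIO_MASK
 * below) for a jump, or the target chain index for goto chain.  jmp_ttl
 * bounds how many times the graph may be restarted, so a faulty jump program
 * cannot loop forever.
 */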
493 int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
494 		    int nr_actions, struct tcf_result *res)
495 {
496 	u32 jmp_prgcnt = 0;
497 	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches the max number of actions per filter */
498 	int i;
499 	int ret = TC_ACT_OK;
500 
501 	if (skb_skip_tc_classify(skb))
502 		return TC_ACT_OK;
503 
504 restart_act_graph:
505 	for (i = 0; i < nr_actions; i++) {
506 		const struct tc_action *a = actions[i];
507 
508 		if (jmp_prgcnt > 0) {
509 			jmp_prgcnt -= 1;
510 			continue;
511 		}
512 repeat:
513 		ret = a->ops->act(skb, a, res);
514 		if (ret == TC_ACT_REPEAT)
515 			goto repeat;	/* we need a ttl - JHS */
516 
517 		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
518 			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
519 			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
520 				/* faulty opcode, stop pipeline */
521 				return TC_ACT_OK;
522 			} else {
523 				jmp_ttl -= 1;
524 				if (jmp_ttl > 0)
525 					goto restart_act_graph;
526 				else /* faulty graph, stop pipeline */
527 					return TC_ACT_OK;
528 			}
529 		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
530 			tcf_action_goto_chain_exec(a, res);
531 		}
532 
533 		if (ret != TC_ACT_PIPE)
534 			break;
535 	}
536 
537 	return ret;
538 }
539 EXPORT_SYMBOL(tcf_action_exec);
540 
541 int tcf_action_destroy(struct list_head *actions, int bind)
542 {
543 	const struct tc_action_ops *ops;
544 	struct tc_action *a, *tmp;
545 	int ret = 0;
546 
547 	list_for_each_entry_safe(a, tmp, actions, list) {
548 		ops = a->ops;
549 		ret = __tcf_idr_release(a, bind, true);
550 		if (ret == ACT_P_DELETED)
551 			module_put(ops->owner);
552 		else if (ret < 0)
553 			return ret;
554 	}
555 	return ret;
556 }
557 
558 int
559 tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
560 {
561 	return a->ops->dump(skb, a, bind, ref);
562 }
563 
564 int
565 tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
566 {
567 	int err = -EINVAL;
568 	unsigned char *b = skb_tail_pointer(skb);
569 	struct nlattr *nest;
570 
571 	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
572 		goto nla_put_failure;
573 	if (tcf_action_copy_stats(skb, a, 0))
574 		goto nla_put_failure;
575 	if (a->act_cookie) {
576 		if (nla_put(skb, TCA_ACT_COOKIE, a->act_cookie->len,
577 			    a->act_cookie->data))
578 			goto nla_put_failure;
579 	}
580 
581 	nest = nla_nest_start(skb, TCA_OPTIONS);
582 	if (nest == NULL)
583 		goto nla_put_failure;
584 	err = tcf_action_dump_old(skb, a, bind, ref);
585 	if (err > 0) {
586 		nla_nest_end(skb, nest);
587 		return err;
588 	}
589 
590 nla_put_failure:
591 	nlmsg_trim(skb, b);
592 	return -1;
593 }
594 EXPORT_SYMBOL(tcf_action_dump_1);
595 
596 int tcf_action_dump(struct sk_buff *skb, struct list_head *actions,
597 		    int bind, int ref)
598 {
599 	struct tc_action *a;
600 	int err = -EINVAL;
601 	struct nlattr *nest;
602 
603 	list_for_each_entry(a, actions, list) {
604 		nest = nla_nest_start(skb, a->order);
605 		if (nest == NULL)
606 			goto nla_put_failure;
607 		err = tcf_action_dump_1(skb, a, bind, ref);
608 		if (err < 0)
609 			goto errout;
610 		nla_nest_end(skb, nest);
611 	}
612 
613 	return 0;
614 
615 nla_put_failure:
616 	err = -EINVAL;
617 errout:
618 	nla_nest_cancel(skb, nest);
619 	return err;
620 }
621 
622 static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
623 {
624 	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
625 	if (!c)
626 		return NULL;
627 
628 	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
629 	if (!c->data) {
630 		kfree(c);
631 		return NULL;
632 	}
633 	c->len = nla_len(tb[TCA_ACT_COOKIE]);
634 
635 	return c;
636 }
637 
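/* Create (or bind to) a single action from one nested netlink attribute.
 * The nest carries TCA_ACT_KIND, the kind-specific TCA_ACT_OPTIONS and an
 * optional TCA_ACT_COOKIE.  If the matching tc_action_ops is not registered
 * yet, the act_<kind> module is requested with the RTNL lock dropped and
 * -EAGAIN is returned so that the whole request is replayed by the caller
 * (see tc_ctl_action()).
 */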
638 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
639 				    struct nlattr *nla, struct nlattr *est,
640 				    char *name, int ovr, int bind,
641 				    struct netlink_ext_ack *extack)
642 {
643 	struct tc_action *a;
644 	struct tc_action_ops *a_o;
645 	struct tc_cookie *cookie = NULL;
646 	char act_name[IFNAMSIZ];
647 	struct nlattr *tb[TCA_ACT_MAX + 1];
648 	struct nlattr *kind;
649 	int err;
650 
651 	if (name == NULL) {
652 		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, extack);
653 		if (err < 0)
654 			goto err_out;
655 		err = -EINVAL;
656 		kind = tb[TCA_ACT_KIND];
657 		if (!kind) {
658 			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
659 			goto err_out;
660 		}
661 		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
662 			NL_SET_ERR_MSG(extack, "TC action name too long");
663 			goto err_out;
664 		}
665 		if (tb[TCA_ACT_COOKIE]) {
666 			int cklen = nla_len(tb[TCA_ACT_COOKIE]);
667 
668 			if (cklen > TC_COOKIE_MAX_SIZE) {
669 				NL_SET_ERR_MSG(extack, "TC cookie size above the maximum");
670 				goto err_out;
671 			}
672 
673 			cookie = nla_memdup_cookie(tb);
674 			if (!cookie) {
675 				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
676 				err = -ENOMEM;
677 				goto err_out;
678 			}
679 		}
680 	} else {
681 		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
682 			NL_SET_ERR_MSG(extack, "TC action name too long");
683 			err = -EINVAL;
684 			goto err_out;
685 		}
686 	}
687 
688 	a_o = tc_lookup_action_n(act_name);
689 	if (a_o == NULL) {
690 #ifdef CONFIG_MODULES
691 		rtnl_unlock();
692 		request_module("act_%s", act_name);
693 		rtnl_lock();
694 
695 		a_o = tc_lookup_action_n(act_name);
696 
697 		/* We dropped the RTNL semaphore in order to
698 		 * perform the module load.  So, even if we
699 		 * succeeded in loading the module we have to
700 		 * tell the caller to replay the request.  We
701 		 * indicate this using -EAGAIN.
702 		 */
703 		if (a_o != NULL) {
704 			err = -EAGAIN;
705 			goto err_mod;
706 		}
707 #endif
708 		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
709 		err = -ENOENT;
710 		goto err_out;
711 	}
712 
713 	/* backward compatibility for policer */
714 	if (name == NULL)
715 		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
716 				extack);
717 	else
718 		err = a_o->init(net, nla, est, &a, ovr, bind, extack);
719 	if (err < 0)
720 		goto err_mod;
721 
722 	if (name == NULL && tb[TCA_ACT_COOKIE]) {
723 		if (a->act_cookie) {
724 			kfree(a->act_cookie->data);
725 			kfree(a->act_cookie);
726 		}
727 		a->act_cookie = cookie;
728 	}
729 
730 	/* The module refcount goes up only when a brand new policy is created;
731 	 * if the action already exists and is merely bound to in a_o->init(),
732 	 * ACT_P_CREATED is not returned (zero is returned instead).
733 	 */
734 	if (err != ACT_P_CREATED)
735 		module_put(a_o->owner);
736 
737 	if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
738 		err = tcf_action_goto_chain_init(a, tp);
739 		if (err) {
740 			LIST_HEAD(actions);
741 
742 			list_add_tail(&a->list, &actions);
743 			tcf_action_destroy(&actions, bind);
744 			NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
745 			return ERR_PTR(err);
746 		}
747 	}
748 
749 	return a;
750 
751 err_mod:
752 	module_put(a_o->owner);
753 err_out:
754 	if (cookie) {
755 		kfree(cookie->data);
756 		kfree(cookie);
757 	}
758 	return ERR_PTR(err);
759 }
760 
761 static void cleanup_a(struct list_head *actions, int ovr)
762 {
763 	struct tc_action *a;
764 
765 	if (!ovr)
766 		return;
767 
768 	list_for_each_entry(a, actions, list)
769 		a->tcfa_refcnt--;
770 }
771 
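/* Parse up to TCA_ACT_MAX_PRIO actions from a TCA_ACT_TAB style nest and
 * append them, in order, to @actions.  The attribute layout is roughly:
 *
 *	[TCA_ACT_TAB]
 *	    [1]				first action (becomes order 1)
 *		[TCA_ACT_KIND]		e.g. "gact"
 *		[TCA_ACT_OPTIONS]	kind-specific attributes
 *		[TCA_ACT_COOKIE]	optional opaque cookie
 *	    [2]				second action
 *		...
 *
 * On success *attr_size is set to an estimate of the attribute space needed
 * for the netlink notification.
 */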
772 int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
773 		    struct nlattr *est, char *name, int ovr, int bind,
774 		    struct list_head *actions, size_t *attr_size,
775 		    struct netlink_ext_ack *extack)
776 {
777 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
778 	struct tc_action *act;
779 	size_t sz = 0;
780 	int err;
781 	int i;
782 
783 	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
784 	if (err < 0)
785 		return err;
786 
787 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
788 		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
789 					extack);
790 		if (IS_ERR(act)) {
791 			err = PTR_ERR(act);
792 			goto err;
793 		}
794 		act->order = i;
795 		sz += tcf_action_fill_size(act);
796 		if (ovr)
797 			act->tcfa_refcnt++;
798 		list_add_tail(&act->list, actions);
799 	}
800 
801 	*attr_size = tcf_action_full_attrs_size(sz);
802 
803 	/* Remove the temporary refcnt which was needed to protect against
804 	 * destroying an existing action that is being replaced.
805 	 */
806 	cleanup_a(actions, ovr);
807 	return 0;
808 
809 err:
810 	tcf_action_destroy(actions, bind);
811 	return err;
812 }
813 
814 int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
815 			  int compat_mode)
816 {
817 	int err = 0;
818 	struct gnet_dump d;
819 
820 	if (p == NULL)
821 		goto errout;
822 
823 	/* When compat_mode is true, the caller expects additional
824 	 * backward-compatibility statistics TLVs to be added.
825 	 */
826 	if (compat_mode) {
827 		if (p->type == TCA_OLD_COMPAT)
828 			err = gnet_stats_start_copy_compat(skb, 0,
829 							   TCA_STATS,
830 							   TCA_XSTATS,
831 							   &p->tcfa_lock, &d,
832 							   TCA_PAD);
833 		else
834 			return 0;
835 	} else
836 		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
837 					    &p->tcfa_lock, &d, TCA_ACT_PAD);
838 
839 	if (err < 0)
840 		goto errout;
841 
842 	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
843 	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
844 	    gnet_stats_copy_queue(&d, p->cpu_qstats,
845 				  &p->tcfa_qstats,
846 				  p->tcfa_qstats.qlen) < 0)
847 		goto errout;
848 
849 	if (gnet_stats_finish_copy(&d) < 0)
850 		goto errout;
851 
852 	return 0;
853 
854 errout:
855 	return -1;
856 }
857 
858 static int tca_get_fill(struct sk_buff *skb, struct list_head *actions,
859 			u32 portid, u32 seq, u16 flags, int event, int bind,
860 			int ref)
861 {
862 	struct tcamsg *t;
863 	struct nlmsghdr *nlh;
864 	unsigned char *b = skb_tail_pointer(skb);
865 	struct nlattr *nest;
866 
867 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
868 	if (!nlh)
869 		goto out_nlmsg_trim;
870 	t = nlmsg_data(nlh);
871 	t->tca_family = AF_UNSPEC;
872 	t->tca__pad1 = 0;
873 	t->tca__pad2 = 0;
874 
875 	nest = nla_nest_start(skb, TCA_ACT_TAB);
876 	if (!nest)
877 		goto out_nlmsg_trim;
878 
879 	if (tcf_action_dump(skb, actions, bind, ref) < 0)
880 		goto out_nlmsg_trim;
881 
882 	nla_nest_end(skb, nest);
883 
884 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
885 	return skb->len;
886 
887 out_nlmsg_trim:
888 	nlmsg_trim(skb, b);
889 	return -1;
890 }
891 
892 static int
893 tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
894 	       struct list_head *actions, int event,
895 	       struct netlink_ext_ack *extack)
896 {
897 	struct sk_buff *skb;
898 
899 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
900 	if (!skb)
901 		return -ENOBUFS;
902 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
903 			 0, 0) <= 0) {
904 		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
905 		kfree_skb(skb);
906 		return -EINVAL;
907 	}
908 
909 	return rtnl_unicast(skb, net, portid);
910 }
911 
912 static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
913 					  struct nlmsghdr *n, u32 portid,
914 					  struct netlink_ext_ack *extack)
915 {
916 	struct nlattr *tb[TCA_ACT_MAX + 1];
917 	const struct tc_action_ops *ops;
918 	struct tc_action *a;
919 	int index;
920 	int err;
921 
922 	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, extack);
923 	if (err < 0)
924 		goto err_out;
925 
926 	err = -EINVAL;
927 	if (tb[TCA_ACT_INDEX] == NULL ||
928 	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
929 		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
930 		goto err_out;
931 	}
932 	index = nla_get_u32(tb[TCA_ACT_INDEX]);
933 
934 	err = -EINVAL;
935 	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
936 	if (!ops) { /* can happen in a batch of actions */
937 		NL_SET_ERR_MSG(extack, "Specified TC action not found");
938 		goto err_out;
939 	}
940 	err = -ENOENT;
941 	if (ops->lookup(net, &a, index, extack) == 0)
942 		goto err_mod;
943 
944 	module_put(ops->owner);
945 	return a;
946 
947 err_mod:
948 	module_put(ops->owner);
949 err_out:
950 	return ERR_PTR(err);
951 }
952 
953 static int tca_action_flush(struct net *net, struct nlattr *nla,
954 			    struct nlmsghdr *n, u32 portid,
955 			    struct netlink_ext_ack *extack)
956 {
957 	struct sk_buff *skb;
958 	unsigned char *b;
959 	struct nlmsghdr *nlh;
960 	struct tcamsg *t;
961 	struct netlink_callback dcb;
962 	struct nlattr *nest;
963 	struct nlattr *tb[TCA_ACT_MAX + 1];
964 	const struct tc_action_ops *ops;
965 	struct nlattr *kind;
966 	int err = -ENOMEM;
967 
968 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
969 	if (!skb)
970 		return err;
971 
972 	b = skb_tail_pointer(skb);
973 
974 	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, extack);
975 	if (err < 0)
976 		goto err_out;
977 
978 	err = -EINVAL;
979 	kind = tb[TCA_ACT_KIND];
980 	ops = tc_lookup_action(kind);
981 	if (!ops) { /* someone is trying to flush an unknown action */
982 		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
983 		goto err_out;
984 	}
985 
986 	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
987 			sizeof(*t), 0);
988 	if (!nlh) {
989 		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
990 		goto out_module_put;
991 	}
992 	t = nlmsg_data(nlh);
993 	t->tca_family = AF_UNSPEC;
994 	t->tca__pad1 = 0;
995 	t->tca__pad2 = 0;
996 
997 	nest = nla_nest_start(skb, TCA_ACT_TAB);
998 	if (!nest) {
999 		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
1000 		goto out_module_put;
1001 	}
1002 
1003 	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
1004 	if (err <= 0) {
1005 		nla_nest_cancel(skb, nest);
1006 		goto out_module_put;
1007 	}
1008 
1009 	nla_nest_end(skb, nest);
1010 
1011 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1012 	nlh->nlmsg_flags |= NLM_F_ROOT;
1013 	module_put(ops->owner);
1014 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1015 			     n->nlmsg_flags & NLM_F_ECHO);
1016 	if (err > 0)
1017 		return 0;
1018 	if (err < 0)
1019 		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");
1020 
1021 	return err;
1022 
1023 out_module_put:
1024 	module_put(ops->owner);
1025 err_out:
1026 	kfree_skb(skb);
1027 	return err;
1028 }
1029 
1030 static int
1031 tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
1032 	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
1033 {
1034 	int ret;
1035 	struct sk_buff *skb;
1036 
1037 	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
1038 			GFP_KERNEL);
1039 	if (!skb)
1040 		return -ENOBUFS;
1041 
1042 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
1043 			 0, 1) <= 0) {
1044 		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
1045 		kfree_skb(skb);
1046 		return -EINVAL;
1047 	}
1048 
1049 	/* now do the delete */
1050 	ret = tcf_action_destroy(actions, 0);
1051 	if (ret < 0) {
1052 		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
1053 		kfree_skb(skb);
1054 		return ret;
1055 	}
1056 
1057 	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1058 			     n->nlmsg_flags & NLM_F_ECHO);
1059 	if (ret > 0)
1060 		return 0;
1061 	return ret;
1062 }
1063 
1064 static int
1065 tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
1066 	      u32 portid, int event, struct netlink_ext_ack *extack)
1067 {
1068 	int i, ret;
1069 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1070 	struct tc_action *act;
1071 	size_t attr_size = 0;
1072 	LIST_HEAD(actions);
1073 
1074 	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
1075 	if (ret < 0)
1076 		return ret;
1077 
1078 	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
1079 		if (tb[1])
1080 			return tca_action_flush(net, tb[1], n, portid, extack);
1081 
1082 		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
1083 		return -EINVAL;
1084 	}
1085 
1086 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1087 		act = tcf_action_get_1(net, tb[i], n, portid, extack);
1088 		if (IS_ERR(act)) {
1089 			ret = PTR_ERR(act);
1090 			goto err;
1091 		}
1092 		act->order = i;
1093 		attr_size += tcf_action_fill_size(act);
1094 		list_add_tail(&act->list, &actions);
1095 	}
1096 
1097 	attr_size = tcf_action_full_attrs_size(attr_size);
1098 
1099 	if (event == RTM_GETACTION)
1100 		ret = tcf_get_notify(net, portid, n, &actions, event, extack);
1101 	else { /* delete */
1102 		ret = tcf_del_notify(net, n, &actions, portid, attr_size, extack);
1103 		if (ret)
1104 			goto err;
1105 		return ret;
1106 	}
1107 err:
1108 	if (event != RTM_GETACTION)
1109 		tcf_action_destroy(&actions, 0);
1110 	return ret;
1111 }
1112 
1113 static int
1114 tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
1115 	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
1116 {
1117 	struct sk_buff *skb;
1118 	int err = 0;
1119 
1120 	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
1121 			GFP_KERNEL);
1122 	if (!skb)
1123 		return -ENOBUFS;
1124 
1125 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
1126 			 RTM_NEWACTION, 0, 0) <= 0) {
1127 		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
1128 		kfree_skb(skb);
1129 		return -EINVAL;
1130 	}
1131 
1132 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1133 			     n->nlmsg_flags & NLM_F_ECHO);
1134 	if (err > 0)
1135 		err = 0;
1136 	return err;
1137 }
1138 
1139 static int tcf_action_add(struct net *net, struct nlattr *nla,
1140 			  struct nlmsghdr *n, u32 portid, int ovr,
1141 			  struct netlink_ext_ack *extack)
1142 {
1143 	size_t attr_size = 0;
1144 	int ret = 0;
1145 	LIST_HEAD(actions);
1146 
1147 	ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions,
1148 			      &attr_size, extack);
1149 	if (ret)
1150 		return ret;
1151 
1152 	return tcf_add_notify(net, n, &actions, portid, attr_size, extack);
1153 }
1154 
1155 static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON;
1156 static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
1157 	[TCA_ROOT_FLAGS] = { .type = NLA_BITFIELD32,
1158 			     .validation_data = &tcaa_root_flags_allowed },
1159 	[TCA_ROOT_TIME_DELTA]      = { .type = NLA_U32 },
1160 };
1161 
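/* doit handler for RTM_NEWACTION, RTM_DELACTION and RTM_GETACTION (registered
 * in tc_action_init() at the bottom of this file).  For RTM_NEWACTION an
 * -EAGAIN result - caused by a dropped-RTNL module autoload in
 * tcf_action_init_1() - makes the request replay from the "replay" label.
 */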
1162 static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
1163 			 struct netlink_ext_ack *extack)
1164 {
1165 	struct net *net = sock_net(skb->sk);
1166 	struct nlattr *tca[TCA_ROOT_MAX + 1];
1167 	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
1168 	int ret = 0, ovr = 0;
1169 
1170 	if ((n->nlmsg_type != RTM_GETACTION) &&
1171 	    !netlink_capable(skb, CAP_NET_ADMIN))
1172 		return -EPERM;
1173 
1174 	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ROOT_MAX, NULL,
1175 			  extack);
1176 	if (ret < 0)
1177 		return ret;
1178 
1179 	if (tca[TCA_ACT_TAB] == NULL) {
1180 		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
1181 		return -EINVAL;
1182 	}
1183 
1184 	/* n->nlmsg_flags & NLM_F_CREATE */
1185 	switch (n->nlmsg_type) {
1186 	case RTM_NEWACTION:
1187 		/* We assume that any flag combination other than NLM_F_REPLACE
1188 		 * implies "create only if it does not exist". Strictly,
1189 		 * NLM_F_CREATE | NLM_F_EXCL implies that as well, but to avoid
1190 		 * ambiguity (e.g. when the flags are zero) we simply treat it
1191 		 * as the default.
1192 		 */
1193 		if (n->nlmsg_flags & NLM_F_REPLACE)
1194 			ovr = 1;
1195 replay:
1196 		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
1197 				     extack);
1198 		if (ret == -EAGAIN)
1199 			goto replay;
1200 		break;
1201 	case RTM_DELACTION:
1202 		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
1203 				    portid, RTM_DELACTION, extack);
1204 		break;
1205 	case RTM_GETACTION:
1206 		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
1207 				    portid, RTM_GETACTION, extack);
1208 		break;
1209 	default:
1210 		BUG();
1211 	}
1212 
1213 	return ret;
1214 }
1215 
1216 static struct nlattr *find_dump_kind(struct nlattr **nla)
1217 {
1218 	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
1219 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1220 	struct nlattr *kind;
1221 
1222 	tb1 = nla[TCA_ACT_TAB];
1223 	if (tb1 == NULL)
1224 		return NULL;
1225 
1226 	if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
1227 		      NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
1228 		return NULL;
1229 
1230 	if (tb[1] == NULL)
1231 		return NULL;
1232 	if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL, NULL) < 0)
1233 		return NULL;
1234 	kind = tb2[TCA_ACT_KIND];
1235 
1236 	return kind;
1237 }
1238 
1239 static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1240 {
1241 	struct net *net = sock_net(skb->sk);
1242 	struct nlmsghdr *nlh;
1243 	unsigned char *b = skb_tail_pointer(skb);
1244 	struct nlattr *nest;
1245 	struct tc_action_ops *a_o;
1246 	int ret = 0;
1247 	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
1248 	struct nlattr *tb[TCA_ROOT_MAX + 1];
1249 	struct nlattr *count_attr = NULL;
1250 	unsigned long jiffy_since = 0;
1251 	struct nlattr *kind = NULL;
1252 	struct nla_bitfield32 bf;
1253 	u32 msecs_since = 0;
1254 	u32 act_count = 0;
1255 
1256 	ret = nlmsg_parse(cb->nlh, sizeof(struct tcamsg), tb, TCA_ROOT_MAX,
1257 			  tcaa_policy, NULL);
1258 	if (ret < 0)
1259 		return ret;
1260 
1261 	kind = find_dump_kind(tb);
1262 	if (kind == NULL) {
1263 		pr_info("tc_dump_action: action bad kind\n");
1264 		return 0;
1265 	}
1266 
1267 	a_o = tc_lookup_action(kind);
1268 	if (a_o == NULL)
1269 		return 0;
1270 
1271 	cb->args[2] = 0;
1272 	if (tb[TCA_ROOT_FLAGS]) {
1273 		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
1274 		cb->args[2] = bf.value;
1275 	}
1276 
1277 	if (tb[TCA_ROOT_TIME_DELTA]) {
1278 		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
1279 	}
1280 
1281 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1282 			cb->nlh->nlmsg_type, sizeof(*t), 0);
1283 	if (!nlh)
1284 		goto out_module_put;
1285 
1286 	if (msecs_since)
1287 		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);
1288 
1289 	t = nlmsg_data(nlh);
1290 	t->tca_family = AF_UNSPEC;
1291 	t->tca__pad1 = 0;
1292 	t->tca__pad2 = 0;
1293 	cb->args[3] = jiffy_since;
1294 	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
1295 	if (!count_attr)
1296 		goto out_module_put;
1297 
1298 	nest = nla_nest_start(skb, TCA_ACT_TAB);
1299 	if (nest == NULL)
1300 		goto out_module_put;
1301 
1302 	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
1303 	if (ret < 0)
1304 		goto out_module_put;
1305 
1306 	if (ret > 0) {
1307 		nla_nest_end(skb, nest);
1308 		ret = skb->len;
1309 		act_count = cb->args[1];
1310 		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
1311 		cb->args[1] = 0;
1312 	} else
1313 		nlmsg_trim(skb, b);
1314 
1315 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1316 	if (NETLINK_CB(cb->skb).portid && ret)
1317 		nlh->nlmsg_flags |= NLM_F_MULTI;
1318 	module_put(a_o->owner);
1319 	return skb->len;
1320 
1321 out_module_put:
1322 	module_put(a_o->owner);
1323 	nlmsg_trim(skb, b);
1324 	return skb->len;
1325 }
1326 
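/* What follows is the "egress device" callback infrastructure: a driver
 * registers a callback for one of its netdevices in order to be offered tc
 * offload requests for rules that were installed on some other device but
 * redirect (mirred) to this one; those requests are replayed to it via
 * tc_setup_cb_egdev_call().  A rough registration sketch, with
 * foo_setup_tc_block_cb and priv standing in for a real driver's callback
 * and private data:
 *
 *	err = tc_setup_cb_egdev_register(netdev, foo_setup_tc_block_cb, priv);
 *	if (err)
 *		goto err_out;
 *	...
 *	tc_setup_cb_egdev_unregister(netdev, foo_setup_tc_block_cb, priv);
 */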
1327 struct tcf_action_net {
1328 	struct rhashtable egdev_ht;
1329 };
1330 
1331 static unsigned int tcf_action_net_id;
1332 
1333 struct tcf_action_egdev_cb {
1334 	struct list_head list;
1335 	tc_setup_cb_t *cb;
1336 	void *cb_priv;
1337 };
1338 
1339 struct tcf_action_egdev {
1340 	struct rhash_head ht_node;
1341 	const struct net_device *dev;
1342 	unsigned int refcnt;
1343 	struct list_head cb_list;
1344 };
1345 
1346 static const struct rhashtable_params tcf_action_egdev_ht_params = {
1347 	.key_offset = offsetof(struct tcf_action_egdev, dev),
1348 	.head_offset = offsetof(struct tcf_action_egdev, ht_node),
1349 	.key_len = sizeof(const struct net_device *),
1350 };
1351 
1352 static struct tcf_action_egdev *
1353 tcf_action_egdev_lookup(const struct net_device *dev)
1354 {
1355 	struct net *net = dev_net(dev);
1356 	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);
1357 
1358 	return rhashtable_lookup_fast(&tan->egdev_ht, &dev,
1359 				      tcf_action_egdev_ht_params);
1360 }
1361 
1362 static struct tcf_action_egdev *
1363 tcf_action_egdev_get(const struct net_device *dev)
1364 {
1365 	struct tcf_action_egdev *egdev;
1366 	struct tcf_action_net *tan;
1367 
1368 	egdev = tcf_action_egdev_lookup(dev);
1369 	if (egdev)
1370 		goto inc_ref;
1371 
1372 	egdev = kzalloc(sizeof(*egdev), GFP_KERNEL);
1373 	if (!egdev)
1374 		return NULL;
1375 	INIT_LIST_HEAD(&egdev->cb_list);
1376 	egdev->dev = dev;
1377 	tan = net_generic(dev_net(dev), tcf_action_net_id);
1378 	rhashtable_insert_fast(&tan->egdev_ht, &egdev->ht_node,
1379 			       tcf_action_egdev_ht_params);
1380 
1381 inc_ref:
1382 	egdev->refcnt++;
1383 	return egdev;
1384 }
1385 
1386 static void tcf_action_egdev_put(struct tcf_action_egdev *egdev)
1387 {
1388 	struct tcf_action_net *tan;
1389 
1390 	if (--egdev->refcnt)
1391 		return;
1392 	tan = net_generic(dev_net(egdev->dev), tcf_action_net_id);
1393 	rhashtable_remove_fast(&tan->egdev_ht, &egdev->ht_node,
1394 			       tcf_action_egdev_ht_params);
1395 	kfree(egdev);
1396 }
1397 
1398 static struct tcf_action_egdev_cb *
1399 tcf_action_egdev_cb_lookup(struct tcf_action_egdev *egdev,
1400 			   tc_setup_cb_t *cb, void *cb_priv)
1401 {
1402 	struct tcf_action_egdev_cb *egdev_cb;
1403 
1404 	list_for_each_entry(egdev_cb, &egdev->cb_list, list)
1405 		if (egdev_cb->cb == cb && egdev_cb->cb_priv == cb_priv)
1406 			return egdev_cb;
1407 	return NULL;
1408 }
1409 
1410 static int tcf_action_egdev_cb_call(struct tcf_action_egdev *egdev,
1411 				    enum tc_setup_type type,
1412 				    void *type_data, bool err_stop)
1413 {
1414 	struct tcf_action_egdev_cb *egdev_cb;
1415 	int ok_count = 0;
1416 	int err;
1417 
1418 	list_for_each_entry(egdev_cb, &egdev->cb_list, list) {
1419 		err = egdev_cb->cb(type, type_data, egdev_cb->cb_priv);
1420 		if (err) {
1421 			if (err_stop)
1422 				return err;
1423 		} else {
1424 			ok_count++;
1425 		}
1426 	}
1427 	return ok_count;
1428 }
1429 
1430 static int tcf_action_egdev_cb_add(struct tcf_action_egdev *egdev,
1431 				   tc_setup_cb_t *cb, void *cb_priv)
1432 {
1433 	struct tcf_action_egdev_cb *egdev_cb;
1434 
1435 	egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
1436 	if (WARN_ON(egdev_cb))
1437 		return -EEXIST;
1438 	egdev_cb = kzalloc(sizeof(*egdev_cb), GFP_KERNEL);
1439 	if (!egdev_cb)
1440 		return -ENOMEM;
1441 	egdev_cb->cb = cb;
1442 	egdev_cb->cb_priv = cb_priv;
1443 	list_add(&egdev_cb->list, &egdev->cb_list);
1444 	return 0;
1445 }
1446 
1447 static void tcf_action_egdev_cb_del(struct tcf_action_egdev *egdev,
1448 				    tc_setup_cb_t *cb, void *cb_priv)
1449 {
1450 	struct tcf_action_egdev_cb *egdev_cb;
1451 
1452 	egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
1453 	if (WARN_ON(!egdev_cb))
1454 		return;
1455 	list_del(&egdev_cb->list);
1456 	kfree(egdev_cb);
1457 }
1458 
1459 static int __tc_setup_cb_egdev_register(const struct net_device *dev,
1460 					tc_setup_cb_t *cb, void *cb_priv)
1461 {
1462 	struct tcf_action_egdev *egdev = tcf_action_egdev_get(dev);
1463 	int err;
1464 
1465 	if (!egdev)
1466 		return -ENOMEM;
1467 	err = tcf_action_egdev_cb_add(egdev, cb, cb_priv);
1468 	if (err)
1469 		goto err_cb_add;
1470 	return 0;
1471 
1472 err_cb_add:
1473 	tcf_action_egdev_put(egdev);
1474 	return err;
1475 }
1476 int tc_setup_cb_egdev_register(const struct net_device *dev,
1477 			       tc_setup_cb_t *cb, void *cb_priv)
1478 {
1479 	int err;
1480 
1481 	rtnl_lock();
1482 	err = __tc_setup_cb_egdev_register(dev, cb, cb_priv);
1483 	rtnl_unlock();
1484 	return err;
1485 }
1486 EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_register);
1487 
1488 static void __tc_setup_cb_egdev_unregister(const struct net_device *dev,
1489 					   tc_setup_cb_t *cb, void *cb_priv)
1490 {
1491 	struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);
1492 
1493 	if (WARN_ON(!egdev))
1494 		return;
1495 	tcf_action_egdev_cb_del(egdev, cb, cb_priv);
1496 	tcf_action_egdev_put(egdev);
1497 }
1498 void tc_setup_cb_egdev_unregister(const struct net_device *dev,
1499 				  tc_setup_cb_t *cb, void *cb_priv)
1500 {
1501 	rtnl_lock();
1502 	__tc_setup_cb_egdev_unregister(dev, cb, cb_priv);
1503 	rtnl_unlock();
1504 }
1505 EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_unregister);
1506 
1507 int tc_setup_cb_egdev_call(const struct net_device *dev,
1508 			   enum tc_setup_type type, void *type_data,
1509 			   bool err_stop)
1510 {
1511 	struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);
1512 
1513 	if (!egdev)
1514 		return 0;
1515 	return tcf_action_egdev_cb_call(egdev, type, type_data, err_stop);
1516 }
1517 EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_call);
1518 
1519 static __net_init int tcf_action_net_init(struct net *net)
1520 {
1521 	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);
1522 
1523 	return rhashtable_init(&tan->egdev_ht, &tcf_action_egdev_ht_params);
1524 }
1525 
1526 static void __net_exit tcf_action_net_exit(struct net *net)
1527 {
1528 	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);
1529 
1530 	rhashtable_destroy(&tan->egdev_ht);
1531 }
1532 
1533 static struct pernet_operations tcf_action_net_ops = {
1534 	.init = tcf_action_net_init,
1535 	.exit = tcf_action_net_exit,
1536 	.id = &tcf_action_net_id,
1537 	.size = sizeof(struct tcf_action_net),
1538 };
1539 
1540 static int __init tc_action_init(void)
1541 {
1542 	int err;
1543 
1544 	err = register_pernet_subsys(&tcf_action_net_ops);
1545 	if (err)
1546 		return err;
1547 
1548 	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
1549 	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
1550 	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
1551 		      0);
1552 
1553 	return 0;
1554 }
1555 
1556 subsys_initcall(tc_action_init);
1557