xref: /openbmc/linux/net/sched/act_api.c (revision 110e6f26)
1 /*
2  * net/sched/act_api.c	Packet action API.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Author:	Jamal Hadi Salim
10  *
11  *
12  */
13 
14 #include <linux/types.h>
15 #include <linux/kernel.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/slab.h>
19 #include <linux/skbuff.h>
20 #include <linux/init.h>
21 #include <linux/kmod.h>
22 #include <linux/err.h>
23 #include <linux/module.h>
24 #include <net/net_namespace.h>
25 #include <net/sock.h>
26 #include <net/sch_generic.h>
27 #include <net/act_api.h>
28 #include <net/netlink.h>
29 
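/* RCU callback: release the per-cpu stats and the tcf_common itself
 * once readers such as the rate estimator can no longer touch it.
 */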
30 static void free_tcf(struct rcu_head *head)
31 {
32 	struct tcf_common *p = container_of(head, struct tcf_common, tcfc_rcu);
33 
34 	free_percpu(p->cpu_bstats);
35 	free_percpu(p->cpu_qstats);
36 	kfree(p);
37 }
38 
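/* Unlink the action's tcf_common from its hash bucket, kill the rate
 * estimator and defer the actual free to an RCU grace period.
 */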
39 static void tcf_hash_destroy(struct tcf_hashinfo *hinfo, struct tc_action *a)
40 {
41 	struct tcf_common *p = a->priv;
42 
43 	spin_lock_bh(&hinfo->lock);
44 	hlist_del(&p->tcfc_head);
45 	spin_unlock_bh(&hinfo->lock);
46 	gen_kill_estimator(&p->tcfc_bstats,
47 			   &p->tcfc_rate_est);
48 	/*
49 	 * gen_estimator est_timer() might access p->tcfc_lock
50 	 * or bstats; wait an RCU grace period before freeing p
51 	 */
52 	call_rcu(&p->tcfc_rcu, free_tcf);
53 }
54 
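/* Drop a reference (and, when @bind is set, a bind count) on the action.
 * With @strict set, an action that is still bound is refused with -EPERM.
 * Returns ACT_P_DELETED when the last reference went away and the action
 * was destroyed, 0 otherwise.
 */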
55 int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
56 {
57 	struct tcf_common *p = a->priv;
58 	int ret = 0;
59 
60 	if (p) {
61 		if (bind)
62 			p->tcfc_bindcnt--;
63 		else if (strict && p->tcfc_bindcnt > 0)
64 			return -EPERM;
65 
66 		p->tcfc_refcnt--;
67 		if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
68 			if (a->ops->cleanup)
69 				a->ops->cleanup(a, bind);
70 			tcf_hash_destroy(a->hinfo, a);
71 			ret = ACT_P_DELETED;
72 		}
73 	}
74 
75 	return ret;
76 }
77 EXPORT_SYMBOL(__tcf_hash_release);
78 
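/* Netlink dump helper: walk the hash table and emit up to
 * TCA_ACT_MAX_PRIO actions per call, resuming from cb->args[0].
 */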
79 static int tcf_dump_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb,
80 			   struct netlink_callback *cb, struct tc_action *a)
81 {
82 	struct hlist_head *head;
83 	struct tcf_common *p;
84 	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
85 	struct nlattr *nest;
86 
87 	spin_lock_bh(&hinfo->lock);
88 
89 	s_i = cb->args[0];
90 
91 	for (i = 0; i < (hinfo->hmask + 1); i++) {
92 		head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
93 
94 		hlist_for_each_entry_rcu(p, head, tcfc_head) {
95 			index++;
96 			if (index < s_i)
97 				continue;
98 			a->priv = p;
99 			a->order = n_i;
100 
101 			nest = nla_nest_start(skb, a->order);
102 			if (nest == NULL)
103 				goto nla_put_failure;
104 			err = tcf_action_dump_1(skb, a, 0, 0);
105 			if (err < 0) {
106 				index--;
107 				nlmsg_trim(skb, nest);
108 				goto done;
109 			}
110 			nla_nest_end(skb, nest);
111 			n_i++;
112 			if (n_i >= TCA_ACT_MAX_PRIO)
113 				goto done;
114 		}
115 	}
116 done:
117 	spin_unlock_bh(&hinfo->lock);
118 	if (n_i)
119 		cb->args[0] += n_i;
120 	return n_i;
121 
122 nla_put_failure:
123 	nla_nest_cancel(skb, nest);
124 	goto done;
125 }
126 
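/* Flush helper: release every action of this kind (strict, so a
 * still-bound entry aborts the flush) and report the number of
 * deleted entries in TCA_FCNT.
 */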
127 static int tcf_del_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb,
128 			  struct tc_action *a)
129 {
130 	struct hlist_head *head;
131 	struct hlist_node *n;
132 	struct tcf_common *p;
133 	struct nlattr *nest;
134 	int i = 0, n_i = 0;
135 	int ret = -EINVAL;
136 
137 	nest = nla_nest_start(skb, a->order);
138 	if (nest == NULL)
139 		goto nla_put_failure;
140 	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
141 		goto nla_put_failure;
142 	for (i = 0; i < (hinfo->hmask + 1); i++) {
143 		head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
144 		hlist_for_each_entry_safe(p, n, head, tcfc_head) {
145 			a->priv = p;
146 			ret = __tcf_hash_release(a, false, true);
147 			if (ret == ACT_P_DELETED) {
148 				module_put(a->ops->owner);
149 				n_i++;
150 			} else if (ret < 0)
151 				goto nla_put_failure;
152 		}
153 	}
154 	if (nla_put_u32(skb, TCA_FCNT, n_i))
155 		goto nla_put_failure;
156 	nla_nest_end(skb, nest);
157 
158 	return n_i;
159 nla_put_failure:
160 	nla_nest_cancel(skb, nest);
161 	return ret;
162 }
163 
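/* Common implementation behind the modules' ->walk() callbacks:
 * RTM_DELACTION flushes the table, RTM_GETACTION dumps it.
 */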
164 int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
165 		       struct netlink_callback *cb, int type,
166 		       struct tc_action *a)
167 {
168 	struct tcf_hashinfo *hinfo = tn->hinfo;
169 
170 	a->hinfo = hinfo;
171 
172 	if (type == RTM_DELACTION) {
173 		return tcf_del_walker(hinfo, skb, a);
174 	} else if (type == RTM_GETACTION) {
175 		return tcf_dump_walker(hinfo, skb, cb, a);
176 	} else {
177 		WARN(1, "tcf_generic_walker: unknown action %d\n", type);
178 		return -EINVAL;
179 	}
180 }
181 EXPORT_SYMBOL(tcf_generic_walker);
182 
183 static struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
184 {
185 	struct tcf_common *p = NULL;
186 	struct hlist_head *head;
187 
188 	spin_lock_bh(&hinfo->lock);
189 	head = &hinfo->htab[tcf_hash(index, hinfo->hmask)];
190 	hlist_for_each_entry_rcu(p, head, tcfc_head)
191 		if (p->tcfc_index == index)
192 			break;
193 	spin_unlock_bh(&hinfo->lock);
194 
195 	return p;
196 }
197 
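/* Hand out the next unused action index, skipping 0 and wrapping
 * around; uniqueness is checked with a linear hash lookup.
 */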
198 u32 tcf_hash_new_index(struct tc_action_net *tn)
199 {
200 	struct tcf_hashinfo *hinfo = tn->hinfo;
201 	u32 val = hinfo->index;
202 
203 	do {
204 		if (++val == 0)
205 			val = 1;
206 	} while (tcf_hash_lookup(val, hinfo));
207 
208 	hinfo->index = val;
209 	return val;
210 }
211 EXPORT_SYMBOL(tcf_hash_new_index);
212 
213 int tcf_hash_search(struct tc_action_net *tn, struct tc_action *a, u32 index)
214 {
215 	struct tcf_hashinfo *hinfo = tn->hinfo;
216 	struct tcf_common *p = tcf_hash_lookup(index, hinfo);
217 
218 	if (p) {
219 		a->priv = p;
220 		a->hinfo = hinfo;
221 		return 1;
222 	}
223 	return 0;
224 }
225 EXPORT_SYMBOL(tcf_hash_search);
226 
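/* Look up an existing action by @index; on a hit, bump the reference
 * (and bind) counts and attach it to @a.  Returns 1 if found, 0 if not.
 */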
227 int tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a,
228 		   int bind)
229 {
230 	struct tcf_hashinfo *hinfo = tn->hinfo;
231 	struct tcf_common *p = NULL;
232 	if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
233 		if (bind)
234 			p->tcfc_bindcnt++;
235 		p->tcfc_refcnt++;
236 		a->priv = p;
237 		a->hinfo = hinfo;
238 		return 1;
239 	}
240 	return 0;
241 }
242 EXPORT_SYMBOL(tcf_hash_check);
243 
244 void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
245 {
246 	struct tcf_common *pc = a->priv;
247 	if (est)
248 		gen_kill_estimator(&pc->tcfc_bstats,
249 				   &pc->tcfc_rate_est);
250 	call_rcu(&pc->tcfc_rcu, free_tcf);
251 }
252 EXPORT_SYMBOL(tcf_hash_cleanup);
253 
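/* Allocate and initialise a fresh tcf_common for @a: optional per-cpu
 * stats, an optional rate estimator built from @est, and an index
 * (either the requested one or a newly allocated one).
 *
 * A typical ->init() callback uses the check/create/insert trio roughly
 * like this (sketch only; "struct tcf_foo" is an illustrative private
 * type and error handling is trimmed):
 *
 *	if (!tcf_hash_check(tn, index, a, bind)) {
 *		err = tcf_hash_create(tn, index, est, a,
 *				      sizeof(struct tcf_foo), bind, false);
 *		if (err)
 *			return err;
 *		... fill in the private part ...
 *		tcf_hash_insert(tn, a);
 *	}
 */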
254 int tcf_hash_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
255 		    struct tc_action *a, int size, int bind, bool cpustats)
256 {
257 	struct tcf_common *p = kzalloc(size, GFP_KERNEL);
258 	struct tcf_hashinfo *hinfo = tn->hinfo;
259 	int err = -ENOMEM;
260 
261 	if (unlikely(!p))
262 		return -ENOMEM;
263 	p->tcfc_refcnt = 1;
264 	if (bind)
265 		p->tcfc_bindcnt = 1;
266 
267 	if (cpustats) {
268 		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
269 		if (!p->cpu_bstats) {
270 err1:
271 			kfree(p);
272 			return err;
273 		}
274 		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
275 		if (!p->cpu_qstats) {
276 err2:
277 			free_percpu(p->cpu_bstats);
278 			goto err1;
279 		}
280 	}
281 	spin_lock_init(&p->tcfc_lock);
282 	INIT_HLIST_NODE(&p->tcfc_head);
283 	p->tcfc_index = index ? index : tcf_hash_new_index(tn);
284 	p->tcfc_tm.install = jiffies;
285 	p->tcfc_tm.lastuse = jiffies;
286 	if (est) {
287 		err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
288 					&p->tcfc_rate_est,
289 					&p->tcfc_lock, est);
290 		if (err) {
291 			free_percpu(p->cpu_qstats);
292 			goto err2;
293 		}
294 	}
295 
296 	a->priv = (void *) p;
297 	a->hinfo = hinfo;
298 	return 0;
299 }
300 EXPORT_SYMBOL(tcf_hash_create);
301 
302 void tcf_hash_insert(struct tc_action_net *tn, struct tc_action *a)
303 {
304 	struct tcf_common *p = a->priv;
305 	struct tcf_hashinfo *hinfo = tn->hinfo;
306 	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
307 
308 	spin_lock_bh(&hinfo->lock);
309 	hlist_add_head(&p->tcfc_head, &hinfo->htab[h]);
310 	spin_unlock_bh(&hinfo->lock);
311 }
312 EXPORT_SYMBOL(tcf_hash_insert);
313 
314 void tcf_hashinfo_destroy(const struct tc_action_ops *ops,
315 			  struct tcf_hashinfo *hinfo)
316 {
317 	struct tc_action a = {
318 		.ops = ops,
319 		.hinfo = hinfo,
320 	};
321 	int i;
322 
323 	for (i = 0; i < hinfo->hmask + 1; i++) {
324 		struct tcf_common *p;
325 		struct hlist_node *n;
326 
327 		hlist_for_each_entry_safe(p, n, &hinfo->htab[i], tcfc_head) {
328 			int ret;
329 
330 			a.priv = p;
331 			ret = __tcf_hash_release(&a, false, true);
332 			if (ret == ACT_P_DELETED)
333 				module_put(ops->owner);
334 			else if (ret < 0)
335 				return;
336 		}
337 	}
338 	kfree(hinfo->htab);
339 }
340 EXPORT_SYMBOL(tcf_hashinfo_destroy);
341 
342 static LIST_HEAD(act_base);
343 static DEFINE_RWLOCK(act_mod_lock);
344 
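/* Register an action kind.  All mandatory ops (act, dump, init, walk,
 * lookup) must be present and both the numeric type and the kind string
 * must be unique; the per-netns ops are registered last so a failure
 * there unwinds the list insertion.
 *
 * A module init typically boils down to (sketch; the foo names are
 * illustrative):
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return tcf_register_action(&act_foo_ops, &foo_net_ops);
 *	}
 */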
345 int tcf_register_action(struct tc_action_ops *act,
346 			struct pernet_operations *ops)
347 {
348 	struct tc_action_ops *a;
349 	int ret;
350 
351 	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
352 		return -EINVAL;
353 
354 	write_lock(&act_mod_lock);
355 	list_for_each_entry(a, &act_base, head) {
356 		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
357 			write_unlock(&act_mod_lock);
358 			return -EEXIST;
359 		}
360 	}
361 	list_add_tail(&act->head, &act_base);
362 	write_unlock(&act_mod_lock);
363 
364 	ret = register_pernet_subsys(ops);
365 	if (ret) {
366 		tcf_unregister_action(act, ops);
367 		return ret;
368 	}
369 
370 	return 0;
371 }
372 EXPORT_SYMBOL(tcf_register_action);
373 
374 int tcf_unregister_action(struct tc_action_ops *act,
375 			  struct pernet_operations *ops)
376 {
377 	struct tc_action_ops *a;
378 	int err = -ENOENT;
379 
380 	unregister_pernet_subsys(ops);
381 
382 	write_lock(&act_mod_lock);
383 	list_for_each_entry(a, &act_base, head) {
384 		if (a == act) {
385 			list_del(&act->head);
386 			err = 0;
387 			break;
388 		}
389 	}
390 	write_unlock(&act_mod_lock);
391 	return err;
392 }
393 EXPORT_SYMBOL(tcf_unregister_action);
394 
395 /* lookup by name */
396 static struct tc_action_ops *tc_lookup_action_n(char *kind)
397 {
398 	struct tc_action_ops *a, *res = NULL;
399 
400 	if (kind) {
401 		read_lock(&act_mod_lock);
402 		list_for_each_entry(a, &act_base, head) {
403 			if (strcmp(kind, a->kind) == 0) {
404 				if (try_module_get(a->owner))
405 					res = a;
406 				break;
407 			}
408 		}
409 		read_unlock(&act_mod_lock);
410 	}
411 	return res;
412 }
413 
414 /* lookup by nlattr */
415 static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
416 {
417 	struct tc_action_ops *a, *res = NULL;
418 
419 	if (kind) {
420 		read_lock(&act_mod_lock);
421 		list_for_each_entry(a, &act_base, head) {
422 			if (nla_strcmp(kind, a->kind) == 0) {
423 				if (try_module_get(a->owner))
424 					res = a;
425 				break;
426 			}
427 		}
428 		read_unlock(&act_mod_lock);
429 	}
430 	return res;
431 }
432 
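/* Run the action chain on @skb: TC_ACT_PIPE continues with the next
 * action, TC_ACT_REPEAT re-runs the current one, and any other verdict
 * stops the walk and is returned to the caller.
 */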
433 int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
434 		    struct tcf_result *res)
435 {
436 	const struct tc_action *a;
437 	int ret = -1;
438 
439 	if (skb->tc_verd & TC_NCLS) {
440 		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
441 		ret = TC_ACT_OK;
442 		goto exec_done;
443 	}
444 	list_for_each_entry(a, actions, list) {
445 repeat:
446 		ret = a->ops->act(skb, a, res);
447 		if (ret == TC_ACT_REPEAT)
448 			goto repeat;	/* we need a ttl - JHS */
449 		if (ret != TC_ACT_PIPE)
450 			goto exec_done;
451 	}
452 exec_done:
453 	return ret;
454 }
455 EXPORT_SYMBOL(tcf_action_exec);
456 
457 int tcf_action_destroy(struct list_head *actions, int bind)
458 {
459 	struct tc_action *a, *tmp;
460 	int ret = 0;
461 
462 	list_for_each_entry_safe(a, tmp, actions, list) {
463 		ret = __tcf_hash_release(a, bind, true);
464 		if (ret == ACT_P_DELETED)
465 			module_put(a->ops->owner);
466 		else if (ret < 0)
467 			return ret;
468 		list_del(&a->list);
469 		kfree(a);
470 	}
471 	return ret;
472 }
473 
474 int
475 tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
476 {
477 	return a->ops->dump(skb, a, bind, ref);
478 }
479 
480 int
481 tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
482 {
483 	int err = -EINVAL;
484 	unsigned char *b = skb_tail_pointer(skb);
485 	struct nlattr *nest;
486 
487 	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
488 		goto nla_put_failure;
489 	if (tcf_action_copy_stats(skb, a, 0))
490 		goto nla_put_failure;
491 	nest = nla_nest_start(skb, TCA_OPTIONS);
492 	if (nest == NULL)
493 		goto nla_put_failure;
494 	err = tcf_action_dump_old(skb, a, bind, ref);
495 	if (err > 0) {
496 		nla_nest_end(skb, nest);
497 		return err;
498 	}
499 
500 nla_put_failure:
501 	nlmsg_trim(skb, b);
502 	return -1;
503 }
504 EXPORT_SYMBOL(tcf_action_dump_1);
505 
506 int
507 tcf_action_dump(struct sk_buff *skb, struct list_head *actions, int bind, int ref)
508 {
509 	struct tc_action *a;
510 	int err = -EINVAL;
511 	struct nlattr *nest;
512 
513 	list_for_each_entry(a, actions, list) {
514 		nest = nla_nest_start(skb, a->order);
515 		if (nest == NULL)
516 			goto nla_put_failure;
517 		err = tcf_action_dump_1(skb, a, bind, ref);
518 		if (err < 0)
519 			goto errout;
520 		nla_nest_end(skb, nest);
521 	}
522 
523 	return 0;
524 
525 nla_put_failure:
526 	err = -EINVAL;
527 errout:
528 	nla_nest_cancel(skb, nest);
529 	return err;
530 }
531 
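/* Instantiate a single action from its netlink attributes: resolve the
 * ops by kind (auto-loading "act_<kind>" if modules are available, in
 * which case -EAGAIN tells the caller to replay the request), then let
 * ops->init() create or update the instance.
 */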
532 struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
533 				    struct nlattr *est, char *name, int ovr,
534 				    int bind)
535 {
536 	struct tc_action *a;
537 	struct tc_action_ops *a_o;
538 	char act_name[IFNAMSIZ];
539 	struct nlattr *tb[TCA_ACT_MAX + 1];
540 	struct nlattr *kind;
541 	int err;
542 
543 	if (name == NULL) {
544 		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
545 		if (err < 0)
546 			goto err_out;
547 		err = -EINVAL;
548 		kind = tb[TCA_ACT_KIND];
549 		if (kind == NULL)
550 			goto err_out;
551 		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
552 			goto err_out;
553 	} else {
554 		err = -EINVAL;
555 		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
556 			goto err_out;
557 	}
558 
559 	a_o = tc_lookup_action_n(act_name);
560 	if (a_o == NULL) {
561 #ifdef CONFIG_MODULES
562 		rtnl_unlock();
563 		request_module("act_%s", act_name);
564 		rtnl_lock();
565 
566 		a_o = tc_lookup_action_n(act_name);
567 
568 		/* We dropped the RTNL semaphore in order to
569 		 * perform the module load.  So, even if we
570 		 * succeeded in loading the module we have to
571 		 * tell the caller to replay the request.  We
572 		 * indicate this using -EAGAIN.
573 		 */
574 		if (a_o != NULL) {
575 			err = -EAGAIN;
576 			goto err_mod;
577 		}
578 #endif
579 		err = -ENOENT;
580 		goto err_out;
581 	}
582 
583 	err = -ENOMEM;
584 	a = kzalloc(sizeof(*a), GFP_KERNEL);
585 	if (a == NULL)
586 		goto err_mod;
587 
588 	a->ops = a_o;
589 	INIT_LIST_HEAD(&a->list);
590 	/* backward compatibility for policer */
591 	if (name == NULL)
592 		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
593 	else
594 		err = a_o->init(net, nla, est, a, ovr, bind);
595 	if (err < 0)
596 		goto err_free;
597 
598 	/* the module count goes up only when a brand new policy is created;
599 	 * if it already exists and is merely bound to by a_o->init(),
600 	 * ACT_P_CREATED is not returned (zero is), so drop the reference.
601 	 */
602 	if (err != ACT_P_CREATED)
603 		module_put(a_o->owner);
604 
605 	return a;
606 
607 err_free:
608 	kfree(a);
609 err_mod:
610 	module_put(a_o->owner);
611 err_out:
612 	return ERR_PTR(err);
613 }
614 
615 int tcf_action_init(struct net *net, struct nlattr *nla,
616 				  struct nlattr *est, char *name, int ovr,
617 				  int bind, struct list_head *actions)
618 {
619 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
620 	struct tc_action *act;
621 	int err;
622 	int i;
623 
624 	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
625 	if (err < 0)
626 		return err;
627 
628 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
629 		act = tcf_action_init_1(net, tb[i], est, name, ovr, bind);
630 		if (IS_ERR(act)) {
631 			err = PTR_ERR(act);
632 			goto err;
633 		}
634 		act->order = i;
635 		list_add_tail(&act->list, actions);
636 	}
637 	return 0;
638 
639 err:
640 	tcf_action_destroy(actions, bind);
641 	return err;
642 }
643 
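/* Append the action's basic, rate-estimator and queue statistics to
 * @skb; with @compat_mode set, a TCA_OLD_COMPAT caller gets the legacy
 * TCA_STATS/TCA_XSTATS layout instead of TCA_ACT_STATS.
 */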
644 int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
645 			  int compat_mode)
646 {
647 	int err = 0;
648 	struct gnet_dump d;
649 	struct tcf_common *p = a->priv;
650 
651 	if (p == NULL)
652 		goto errout;
653 
654 	/* when compat_mode is true the caller expects the additional
655 	 * backward-compatibility statistics TLVs to be added as well
656 	 */
657 	if (compat_mode) {
658 		if (a->type == TCA_OLD_COMPAT)
659 			err = gnet_stats_start_copy_compat(skb, 0,
660 				TCA_STATS, TCA_XSTATS, &p->tcfc_lock, &d);
661 		else
662 			return 0;
663 	} else
664 		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
665 					    &p->tcfc_lock, &d);
666 
667 	if (err < 0)
668 		goto errout;
669 
670 	if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
671 	    gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
672 				     &p->tcfc_rate_est) < 0 ||
673 	    gnet_stats_copy_queue(&d, p->cpu_qstats,
674 				  &p->tcfc_qstats,
675 				  p->tcfc_qstats.qlen) < 0)
676 		goto errout;
677 
678 	if (gnet_stats_finish_copy(&d) < 0)
679 		goto errout;
680 
681 	return 0;
682 
683 errout:
684 	return -1;
685 }
686 
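/* Build a tcamsg netlink message with the dump of @actions nested in
 * TCA_ACT_TAB.  Returns skb->len on success or -1 (after trimming the
 * partial message) if the skb ran out of room.
 */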
687 static int
688 tca_get_fill(struct sk_buff *skb, struct list_head *actions, u32 portid, u32 seq,
689 	     u16 flags, int event, int bind, int ref)
690 {
691 	struct tcamsg *t;
692 	struct nlmsghdr *nlh;
693 	unsigned char *b = skb_tail_pointer(skb);
694 	struct nlattr *nest;
695 
696 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
697 	if (!nlh)
698 		goto out_nlmsg_trim;
699 	t = nlmsg_data(nlh);
700 	t->tca_family = AF_UNSPEC;
701 	t->tca__pad1 = 0;
702 	t->tca__pad2 = 0;
703 
704 	nest = nla_nest_start(skb, TCA_ACT_TAB);
705 	if (nest == NULL)
706 		goto out_nlmsg_trim;
707 
708 	if (tcf_action_dump(skb, actions, bind, ref) < 0)
709 		goto out_nlmsg_trim;
710 
711 	nla_nest_end(skb, nest);
712 
713 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
714 	return skb->len;
715 
716 out_nlmsg_trim:
717 	nlmsg_trim(skb, b);
718 	return -1;
719 }
720 
721 static int
722 act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
723 	       struct list_head *actions, int event)
724 {
725 	struct sk_buff *skb;
726 
727 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
728 	if (!skb)
729 		return -ENOBUFS;
730 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
731 		kfree_skb(skb);
732 		return -EINVAL;
733 	}
734 
735 	return rtnl_unicast(skb, net, portid);
736 }
737 
738 static struct tc_action *create_a(int i)
739 {
740 	struct tc_action *act;
741 
742 	act = kzalloc(sizeof(*act), GFP_KERNEL);
743 	if (act == NULL) {
744 		pr_debug("create_a: failed to alloc!\n");
745 		return NULL;
746 	}
747 	act->order = i;
748 	INIT_LIST_HEAD(&act->list);
749 	return act;
750 }
751 
752 static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
753 					  struct nlmsghdr *n, u32 portid)
754 {
755 	struct nlattr *tb[TCA_ACT_MAX + 1];
756 	struct tc_action *a;
757 	int index;
758 	int err;
759 
760 	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
761 	if (err < 0)
762 		goto err_out;
763 
764 	err = -EINVAL;
765 	if (tb[TCA_ACT_INDEX] == NULL ||
766 	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
767 		goto err_out;
768 	index = nla_get_u32(tb[TCA_ACT_INDEX]);
769 
770 	err = -ENOMEM;
771 	a = create_a(0);
772 	if (a == NULL)
773 		goto err_out;
774 
775 	err = -EINVAL;
776 	a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
777 	if (a->ops == NULL) /* could happen in a batch of actions */
778 		goto err_free;
779 	err = -ENOENT;
780 	if (a->ops->lookup(net, a, index) == 0)
781 		goto err_mod;
782 
783 	module_put(a->ops->owner);
784 	return a;
785 
786 err_mod:
787 	module_put(a->ops->owner);
788 err_free:
789 	kfree(a);
790 err_out:
791 	return ERR_PTR(err);
792 }
793 
794 static void cleanup_a(struct list_head *actions)
795 {
796 	struct tc_action *a, *tmp;
797 
798 	list_for_each_entry_safe(a, tmp, actions, list) {
799 		list_del(&a->list);
800 		kfree(a);
801 	}
802 }
803 
804 static int tca_action_flush(struct net *net, struct nlattr *nla,
805 			    struct nlmsghdr *n, u32 portid)
806 {
807 	struct sk_buff *skb;
808 	unsigned char *b;
809 	struct nlmsghdr *nlh;
810 	struct tcamsg *t;
811 	struct netlink_callback dcb;
812 	struct nlattr *nest;
813 	struct nlattr *tb[TCA_ACT_MAX + 1];
814 	struct nlattr *kind;
815 	struct tc_action a;
816 	int err = -ENOMEM;
817 
818 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
819 	if (!skb) {
820 		pr_debug("tca_action_flush: failed skb alloc\n");
821 		return err;
822 	}
823 
824 	b = skb_tail_pointer(skb);
825 
826 	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
827 	if (err < 0)
828 		goto err_out;
829 
830 	err = -EINVAL;
831 	kind = tb[TCA_ACT_KIND];
832 	memset(&a, 0, sizeof(struct tc_action));
833 	INIT_LIST_HEAD(&a.list);
834 	a.ops = tc_lookup_action(kind);
835 	if (a.ops == NULL) /* someone is trying to flush an unknown action */
836 		goto err_out;
837 
838 	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
839 	if (!nlh)
840 		goto out_module_put;
841 	t = nlmsg_data(nlh);
842 	t->tca_family = AF_UNSPEC;
843 	t->tca__pad1 = 0;
844 	t->tca__pad2 = 0;
845 
846 	nest = nla_nest_start(skb, TCA_ACT_TAB);
847 	if (nest == NULL)
848 		goto out_module_put;
849 
850 	err = a.ops->walk(net, skb, &dcb, RTM_DELACTION, &a);
851 	if (err < 0)
852 		goto out_module_put;
853 	if (err == 0)
854 		goto noflush_out;
855 
856 	nla_nest_end(skb, nest);
857 
858 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
859 	nlh->nlmsg_flags |= NLM_F_ROOT;
860 	module_put(a.ops->owner);
861 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
862 			     n->nlmsg_flags & NLM_F_ECHO);
863 	if (err > 0)
864 		return 0;
865 
866 	return err;
867 
868 out_module_put:
869 	module_put(a.ops->owner);
870 err_out:
871 noflush_out:
872 	kfree_skb(skb);
873 	return err;
874 }
875 
876 static int
877 tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
878 	       u32 portid)
879 {
880 	int ret;
881 	struct sk_buff *skb;
882 
883 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
884 	if (!skb)
885 		return -ENOBUFS;
886 
887 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
888 			 0, 1) <= 0) {
889 		kfree_skb(skb);
890 		return -EINVAL;
891 	}
892 
893 	/* now do the delete */
894 	ret = tcf_action_destroy(actions, 0);
895 	if (ret < 0) {
896 		kfree_skb(skb);
897 		return ret;
898 	}
899 
900 	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
901 			     n->nlmsg_flags & NLM_F_ECHO);
902 	if (ret > 0)
903 		return 0;
904 	return ret;
905 }
906 
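/* Handle RTM_GETACTION/RTM_DELACTION for an explicit list of actions;
 * a delete with NLM_F_ROOT is diverted to tca_action_flush().  Each
 * entry is looked up by kind and index, then either dumped back to the
 * requester or deleted with notification.
 */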
907 static int
908 tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
909 	      u32 portid, int event)
910 {
911 	int i, ret;
912 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
913 	struct tc_action *act;
914 	LIST_HEAD(actions);
915 
916 	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
917 	if (ret < 0)
918 		return ret;
919 
920 	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
921 		if (tb[1] != NULL)
922 			return tca_action_flush(net, tb[1], n, portid);
923 		else
924 			return -EINVAL;
925 	}
926 
927 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
928 		act = tcf_action_get_1(net, tb[i], n, portid);
929 		if (IS_ERR(act)) {
930 			ret = PTR_ERR(act);
931 			goto err;
932 		}
933 		act->order = i;
934 		list_add_tail(&act->list, &actions);
935 	}
936 
937 	if (event == RTM_GETACTION)
938 		ret = act_get_notify(net, portid, n, &actions, event);
939 	else { /* delete */
940 		ret = tcf_del_notify(net, n, &actions, portid);
941 		if (ret)
942 			goto err;
943 		return ret;
944 	}
945 err:
946 	cleanup_a(&actions);
947 	return ret;
948 }
949 
950 static int
951 tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
952 	       u32 portid)
953 {
954 	struct sk_buff *skb;
955 	int err = 0;
956 
957 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
958 	if (!skb)
959 		return -ENOBUFS;
960 
961 	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
962 			 RTM_NEWACTION, 0, 0) <= 0) {
963 		kfree_skb(skb);
964 		return -EINVAL;
965 	}
966 
967 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
968 			     n->nlmsg_flags & NLM_F_ECHO);
969 	if (err > 0)
970 		err = 0;
971 	return err;
972 }
973 
974 static int
975 tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
976 	       u32 portid, int ovr)
977 {
978 	int ret = 0;
979 	LIST_HEAD(actions);
980 
981 	ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
982 	if (ret)
983 		goto done;
984 
985 	/* dump then free all the actions after update; inserted policy
986 	 * stays intact
987 	 */
988 	ret = tcf_add_notify(net, n, &actions, portid);
989 	cleanup_a(&actions);
990 done:
991 	return ret;
992 }
993 
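/* rtnetlink doit handler for RTM_NEWACTION, RTM_DELACTION and
 * RTM_GETACTION.  Everything except RTM_GETACTION requires
 * CAP_NET_ADMIN; an -EAGAIN from tcf_action_add() (module autoload)
 * is replayed here.
 */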
994 static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
995 {
996 	struct net *net = sock_net(skb->sk);
997 	struct nlattr *tca[TCA_ACT_MAX + 1];
998 	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
999 	int ret = 0, ovr = 0;
1000 
1001 	if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN))
1002 		return -EPERM;
1003 
1004 	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
1005 	if (ret < 0)
1006 		return ret;
1007 
1008 	if (tca[TCA_ACT_TAB] == NULL) {
1009 		pr_notice("tc_ctl_action: received NO action attribs\n");
1010 		return -EINVAL;
1011 	}
1012 
1013 	/* n->nlmsg_flags & NLM_F_CREATE */
1014 	switch (n->nlmsg_type) {
1015 	case RTM_NEWACTION:
1016 		/* we assume that any flag combination other than an explicit
1017 		 * NLM_F_REPLACE means "create only if it doesn't exist".
1018 		 * CREATE | EXCL already implies that, but to avoid ambiguity
1019 		 * (e.g. when flags is zero) we only honour the replace flag
1020 		 * here.
1021 		 */
1022 		if (n->nlmsg_flags & NLM_F_REPLACE)
1023 			ovr = 1;
1024 replay:
1025 		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr);
1026 		if (ret == -EAGAIN)
1027 			goto replay;
1028 		break;
1029 	case RTM_DELACTION:
1030 		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
1031 				    portid, RTM_DELACTION);
1032 		break;
1033 	case RTM_GETACTION:
1034 		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
1035 				    portid, RTM_GETACTION);
1036 		break;
1037 	default:
1038 		BUG();
1039 	}
1040 
1041 	return ret;
1042 }
1043 
1044 static struct nlattr *
1045 find_dump_kind(const struct nlmsghdr *n)
1046 {
1047 	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
1048 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1049 	struct nlattr *nla[TCAA_MAX + 1];
1050 	struct nlattr *kind;
1051 
1052 	if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0)
1053 		return NULL;
1054 	tb1 = nla[TCA_ACT_TAB];
1055 	if (tb1 == NULL)
1056 		return NULL;
1057 
1058 	if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
1059 		      NLMSG_ALIGN(nla_len(tb1)), NULL) < 0)
1060 		return NULL;
1061 
1062 	if (tb[1] == NULL)
1063 		return NULL;
1064 	if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
1065 		      nla_len(tb[1]), NULL) < 0)
1066 		return NULL;
1067 	kind = tb2[TCA_ACT_KIND];
1068 
1069 	return kind;
1070 }
1071 
1072 static int
1073 tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
1074 {
1075 	struct net *net = sock_net(skb->sk);
1076 	struct nlmsghdr *nlh;
1077 	unsigned char *b = skb_tail_pointer(skb);
1078 	struct nlattr *nest;
1079 	struct tc_action_ops *a_o;
1080 	struct tc_action a;
1081 	int ret = 0;
1082 	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
1083 	struct nlattr *kind = find_dump_kind(cb->nlh);
1084 
1085 	if (kind == NULL) {
1086 		pr_info("tc_dump_action: action bad kind\n");
1087 		return 0;
1088 	}
1089 
1090 	a_o = tc_lookup_action(kind);
1091 	if (a_o == NULL)
1092 		return 0;
1093 
1094 	memset(&a, 0, sizeof(struct tc_action));
1095 	a.ops = a_o;
1096 
1097 	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1098 			cb->nlh->nlmsg_type, sizeof(*t), 0);
1099 	if (!nlh)
1100 		goto out_module_put;
1101 	t = nlmsg_data(nlh);
1102 	t->tca_family = AF_UNSPEC;
1103 	t->tca__pad1 = 0;
1104 	t->tca__pad2 = 0;
1105 
1106 	nest = nla_nest_start(skb, TCA_ACT_TAB);
1107 	if (nest == NULL)
1108 		goto out_module_put;
1109 
1110 	ret = a_o->walk(net, skb, cb, RTM_GETACTION, &a);
1111 	if (ret < 0)
1112 		goto out_module_put;
1113 
1114 	if (ret > 0) {
1115 		nla_nest_end(skb, nest);
1116 		ret = skb->len;
1117 	} else
1118 		nla_nest_cancel(skb, nest);
1119 
1120 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1121 	if (NETLINK_CB(cb->skb).portid && ret)
1122 		nlh->nlmsg_flags |= NLM_F_MULTI;
1123 	module_put(a_o->owner);
1124 	return skb->len;
1125 
1126 out_module_put:
1127 	module_put(a_o->owner);
1128 	nlmsg_trim(skb, b);
1129 	return skb->len;
1130 }
1131 
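/* Hook the handlers above into rtnetlink at boot; only RTM_GETACTION
 * gets a dump callback (tc_dump_action).
 */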
1132 static int __init tc_action_init(void)
1133 {
1134 	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, NULL);
1135 	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, NULL);
1136 	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
1137 		      NULL);
1138 
1139 	return 0;
1140 }
1141 
1142 subsys_initcall(tc_action_init);
1143