/*
 * net/sched/act_api.c	Packet action API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 *
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/netlink.h>

static void free_tcf(struct rcu_head *head)
{
	struct tcf_common *p = container_of(head, struct tcf_common, tcfc_rcu);

	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_qstats);
	kfree(p);
}

static void tcf_hash_destroy(struct tcf_hashinfo *hinfo, struct tc_action *a)
{
	struct tcf_common *p = a->priv;

	spin_lock_bh(&hinfo->lock);
	hlist_del(&p->tcfc_head);
	spin_unlock_bh(&hinfo->lock);
	gen_kill_estimator(&p->tcfc_bstats,
			   &p->tcfc_rate_est);
	/*
	 * gen_estimator est_timer() might access p->tcfc_lock
	 * or bstats; wait an RCU grace period before freeing p.
	 */
	call_rcu(&p->tcfc_rcu, free_tcf);
}

/* Drop a reference on the action (and a bind reference when @bind is
 * true).  Returns ACT_P_DELETED when the last reference is gone and the
 * action has been destroyed, -EPERM when @strict is set and the action
 * is still bound, and 0 otherwise.
 */
int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
{
	struct tcf_common *p = a->priv;
	int ret = 0;

	if (p) {
		if (bind)
			p->tcfc_bindcnt--;
		else if (strict && p->tcfc_bindcnt > 0)
			return -EPERM;

		p->tcfc_refcnt--;
		if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
			if (a->ops->cleanup)
				a->ops->cleanup(a, bind);
			tcf_hash_destroy(a->hinfo, a);
			ret = ACT_P_DELETED;
		}
	}

	return ret;
}
EXPORT_SYMBOL(__tcf_hash_release);

static int tcf_dump_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb,
			   struct netlink_callback *cb, struct tc_action *a)
{
	struct hlist_head *head;
	struct tcf_common *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct nlattr *nest;

	spin_lock_bh(&hinfo->lock);

	s_i = cb->args[0];

	for (i = 0; i < (hinfo->hmask + 1); i++) {
		head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];

		hlist_for_each_entry_rcu(p, head, tcfc_head) {
			index++;
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = n_i;

			nest = nla_nest_start(skb, a->order);
			if (nest == NULL)
				goto nla_put_failure;
			err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				nlmsg_trim(skb, nest);
				goto done;
			}
			nla_nest_end(skb, nest);
			n_i++;
			if (n_i >= TCA_ACT_MAX_PRIO)
				goto done;
		}
	}
done:
	spin_unlock_bh(&hinfo->lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_del_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb,
			  struct tc_action *a)
{
	struct hlist_head *head;
	struct hlist_node *n;
	struct tcf_common *p;
	struct nlattr *nest;
	int i = 0, n_i = 0;
	int ret = -EINVAL;

	nest = nla_nest_start(skb, a->order);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	for (i = 0; i < (hinfo->hmask + 1); i++) {
		head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
		hlist_for_each_entry_safe(p, n, head, tcfc_head) {
			a->priv = p;
			ret = __tcf_hash_release(a, false, true);
			if (ret == ACT_P_DELETED) {
				module_put(a->ops->owner);
				n_i++;
			} else if (ret < 0)
				goto nla_put_failure;
		}
	}
	if (nla_put_u32(skb, TCA_FCNT, n_i))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       struct tc_action *a)
{
	struct tcf_hashinfo *hinfo = tn->hinfo;

	a->hinfo = hinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(hinfo, skb, a);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(hinfo, skb, cb, a);
	} else {
		WARN(1, "tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);
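
/*
 * Illustrative sketch (not part of this file): in this kernel generation,
 * an action module's ->walk() callback typically just resolves its own
 * per-netns struct tc_action_net and delegates to tcf_generic_walker().
 * The foo_* names and foo_net_id below are hypothetical; a real module
 * would obtain the id from its pernet registration and would include
 * <net/netns/generic.h> for net_generic().
 */
#if 0	/* example only, not compiled */
static int foo_walker(struct net *net, struct sk_buff *skb,
		      struct netlink_callback *cb, int type,
		      struct tc_action *a)
{
	struct tc_action_net *tn = net_generic(net, foo_net_id);

	return tcf_generic_walker(tn, skb, cb, type, a);
}
#endif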

static struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p = NULL;
	struct hlist_head *head;

	spin_lock_bh(&hinfo->lock);
	head = &hinfo->htab[tcf_hash(index, hinfo->hmask)];
	hlist_for_each_entry_rcu(p, head, tcfc_head)
		if (p->tcfc_index == index)
			break;
	spin_unlock_bh(&hinfo->lock);

	return p;
}

u32 tcf_hash_new_index(struct tc_action_net *tn)
{
	struct tcf_hashinfo *hinfo = tn->hinfo;
	u32 val = hinfo->index;

	do {
		if (++val == 0)
			val = 1;
	} while (tcf_hash_lookup(val, hinfo));

	hinfo->index = val;
	return val;
}
EXPORT_SYMBOL(tcf_hash_new_index);

int tcf_hash_search(struct tc_action_net *tn, struct tc_action *a, u32 index)
{
	struct tcf_hashinfo *hinfo = tn->hinfo;
	struct tcf_common *p = tcf_hash_lookup(index, hinfo);

	if (p) {
		a->priv = p;
		a->hinfo = hinfo;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_hash_search);

int tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a,
		   int bind)
{
	struct tcf_hashinfo *hinfo = tn->hinfo;
	struct tcf_common *p = NULL;

	if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
		if (bind)
			p->tcfc_bindcnt++;
		p->tcfc_refcnt++;
		a->priv = p;
		a->hinfo = hinfo;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_hash_check);

void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
{
	struct tcf_common *pc = a->priv;

	if (est)
		gen_kill_estimator(&pc->tcfc_bstats,
				   &pc->tcfc_rate_est);
	call_rcu(&pc->tcfc_rcu, free_tcf);
}
EXPORT_SYMBOL(tcf_hash_cleanup);

int tcf_hash_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		    struct tc_action *a, int size, int bind, bool cpustats)
{
	struct tcf_common *p = kzalloc(size, GFP_KERNEL);
	struct tcf_hashinfo *hinfo = tn->hinfo;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	p->tcfc_refcnt = 1;
	if (bind)
		p->tcfc_bindcnt = 1;

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats) {
err1:
			kfree(p);
			return err;
		}
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats) {
err2:
			free_percpu(p->cpu_bstats);
			goto err1;
		}
	}
	spin_lock_init(&p->tcfc_lock);
	INIT_HLIST_NODE(&p->tcfc_head);
	p->tcfc_index = index ? index : tcf_hash_new_index(tn);
	p->tcfc_tm.install = jiffies;
	p->tcfc_tm.lastuse = jiffies;
	if (est) {
		err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
					&p->tcfc_rate_est,
					&p->tcfc_lock, est);
		if (err) {
			free_percpu(p->cpu_qstats);
			goto err2;
		}
	}

	a->priv = (void *) p;
	a->hinfo = hinfo;
	return 0;
}
EXPORT_SYMBOL(tcf_hash_create);

void tcf_hash_insert(struct tc_action_net *tn, struct tc_action *a)
{
	struct tcf_common *p = a->priv;
	struct tcf_hashinfo *hinfo = tn->hinfo;
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);

	spin_lock_bh(&hinfo->lock);
	hlist_add_head(&p->tcfc_head, &hinfo->htab[h]);
	spin_unlock_bh(&hinfo->lock);
}
EXPORT_SYMBOL(tcf_hash_insert);
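
/*
 * Illustrative sketch (not part of this file): the usual shape of an
 * action's ->init() callback built on tcf_hash_check(), tcf_hash_create()
 * and tcf_hash_insert().  The foo_* names are hypothetical and the
 * action-specific parameter handling is deliberately elided.
 */
#if 0	/* example only, not compiled */
static int foo_init_common(struct tc_action_net *tn, u32 index,
			   struct nlattr *est, struct tc_action *a, int bind)
{
	int ret;

	if (tcf_hash_check(tn, index, a, bind))
		return 0;	/* existing entry: ref/bind counts were bumped */

	ret = tcf_hash_create(tn, index, est, a,
			      sizeof(struct tcf_common), bind, false);
	if (ret)
		return ret;

	/* ... apply action-specific parameters to a->priv here ... */

	tcf_hash_insert(tn, a);	/* make the new entry visible in the hash */
	return ACT_P_CREATED;
}
#endif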

void tcf_hashinfo_destroy(const struct tc_action_ops *ops,
			  struct tcf_hashinfo *hinfo)
{
	struct tc_action a = {
		.ops = ops,
		.hinfo = hinfo,
	};
	int i;

	for (i = 0; i < hinfo->hmask + 1; i++) {
		struct tcf_common *p;
		struct hlist_node *n;

		hlist_for_each_entry_safe(p, n, &hinfo->htab[i], tcfc_head) {
			int ret;

			a.priv = p;
			ret = __tcf_hash_release(&a, false, true);
			if (ret == ACT_P_DELETED)
				module_put(ops->owner);
			else if (ret < 0)
				return;
		}
	}
	kfree(hinfo->htab);
}
EXPORT_SYMBOL(tcf_hashinfo_destroy);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	ret = register_pernet_subsys(ops);
	if (ret) {
		tcf_unregister_action(act, ops);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	unregister_pernet_subsys(ops);

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);
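
/*
 * Illustrative sketch (not part of this file): how an action module of
 * this vintage pairs tcf_register_action() with its per-netns operations
 * and undoes both on exit.  act_foo_ops and foo_net_ops are hypothetical
 * and assumed to be defined elsewhere in that module.
 */
#if 0	/* example only, not compiled */
static int __init foo_init_module(void)
{
	return tcf_register_action(&act_foo_ops, &foo_net_ops);
}

static void __exit foo_cleanup_module(void)
{
	tcf_unregister_action(&act_foo_ops, &foo_net_ops);
}

module_init(foo_init_module);
module_exit(foo_cleanup_module);
#endif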

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
		    struct tcf_result *res)
{
	const struct tc_action *a;
	int ret = -1;

	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		ret = TC_ACT_OK;
		goto exec_done;
	}
	list_for_each_entry(a, actions, list) {
repeat:
		ret = a->ops->act(skb, a, res);
		if (ret == TC_ACT_REPEAT)
			goto repeat;	/* we need a ttl - JHS */
		if (ret != TC_ACT_PIPE)
			goto exec_done;
	}
exec_done:
	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

int tcf_action_destroy(struct list_head *actions, int bind)
{
	struct tc_action *a, *tmp;
	int ret = 0;

	list_for_each_entry_safe(a, tmp, actions, list) {
		ret = __tcf_hash_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(a->ops->owner);
		else if (ret < 0)
			return ret;
		list_del(&a->list);
		kfree(a);
	}
	return ret;
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int
tcf_action_dump(struct sk_buff *skb, struct list_head *actions, int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL;
	struct nlattr *nest;

	list_for_each_entry(a, actions, list) {
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
				    struct nlattr *est, char *name, int ovr,
				    int bind)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (kind == NULL)
			goto err_out;
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	} else {
		err = -EINVAL;
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("act_%s", act_name);
		rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load.  So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request.  We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		err = -ENOENT;
		goto err_out;
	}

	err = -ENOMEM;
	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (a == NULL)
		goto err_mod;

	a->ops = a_o;
	INIT_LIST_HEAD(&a->list);
	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
	else
		err = a_o->init(net, nla, est, a, ovr, bind);
	if (err < 0)
		goto err_free;

	/* The module reference is kept only when a brand new policy is
	 * created; if the policy already exists and is merely bound to
	 * in a_o->init(), ACT_P_CREATED is not returned (zero is), so
	 * drop the reference taken by the lookup above.
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);

	return a;

err_free:
	kfree(a);
err_mod:
	module_put(a_o->owner);
err_out:
	return ERR_PTR(err);
}

int tcf_action_init(struct net *net, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr,
		    int bind, struct list_head *actions)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tb[i], est, name, ovr, bind);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		list_add_tail(&act->list, actions);
	}
	return 0;

err:
	tcf_action_destroy(actions, bind);
	return err;
}

int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;
	struct tcf_common *p = a->priv;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (a->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfc_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfc_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
				     &p->tcfc_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfc_qstats,
				  p->tcfc_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int
tca_get_fill(struct sk_buff *skb, struct list_head *actions, u32 portid, u32 seq,
	     u16 flags, int event, int bind, int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

static int
act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct list_head *actions, int event)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}

static struct tc_action *create_a(int i)
{
	struct tc_action *act;

	act = kzalloc(sizeof(*act), GFP_KERNEL);
	if (act == NULL) {
		pr_debug("create_a: failed to alloc!\n");
		return NULL;
	}
	act->order = i;
	INIT_LIST_HEAD(&act->list);
	return act;
}

static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
		goto err_out;
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -ENOMEM;
	a = create_a(0);
	if (a == NULL)
		goto err_out;

	err = -EINVAL;
	a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (a->ops == NULL) /* could happen in a batch of actions */
		goto err_free;
	err = -ENOENT;
	if (a->ops->lookup(net, a, index) == 0)
		goto err_mod;

	module_put(a->ops->owner);
	return a;

err_mod:
	module_put(a->ops->owner);
err_free:
	kfree(a);
err_out:
	return ERR_PTR(err);
}

static void cleanup_a(struct list_head *actions)
{
	struct tc_action *a, *tmp;

	list_for_each_entry_safe(a, tmp, actions, list) {
		list_del(&a->list);
		kfree(a);
	}
}

static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	struct tc_action a;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		pr_debug("tca_action_flush: failed skb alloc\n");
		return err;
	}

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	memset(&a, 0, sizeof(struct tc_action));
	INIT_LIST_HEAD(&a.list);
	a.ops = tc_lookup_action(kind);
	if (a.ops == NULL) /* someone is trying to flush an unknown action */
		goto err_out;

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	err = a.ops->walk(net, skb, &dcb, RTM_DELACTION, &a);
	if (err < 0)
		goto out_module_put;
	if (err == 0)
		goto noflush_out;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(a.ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;

	return err;

out_module_put:
	module_put(a.ops->owner);
err_out:
noflush_out:
	kfree_skb(skb);
	return err;
}

static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 1) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_destroy(actions, 0);
	if (ret < 0) {
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	LIST_HEAD(actions);

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1] != NULL)
			return tca_action_flush(net, tb[1], n, portid);
		else
			return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		list_add_tail(&act->list, &actions);
	}

	if (event == RTM_GETACTION)
		ret = act_get_notify(net, portid, n, &actions, event);
	else { /* delete */
		ret = tcf_del_notify(net, n, &actions, portid);
		if (ret)
			goto err;
		return ret;
	}
err:
	cleanup_a(&actions);
	return ret;
}

static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid)
{
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;
}

static int
tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	       u32 portid, int ovr)
{
	int ret = 0;
	LIST_HEAD(actions);

	ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
	if (ret)
		goto done;

	/* dump then free all the actions after update; inserted policy
	 * stays intact
	 */
	ret = tcf_add_notify(net, n, &actions, portid);
	cleanup_a(&actions);
done:
	return ret;
}

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ACT_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = 0, ovr = 0;

	if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		pr_notice("tc_ctl_action: received NO action attribs\n");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We assume that all flags other than NLM_F_REPLACE imply
		 * "create only if it doesn't exist".  CREATE | EXCL already
		 * implies that, but to avoid ambiguity (e.g. when flags is
		 * zero) only NLM_F_REPLACE selects overwrite mode.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
replay:
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *
find_dump_kind(const struct nlmsghdr *n)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *nla[TCAA_MAX + 1];
	struct nlattr *kind;

	if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0)
		return NULL;
	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
		      NLMSG_ALIGN(nla_len(tb1)), NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
		      nla_len(tb[1]), NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

static int
tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	struct tc_action a;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *kind = find_dump_kind(cb->nlh);

	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	memset(&a, 0, sizeof(struct tc_action));
	a.ops = a_o;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, &a);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
	} else
		nla_nest_cancel(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      NULL);

	return 0;
}

subsys_initcall(tc_action_init);