/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find a classifier type by its string name */

static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

/* Register/unregister a new classifier type */

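/* register_tcf_proto_ops() adds a classifier type to tcf_proto_base so that
 * tcf_proto_lookup_ops() can find it by name; it fails with -EEXIST if a
 * classifier of the same kind is already registered.
 *
 * A minimal usage sketch (illustrative only; "cls_example" and its callback
 * functions are hypothetical, not part of this file):
 *
 *	static struct tcf_proto_ops cls_example_ops = {
 *		.kind		= "example",
 *		.classify	= example_classify,
 *		.init		= example_init,
 *		.destroy	= example_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_example_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_example_ops);
 *	}
 *
 *	static void __exit cls_example_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_example_ops);
 *	}
 *
 *	module_init(cls_example_init);
 *	module_exit(cls_example_exit);
 */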
int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  unsigned long fh, int event, bool unicast);

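/* Walk a filter chain under RTNL and send one netlink notification per
 * tcf_proto, e.g. when a whole chain is about to be flushed.
 */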
static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct nlmsghdr *n,
				 struct tcf_proto __rcu **chain, int event)
{
	struct tcf_proto __rcu **it_chain;
	struct tcf_proto *tp;

	for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL;
	     it_chain = &tp->next)
		tfilter_notify(net, oskb, n, tp, 0, event, false);
}

/* Select a new prio value from the range managed by the kernel. */

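/* Example: tcf_auto_prio(NULL) yields 0xC0000000 (the top of the
 * kernel-managed range); with a chain head at prio N it yields N - 1, so
 * automatically chosen priorities count downwards from the head.
 */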
static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return first;
}

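/* Allocate and initialise a new tcf_proto of the given kind.  If the
 * classifier module is not loaded, RTNL is dropped while request_module()
 * runs; -EAGAIN is then returned so the caller replays the whole request
 * under a freshly taken RTNL.  On success the tcf_proto holds a reference
 * on the classifier module.
 */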
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, u32 parent, struct Qdisc *q)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	err = -ENOENT;
	tp->ops = tcf_proto_lookup_ops(kind);
	if (!tp->ops) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("cls_%s", kind);
		rtnl_lock();
		tp->ops = tcf_proto_lookup_ops(kind);
		/* We dropped the RTNL semaphore in order to perform
		 * the module load. So, even if we succeeded in loading
		 * the module we have to replay the request. We indicate
		 * this using -EAGAIN.
		 */
		if (tp->ops) {
			module_put(tp->ops->owner);
			err = -EAGAIN;
		} else {
			err = -ENOENT;
		}
#endif
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->classid = parent;
	tp->q = q;

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

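/* Tear down a single tcf_proto: let the classifier release its filters,
 * drop the module reference and free the structure after an RCU grace
 * period.
 */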
static void tcf_proto_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

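/* Unlink and destroy every tcf_proto on a filter chain.  Called under RTNL,
 * e.g. when a qdisc or class is being destroyed.
 */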
void tcf_destroy_chain(struct tcf_proto __rcu **fl)
{
	struct tcf_proto *tp;

	while ((tp = rtnl_dereference(*fl)) != NULL) {
		RCU_INIT_POINTER(*fl, tp->next);
		tcf_proto_destroy(tp);
	}
}
EXPORT_SYMBOL(tcf_destroy_chain);

/* Add/change/delete/get a filter node */

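/* Handler for RTM_NEWTFILTER, RTM_DELTFILTER and RTM_GETTFILTER.  It locates
 * the device, qdisc and filter chain named by the tcmsg header, creates the
 * tcf_proto on demand for RTM_NEWTFILTER with NLM_F_CREATE, and dispatches
 * to the classifier's get/change/delete callbacks.  An error of -EAGAIN
 * (e.g. from tcf_proto_create() after a classifier module was just loaded)
 * causes the whole request to be replayed from the top.
 */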
static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 nprio;
	u32 parent;
	struct net_device *dev;
	struct Qdisc  *q;
	struct tcf_proto __rcu **back;
	struct tcf_proto __rcu **chain;
	struct tcf_proto *next;
	struct tcf_proto *tp;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	unsigned long fh;
	int err;
	int tp_created;

	if ((n->nlmsg_type != RTM_GETTFILTER) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	nprio = prio;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		switch (n->nlmsg_type) {
		case RTM_DELTFILTER:
			if (protocol || t->tcm_handle || tca[TCA_KIND])
				return -ENOENT;
			break;
		case RTM_NEWTFILTER:
			/* If no priority is provided by the user,
			 * we allocate one.
			 */
			if (n->nlmsg_flags & NLM_F_CREATE) {
				prio = TC_H_MAKE(0x80000000U, 0U);
				break;
			}
			/* fall-through */
		default:
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	/* Find link */
	dev = __dev_get_by_index(net, t->tcm_ifindex);
	if (dev == NULL)
		return -ENODEV;

	/* Find qdisc */
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
		if (q == NULL)
			return -EINVAL;
	}

	/* Is it classful? */
	cops = q->ops->cl_ops;
	if (!cops)
		return -EINVAL;

	if (cops->tcf_chain == NULL)
		return -EOPNOTSUPP;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		cl = cops->get(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* And the last stroke */
	chain = cops->tcf_chain(q, cl);
	if (chain == NULL) {
		err = -EINVAL;
		goto errout;
	}
	if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
		tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER);
		tcf_destroy_chain(chain);
		err = 0;
		goto errout;
	}

	/* Check the chain for existence of proto-tcf with this priority */
	for (back = chain;
	     (tp = rtnl_dereference(*back)) != NULL;
	     back = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (!nprio ||
				    (tp->protocol != protocol && protocol)) {
					err = -EINVAL;
					goto errout;
				}
			} else {
				tp = NULL;
			}
			break;
		}
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			err = -EINVAL;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}

		if (!nprio)
			nprio = TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back)));

		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
				      protocol, nprio, parent, q);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout;
		}
		tp_created = 1;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (fh == 0) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			next = rtnl_dereference(tp->next);
			RCU_INIT_POINTER(*back, next);
			tfilter_notify(net, skb, n, tp, fh,
				       RTM_DELTFILTER, false);
			tcf_proto_destroy(tp);
			err = 0;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}
	} else {
		bool last;

		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			if (n->nlmsg_flags & NLM_F_EXCL) {
				if (tp_created)
					tcf_proto_destroy(tp);
				err = -EEXIST;
				goto errout;
			}
			break;
		case RTM_DELTFILTER:
			err = tp->ops->delete(tp, fh, &last);
			if (err)
				goto errout;
			next = rtnl_dereference(tp->next);
			tfilter_notify(net, skb, n, tp, t->tcm_handle,
				       RTM_DELTFILTER, false);
			if (last) {
				RCU_INIT_POINTER(*back, next);
				tcf_proto_destroy(tp);
			}
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(net, skb, n, tp, fh,
					     RTM_NEWTFILTER, true);
			goto errout;
		default:
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE);
	if (err == 0) {
		if (tp_created) {
			RCU_INIT_POINTER(tp->next, rtnl_dereference(*back));
			rcu_assign_pointer(*back, tp);
		}
		tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false);
	} else {
		if (tp_created)
			tcf_proto_destroy(tp);
	}

errout:
	if (cl)
		cops->put(q, cl);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}

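/* Fill in a tcmsg header and the classifier-specific attributes for the
 * filter identified by fh.  Returns skb->len on success; on failure the
 * partially written message is trimmed away and -1 is returned.
 */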
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, unsigned long fh, u32 portid,
			 u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
	tcm->tcm_parent = tp->classid;
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	tcm->tcm_handle = fh;
	if (RTM_DELTFILTER != event) {
		tcm->tcm_handle = 0;
		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

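/* Allocate an skb, fill it via tcf_fill_node() and either unicast it back to
 * the requesting socket or multicast it to the RTNLGRP_TC group.
 */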
static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  unsigned long fh, int event, bool unicast)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
};

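/* Walker callback used by tc_dump_tfilter(): emits one RTM_NEWTFILTER entry
 * per filter element while the classifier iterates its internal storage.
 */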
static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
			 struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER);
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int t;
	int s_t;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_proto *tp, __rcu **chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	unsigned long cl = 0;
	const struct Qdisc_class_ops *cops;
	struct tcf_dump_args arg;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;
	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return skb->len;

	if (!tcm->tcm_parent)
		q = dev->qdisc;
	else
		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
	if (!q)
		goto out;
	cops = q->ops->cl_ops;
	if (!cops)
		goto errout;
	if (cops->tcf_chain == NULL)
		goto errout;
	if (TC_H_MIN(tcm->tcm_parent)) {
		cl = cops->get(q, tcm->tcm_parent);
		if (cl == 0)
			goto errout;
	}
	chain = cops->tcf_chain(q, cl);
	if (chain == NULL)
		goto errout;

	s_t = cb->args[0];

	for (tp = rtnl_dereference(*chain), t = 0;
	     tp; tp = rtnl_dereference(tp->next), t++) {
		if (t < s_t)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0,
			       sizeof(cb->args)-sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, 0,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				break;

			cb->args[1] = 1;
		}
		if (tp->ops->walk == NULL)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			break;
	}

	cb->args[0] = t;

errout:
	if (cl)
		cops->put(q, cl);
out:
	return skb->len;
}

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	LIST_HEAD(actions);

	tcf_exts_to_list(exts, &actions);
	tcf_action_destroy(&actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

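/* Parse the action extensions attached to a filter.  With CONFIG_NET_CLS_ACT
 * either a legacy "police" attribute or a list of actions is accepted;
 * without it any such attribute is rejected with -EOPNOTSUPP.
 */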
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
						"police", ovr, TCA_ACT_BIND);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			LIST_HEAD(actions);
			int err, i = 0;

			err = tcf_action_init(net, tb[exts->action], rate_tlv,
					      NULL, ovr, TCA_ACT_BIND,
					      &actions);
			if (err)
				return err;
			list_for_each_entry(act, &actions, list)
				exts->actions[i++] = act;
			exts->nr_actions = i;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police]))
		return -EOPNOTSUPP;
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

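/* Replace the actions of dst with those of src under the tcf tree lock and
 * dispose of the old ones afterwards.
 */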
void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
		     struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	tcf_tree_lock(tp);
	dst->nr_actions = src->nr_actions;
	dst->actions = src->actions;
	dst->type = src->type;
	tcf_tree_unlock(tp);

	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

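/* Dump the filter's actions into a netlink message, using either the current
 * nested-action format or the old single "police" format for compatibility.
 */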
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && exts->nr_actions) {
		/*
		 * Again for backward-compatible mode: we want to work with
		 * both old and new modes of entering tc data, even if
		 * iproute2 is newer. - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			LIST_HEAD(actions);

			nest = nla_nest_start(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			tcf_exts_to_list(exts, &actions);
			if (tcf_action_dump(skb, &actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);


int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

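/* Look through the filter's actions for one that can report the device it is
 * bound to (mirred, for instance) and return that device in *hw_dev for use
 * by hardware offload.
 */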
int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
		     struct net_device **hw_dev)
{
#ifdef CONFIG_NET_CLS_ACT
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (a->ops->get_dev) {
			a->ops->get_dev(a, dev_net(dev), hw_dev);
			break;
		}
	}
	if (*hw_dev)
		return 0;
#endif
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(tcf_exts_get_dev);

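/* Register the rtnetlink handlers for filter add/delete/get and dump at
 * subsystem init time.
 */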
static int __init tc_filter_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter, NULL);

	return 0;
}

subsys_initcall(tc_filter_init);