xref: /openbmc/linux/net/sched/sch_api.c (revision 110e6f26)
1 /*
2  * net/sched/sch_api.c	Packet scheduler API.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10  *
11  * Fixes:
12  *
13  * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
14  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
15  * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
16  */
17 
18 #include <linux/module.h>
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/errno.h>
23 #include <linux/skbuff.h>
24 #include <linux/init.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/kmod.h>
28 #include <linux/list.h>
29 #include <linux/hrtimer.h>
30 #include <linux/lockdep.h>
31 #include <linux/slab.h>
32 
33 #include <net/net_namespace.h>
34 #include <net/sock.h>
35 #include <net/netlink.h>
36 #include <net/pkt_sched.h>
37 
38 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
39 			struct nlmsghdr *n, u32 clid,
40 			struct Qdisc *old, struct Qdisc *new);
41 static int tclass_notify(struct net *net, struct sk_buff *oskb,
42 			 struct nlmsghdr *n, struct Qdisc *q,
43 			 unsigned long cl, int event);
44 
45 /*
46 
47    Short review.
48    -------------
49 
50    This file consists of two interrelated parts:
51 
52    1. queueing disciplines manager frontend.
53    2. traffic classes manager frontend.
54 
55    Generally, a queueing discipline ("qdisc") is a black box
56    that is able to enqueue packets and to dequeue them (when
57    the device is ready to send something) in the order and at the times
58    determined by the algorithm hidden inside it.
59 
60    qdiscs are divided into two categories:
61    - "queues", which have no internal structure visible from outside.
62    - "schedulers", which split all packets into "traffic classes",
63      using "packet classifiers" (see cls_api.c).
64 
65    In turn, classes may have child qdiscs (as a rule, queues)
66    attached to them, etc.
67 
68    The goal of the routines in this file is to translate
69    the information supplied by the user in the form of handles
70    into a form more intelligible to the kernel, to perform some sanity
71    checks and the part of the work that is common to all qdiscs,
72    and to provide rtnetlink notifications.
73 
74    All real intelligent work is done inside qdisc modules.
75 
76 
77 
78    Every discipline has two major routines: enqueue and dequeue.
79 
80    ---dequeue
81 
82    dequeue usually returns an skb to send. It is allowed to return NULL,
83    but that does not mean the queue is empty; it just means that the
84    discipline does not want to send anything at this time.
85    The queue is really empty only if q->q.qlen == 0.
86    For complicated disciplines with multiple queues, q->q is not the
87    real packet queue, but q->q.qlen must nevertheless be valid.
88 
89    ---enqueue
90 
91    enqueue returns 0 if the packet was enqueued successfully.
92    If a packet (this one or another one) was dropped, it returns
93    a non-zero error code.
94    NET_XMIT_DROP 	- this packet was dropped.
95      Expected action: do not back off, but wait until the queue clears.
96    NET_XMIT_CN	 	- this packet was probably enqueued, but another one was dropped.
97      Expected action: back off or ignore.
98    NET_XMIT_POLICED	- dropped by the policer.
99      Expected action: back off or report an error to real-time apps.
100 
101    Auxiliary routines:
102 
103    ---peek
104 
105    like dequeue but without removing a packet from the queue
106 
107    ---reset
108 
109    returns the qdisc to its initial state: purges all buffers, clears all
110    timers and counters (except for statistics), etc.
111 
112    ---init
113 
114    initializes a newly created qdisc.
115 
116    ---destroy
117 
118    destroys resources allocated by init and during the lifetime of the qdisc.
119 
120    ---change
121 
122    changes qdisc parameters.
123  */
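/* As a rough illustration of the contract described above (a minimal,
 * purely illustrative sketch; "q" is assumed to be an already attached
 * qdisc and "skb" a packet coming from the transmit path):
 *
 *	ret = q->enqueue(skb, q);
 *	// NET_XMIT_SUCCESS: accepted
 *	// NET_XMIT_DROP:    this skb was dropped, do not back off
 *	// NET_XMIT_CN:      probably enqueued, but another skb was dropped
 *
 *	skb = q->dequeue(q);
 *	// skb == NULL with q->q.qlen != 0 means "nothing to send right now",
 *	// not "the queue is empty"
 */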
124 
125 /* Protects the list of registered TC modules. It is a pure SMP lock. */
126 static DEFINE_RWLOCK(qdisc_mod_lock);
127 
128 
129 /************************************************
130  *	Queueing disciplines manipulation.	*
131  ************************************************/
132 
133 
134 /* The list of all installed queueing disciplines. */
135 
136 static struct Qdisc_ops *qdisc_base;
137 
138 /* Register/unregister queueing discipline */
139 
140 int register_qdisc(struct Qdisc_ops *qops)
141 {
142 	struct Qdisc_ops *q, **qp;
143 	int rc = -EEXIST;
144 
145 	write_lock(&qdisc_mod_lock);
146 	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
147 		if (!strcmp(qops->id, q->id))
148 			goto out;
149 
150 	if (qops->enqueue == NULL)
151 		qops->enqueue = noop_qdisc_ops.enqueue;
152 	if (qops->peek == NULL) {
153 		if (qops->dequeue == NULL)
154 			qops->peek = noop_qdisc_ops.peek;
155 		else
156 			goto out_einval;
157 	}
158 	if (qops->dequeue == NULL)
159 		qops->dequeue = noop_qdisc_ops.dequeue;
160 
161 	if (qops->cl_ops) {
162 		const struct Qdisc_class_ops *cops = qops->cl_ops;
163 
164 		if (!(cops->get && cops->put && cops->walk && cops->leaf))
165 			goto out_einval;
166 
167 		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
168 			goto out_einval;
169 	}
170 
171 	qops->next = NULL;
172 	*qp = qops;
173 	rc = 0;
174 out:
175 	write_unlock(&qdisc_mod_lock);
176 	return rc;
177 
178 out_einval:
179 	rc = -EINVAL;
180 	goto out;
181 }
182 EXPORT_SYMBOL(register_qdisc);
183 
184 int unregister_qdisc(struct Qdisc_ops *qops)
185 {
186 	struct Qdisc_ops *q, **qp;
187 	int err = -ENOENT;
188 
189 	write_lock(&qdisc_mod_lock);
190 	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
191 		if (q == qops)
192 			break;
193 	if (q) {
194 		*qp = q->next;
195 		q->next = NULL;
196 		err = 0;
197 	}
198 	write_unlock(&qdisc_mod_lock);
199 	return err;
200 }
201 EXPORT_SYMBOL(unregister_qdisc);
202 
203 /* Get default qdisc if not otherwise specified */
204 void qdisc_get_default(char *name, size_t len)
205 {
206 	read_lock(&qdisc_mod_lock);
207 	strlcpy(name, default_qdisc_ops->id, len);
208 	read_unlock(&qdisc_mod_lock);
209 }
210 
211 static struct Qdisc_ops *qdisc_lookup_default(const char *name)
212 {
213 	struct Qdisc_ops *q = NULL;
214 
215 	for (q = qdisc_base; q; q = q->next) {
216 		if (!strcmp(name, q->id)) {
217 			if (!try_module_get(q->owner))
218 				q = NULL;
219 			break;
220 		}
221 	}
222 
223 	return q;
224 }
225 
226 /* Set new default qdisc to use */
227 int qdisc_set_default(const char *name)
228 {
229 	const struct Qdisc_ops *ops;
230 
231 	if (!capable(CAP_NET_ADMIN))
232 		return -EPERM;
233 
234 	write_lock(&qdisc_mod_lock);
235 	ops = qdisc_lookup_default(name);
236 	if (!ops) {
237 		/* Not found, drop lock and try to load module */
238 		write_unlock(&qdisc_mod_lock);
239 		request_module("sch_%s", name);
240 		write_lock(&qdisc_mod_lock);
241 
242 		ops = qdisc_lookup_default(name);
243 	}
244 
245 	if (ops) {
246 		/* Set new default */
247 		module_put(default_qdisc_ops->owner);
248 		default_qdisc_ops = ops;
249 	}
250 	write_unlock(&qdisc_mod_lock);
251 
252 	return ops ? 0 : -ENOENT;
253 }
254 
255 /* We know the handle. Find the qdisc among all qdiscs attached to the device
256  * (the root qdisc, all its children, children of children, etc.).
257  * Note: the caller must hold either the rtnl lock or rcu_read_lock().
258  */
259 
260 static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
261 {
262 	struct Qdisc *q;
263 
264 	if (!(root->flags & TCQ_F_BUILTIN) &&
265 	    root->handle == handle)
266 		return root;
267 
268 	list_for_each_entry_rcu(q, &root->list, list) {
269 		if (q->handle == handle)
270 			return q;
271 	}
272 	return NULL;
273 }
274 
275 void qdisc_list_add(struct Qdisc *q)
276 {
277 	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
278 		struct Qdisc *root = qdisc_dev(q)->qdisc;
279 
280 		WARN_ON_ONCE(root == &noop_qdisc);
281 		ASSERT_RTNL();
282 		list_add_tail_rcu(&q->list, &root->list);
283 	}
284 }
285 EXPORT_SYMBOL(qdisc_list_add);
286 
287 void qdisc_list_del(struct Qdisc *q)
288 {
289 	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
290 		ASSERT_RTNL();
291 		list_del_rcu(&q->list);
292 	}
293 }
294 EXPORT_SYMBOL(qdisc_list_del);
295 
296 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
297 {
298 	struct Qdisc *q;
299 
300 	q = qdisc_match_from_root(dev->qdisc, handle);
301 	if (q)
302 		goto out;
303 
304 	if (dev_ingress_queue(dev))
305 		q = qdisc_match_from_root(
306 			dev_ingress_queue(dev)->qdisc_sleeping,
307 			handle);
308 out:
309 	return q;
310 }
311 
312 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
313 {
314 	unsigned long cl;
315 	struct Qdisc *leaf;
316 	const struct Qdisc_class_ops *cops = p->ops->cl_ops;
317 
318 	if (cops == NULL)
319 		return NULL;
320 	cl = cops->get(p, classid);
321 
322 	if (cl == 0)
323 		return NULL;
324 	leaf = cops->leaf(p, cl);
325 	cops->put(p, cl);
326 	return leaf;
327 }
328 
329 /* Find queueing discipline by name */
330 
331 static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
332 {
333 	struct Qdisc_ops *q = NULL;
334 
335 	if (kind) {
336 		read_lock(&qdisc_mod_lock);
337 		for (q = qdisc_base; q; q = q->next) {
338 			if (nla_strcmp(kind, q->id) == 0) {
339 				if (!try_module_get(q->owner))
340 					q = NULL;
341 				break;
342 			}
343 		}
344 		read_unlock(&qdisc_mod_lock);
345 	}
346 	return q;
347 }
348 
349 /* The linklayer setting was not transferred from iproute2 in older
350  * versions, and the rate table lookup system has been dropped in
351  * the kernel. To stay backward compatible with older iproute2 tc
352  * utils, we detect the linklayer setting by detecting whether the rate
353  * table was modified.
354  *
355  * For linklayer ATM table entries, the rate table will be aligned to
356  * 48 bytes, thus some table entries will contain the same value.  The
357  * mpu (min packet unit) is also encoded into the old rate table, thus
358  * starting from the mpu, we find the low and high table entries for
359  * mapping this cell.  If these entries contain the same value, then
360  * the rate table has been modified for linklayer ATM.
361  *
362  * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
363  * then rounding up to the next cell, calculating the table entry one below,
364  * and comparing the two.
365  */
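/* For example (an illustrative calculation, assuming mpu == 0 and
 * cell_log == 3, i.e. 8-byte buckets): low = roundup(0, 48) = 0,
 * high = roundup(1, 48) = 48, so cell_low = 0 and
 * cell_high = (48 >> 3) - 1 = 5.  On an ATM-adjusted table the packet
 * sizes covered by entries 0..5 (1 to 48 bytes) all fit into a single
 * 48-byte cell and therefore share one value, so rtab[0] == rtab[5]
 * and TC_LINKLAYER_ATM is reported; on an unmodified Ethernet table
 * the entries grow with packet size and the comparison fails.
 */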
366 static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
367 {
368 	int low       = roundup(r->mpu, 48);
369 	int high      = roundup(low+1, 48);
370 	int cell_low  = low >> r->cell_log;
371 	int cell_high = (high >> r->cell_log) - 1;
372 
373 	/* rtab is too inaccurate at rates > 100Mbit/s */
374 	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
375 		pr_debug("TC linklayer: Giving up ATM detection\n");
376 		return TC_LINKLAYER_ETHERNET;
377 	}
378 
379 	if ((cell_high > cell_low) && (cell_high < 256)
380 	    && (rtab[cell_low] == rtab[cell_high])) {
381 		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
382 			 cell_low, cell_high, rtab[cell_high]);
383 		return TC_LINKLAYER_ATM;
384 	}
385 	return TC_LINKLAYER_ETHERNET;
386 }
387 
388 static struct qdisc_rate_table *qdisc_rtab_list;
389 
390 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
391 {
392 	struct qdisc_rate_table *rtab;
393 
394 	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
395 	    nla_len(tab) != TC_RTAB_SIZE)
396 		return NULL;
397 
398 	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
399 		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
400 		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
401 			rtab->refcnt++;
402 			return rtab;
403 		}
404 	}
405 
406 	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
407 	if (rtab) {
408 		rtab->rate = *r;
409 		rtab->refcnt = 1;
410 		memcpy(rtab->data, nla_data(tab), 1024);
411 		if (r->linklayer == TC_LINKLAYER_UNAWARE)
412 			r->linklayer = __detect_linklayer(r, rtab->data);
413 		rtab->next = qdisc_rtab_list;
414 		qdisc_rtab_list = rtab;
415 	}
416 	return rtab;
417 }
418 EXPORT_SYMBOL(qdisc_get_rtab);
419 
420 void qdisc_put_rtab(struct qdisc_rate_table *tab)
421 {
422 	struct qdisc_rate_table *rtab, **rtabp;
423 
424 	if (!tab || --tab->refcnt)
425 		return;
426 
427 	for (rtabp = &qdisc_rtab_list;
428 	     (rtab = *rtabp) != NULL;
429 	     rtabp = &rtab->next) {
430 		if (rtab == tab) {
431 			*rtabp = rtab->next;
432 			kfree(rtab);
433 			return;
434 		}
435 	}
436 }
437 EXPORT_SYMBOL(qdisc_put_rtab);
438 
439 static LIST_HEAD(qdisc_stab_list);
440 static DEFINE_SPINLOCK(qdisc_stab_lock);
441 
442 static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
443 	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
444 	[TCA_STAB_DATA] = { .type = NLA_BINARY },
445 };
446 
447 static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
448 {
449 	struct nlattr *tb[TCA_STAB_MAX + 1];
450 	struct qdisc_size_table *stab;
451 	struct tc_sizespec *s;
452 	unsigned int tsize = 0;
453 	u16 *tab = NULL;
454 	int err;
455 
456 	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
457 	if (err < 0)
458 		return ERR_PTR(err);
459 	if (!tb[TCA_STAB_BASE])
460 		return ERR_PTR(-EINVAL);
461 
462 	s = nla_data(tb[TCA_STAB_BASE]);
463 
464 	if (s->tsize > 0) {
465 		if (!tb[TCA_STAB_DATA])
466 			return ERR_PTR(-EINVAL);
467 		tab = nla_data(tb[TCA_STAB_DATA]);
468 		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
469 	}
470 
471 	if (tsize != s->tsize || (!tab && tsize > 0))
472 		return ERR_PTR(-EINVAL);
473 
474 	spin_lock(&qdisc_stab_lock);
475 
476 	list_for_each_entry(stab, &qdisc_stab_list, list) {
477 		if (memcmp(&stab->szopts, s, sizeof(*s)))
478 			continue;
479 		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
480 			continue;
481 		stab->refcnt++;
482 		spin_unlock(&qdisc_stab_lock);
483 		return stab;
484 	}
485 
486 	spin_unlock(&qdisc_stab_lock);
487 
488 	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
489 	if (!stab)
490 		return ERR_PTR(-ENOMEM);
491 
492 	stab->refcnt = 1;
493 	stab->szopts = *s;
494 	if (tsize > 0)
495 		memcpy(stab->data, tab, tsize * sizeof(u16));
496 
497 	spin_lock(&qdisc_stab_lock);
498 	list_add_tail(&stab->list, &qdisc_stab_list);
499 	spin_unlock(&qdisc_stab_lock);
500 
501 	return stab;
502 }
503 
504 static void stab_kfree_rcu(struct rcu_head *head)
505 {
506 	kfree(container_of(head, struct qdisc_size_table, rcu));
507 }
508 
509 void qdisc_put_stab(struct qdisc_size_table *tab)
510 {
511 	if (!tab)
512 		return;
513 
514 	spin_lock(&qdisc_stab_lock);
515 
516 	if (--tab->refcnt == 0) {
517 		list_del(&tab->list);
518 		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
519 	}
520 
521 	spin_unlock(&qdisc_stab_lock);
522 }
523 EXPORT_SYMBOL(qdisc_put_stab);
524 
525 static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
526 {
527 	struct nlattr *nest;
528 
529 	nest = nla_nest_start(skb, TCA_STAB);
530 	if (nest == NULL)
531 		goto nla_put_failure;
532 	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
533 		goto nla_put_failure;
534 	nla_nest_end(skb, nest);
535 
536 	return skb->len;
537 
538 nla_put_failure:
539 	return -1;
540 }
541 
542 void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
543 {
544 	int pkt_len, slot;
545 
546 	pkt_len = skb->len + stab->szopts.overhead;
547 	if (unlikely(!stab->szopts.tsize))
548 		goto out;
549 
550 	slot = pkt_len + stab->szopts.cell_align;
551 	if (unlikely(slot < 0))
552 		slot = 0;
553 
554 	slot >>= stab->szopts.cell_log;
555 	if (likely(slot < stab->szopts.tsize))
556 		pkt_len = stab->data[slot];
557 	else
558 		pkt_len = stab->data[stab->szopts.tsize - 1] *
559 				(slot / stab->szopts.tsize) +
560 				stab->data[slot % stab->szopts.tsize];
561 
562 	pkt_len <<= stab->szopts.size_log;
563 out:
564 	if (unlikely(pkt_len < 1))
565 		pkt_len = 1;
566 	qdisc_skb_cb(skb)->pkt_len = pkt_len;
567 }
568 EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
569 
570 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
571 {
572 	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
573 		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
574 			txt, qdisc->ops->id, qdisc->handle >> 16);
575 		qdisc->flags |= TCQ_F_WARN_NONWC;
576 	}
577 }
578 EXPORT_SYMBOL(qdisc_warn_nonwc);
579 
580 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
581 {
582 	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
583 						 timer);
584 
585 	rcu_read_lock();
586 	qdisc_unthrottled(wd->qdisc);
587 	__netif_schedule(qdisc_root(wd->qdisc));
588 	rcu_read_unlock();
589 
590 	return HRTIMER_NORESTART;
591 }
592 
593 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
594 {
595 	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
596 	wd->timer.function = qdisc_watchdog;
597 	wd->qdisc = qdisc;
598 }
599 EXPORT_SYMBOL(qdisc_watchdog_init);
600 
601 void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
602 {
603 	if (test_bit(__QDISC_STATE_DEACTIVATED,
604 		     &qdisc_root_sleeping(wd->qdisc)->state))
605 		return;
606 
607 	if (throttle)
608 		qdisc_throttled(wd->qdisc);
609 
610 	hrtimer_start(&wd->timer,
611 		      ns_to_ktime(expires),
612 		      HRTIMER_MODE_ABS_PINNED);
613 }
614 EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
615 
616 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
617 {
618 	hrtimer_cancel(&wd->timer);
619 	qdisc_unthrottled(wd->qdisc);
620 }
621 EXPORT_SYMBOL(qdisc_watchdog_cancel);
622 
623 static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
624 {
625 	unsigned int size = n * sizeof(struct hlist_head), i;
626 	struct hlist_head *h;
627 
628 	if (size <= PAGE_SIZE)
629 		h = kmalloc(size, GFP_KERNEL);
630 	else
631 		h = (struct hlist_head *)
632 			__get_free_pages(GFP_KERNEL, get_order(size));
633 
634 	if (h != NULL) {
635 		for (i = 0; i < n; i++)
636 			INIT_HLIST_HEAD(&h[i]);
637 	}
638 	return h;
639 }
640 
641 static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
642 {
643 	unsigned int size = n * sizeof(struct hlist_head);
644 
645 	if (size <= PAGE_SIZE)
646 		kfree(h);
647 	else
648 		free_pages((unsigned long)h, get_order(size));
649 }
650 
651 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
652 {
653 	struct Qdisc_class_common *cl;
654 	struct hlist_node *next;
655 	struct hlist_head *nhash, *ohash;
656 	unsigned int nsize, nmask, osize;
657 	unsigned int i, h;
658 
659 	/* Rehash when load factor exceeds 0.75 */
660 	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
661 		return;
662 	nsize = clhash->hashsize * 2;
663 	nmask = nsize - 1;
664 	nhash = qdisc_class_hash_alloc(nsize);
665 	if (nhash == NULL)
666 		return;
667 
668 	ohash = clhash->hash;
669 	osize = clhash->hashsize;
670 
671 	sch_tree_lock(sch);
672 	for (i = 0; i < osize; i++) {
673 		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
674 			h = qdisc_class_hash(cl->classid, nmask);
675 			hlist_add_head(&cl->hnode, &nhash[h]);
676 		}
677 	}
678 	clhash->hash     = nhash;
679 	clhash->hashsize = nsize;
680 	clhash->hashmask = nmask;
681 	sch_tree_unlock(sch);
682 
683 	qdisc_class_hash_free(ohash, osize);
684 }
685 EXPORT_SYMBOL(qdisc_class_hash_grow);
686 
687 int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
688 {
689 	unsigned int size = 4;
690 
691 	clhash->hash = qdisc_class_hash_alloc(size);
692 	if (clhash->hash == NULL)
693 		return -ENOMEM;
694 	clhash->hashsize  = size;
695 	clhash->hashmask  = size - 1;
696 	clhash->hashelems = 0;
697 	return 0;
698 }
699 EXPORT_SYMBOL(qdisc_class_hash_init);
700 
701 void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
702 {
703 	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
704 }
705 EXPORT_SYMBOL(qdisc_class_hash_destroy);
706 
707 void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
708 			     struct Qdisc_class_common *cl)
709 {
710 	unsigned int h;
711 
712 	INIT_HLIST_NODE(&cl->hnode);
713 	h = qdisc_class_hash(cl->classid, clhash->hashmask);
714 	hlist_add_head(&cl->hnode, &clhash->hash[h]);
715 	clhash->hashelems++;
716 }
717 EXPORT_SYMBOL(qdisc_class_hash_insert);
718 
719 void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
720 			     struct Qdisc_class_common *cl)
721 {
722 	hlist_del(&cl->hnode);
723 	clhash->hashelems--;
724 }
725 EXPORT_SYMBOL(qdisc_class_hash_remove);
726 
727 /* Allocate a unique handle from the space managed by the kernel.
728  * The possible range is [8000-FFFF]:0000 (0x8000 values).
729  */
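/* In the usual "major:minor" notation a qdisc handle keeps its identity in
 * the major part and has minor == 0.  For example (illustrative only), the
 * first handle the loop below normally produces is
 * TC_H_MAKE(0x80010000U, 0), i.e. 8001:0000; TC_H_MAJ() and TC_H_MIN()
 * recover the two halves.
 */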
730 static u32 qdisc_alloc_handle(struct net_device *dev)
731 {
732 	int i = 0x8000;
733 	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
734 
735 	do {
736 		autohandle += TC_H_MAKE(0x10000U, 0);
737 		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
738 			autohandle = TC_H_MAKE(0x80000000U, 0);
739 		if (!qdisc_lookup(dev, autohandle))
740 			return autohandle;
741 		cond_resched();
742 	} while	(--i > 0);
743 
744 	return 0;
745 }
746 
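/* Propagate a decrease of n packets / len bytes in a qdisc's queue up the
 * chain of its ancestors so that their qlen and backlog counters stay in
 * sync, give classful parents a chance to react via qlen_notify() (e.g. to
 * deactivate a now-empty class), and account the removed packets as drops
 * in the ancestors' statistics.
 */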
747 void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
748 			       unsigned int len)
749 {
750 	const struct Qdisc_class_ops *cops;
751 	unsigned long cl;
752 	u32 parentid;
753 	int drops;
754 
755 	if (n == 0 && len == 0)
756 		return;
757 	drops = max_t(int, n, 0);
758 	rcu_read_lock();
759 	while ((parentid = sch->parent)) {
760 		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
761 			break;
762 
763 		if (sch->flags & TCQ_F_NOPARENT)
764 			break;
765 		/* TODO: perform the search on a per txq basis */
766 		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
767 		if (sch == NULL) {
768 			WARN_ON_ONCE(parentid != TC_H_ROOT);
769 			break;
770 		}
771 		cops = sch->ops->cl_ops;
772 		if (cops->qlen_notify) {
773 			cl = cops->get(sch, parentid);
774 			cops->qlen_notify(sch, cl);
775 			cops->put(sch, cl);
776 		}
777 		sch->q.qlen -= n;
778 		sch->qstats.backlog -= len;
779 		__qdisc_qstats_drop(sch, drops);
780 	}
781 	rcu_read_unlock();
782 }
783 EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
784 
785 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
786 			       struct nlmsghdr *n, u32 clid,
787 			       struct Qdisc *old, struct Qdisc *new)
788 {
789 	if (new || old)
790 		qdisc_notify(net, skb, n, clid, old, new);
791 
792 	if (old)
793 		qdisc_destroy(old);
794 }
795 
796 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
797  * to device "dev".
798  *
799  * When appropriate, send a netlink notification using 'skb'
800  * and 'n'.
801  *
802  * On success, destroy the old qdisc.
803  */
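/* For example (illustrative only), "tc qdisc replace dev eth0 parent 1:1
 * handle 10: sfq" ends up here with "parent" being the qdisc 1:,
 * classid == 1:1 and "new" being the freshly created sfq qdisc; the
 * previous child of class 1:1 comes back in "old" and is destroyed after
 * the notification has been sent.
 */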
804 
805 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
806 		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
807 		       struct Qdisc *new, struct Qdisc *old)
808 {
809 	struct Qdisc *q = old;
810 	struct net *net = dev_net(dev);
811 	int err = 0;
812 
813 	if (parent == NULL) {
814 		unsigned int i, num_q, ingress;
815 
816 		ingress = 0;
817 		num_q = dev->num_tx_queues;
818 		if ((q && q->flags & TCQ_F_INGRESS) ||
819 		    (new && new->flags & TCQ_F_INGRESS)) {
820 			num_q = 1;
821 			ingress = 1;
822 			if (!dev_ingress_queue(dev))
823 				return -ENOENT;
824 		}
825 
826 		if (dev->flags & IFF_UP)
827 			dev_deactivate(dev);
828 
829 		if (new && new->ops->attach)
830 			goto skip;
831 
832 		for (i = 0; i < num_q; i++) {
833 			struct netdev_queue *dev_queue = dev_ingress_queue(dev);
834 
835 			if (!ingress)
836 				dev_queue = netdev_get_tx_queue(dev, i);
837 
838 			old = dev_graft_qdisc(dev_queue, new);
839 			if (new && i > 0)
840 				atomic_inc(&new->refcnt);
841 
842 			if (!ingress)
843 				qdisc_destroy(old);
844 		}
845 
846 skip:
847 		if (!ingress) {
848 			notify_and_destroy(net, skb, n, classid,
849 					   dev->qdisc, new);
850 			if (new && !new->ops->attach)
851 				atomic_inc(&new->refcnt);
852 			dev->qdisc = new ? : &noop_qdisc;
853 
854 			if (new && new->ops->attach)
855 				new->ops->attach(new);
856 		} else {
857 			notify_and_destroy(net, skb, n, classid, old, new);
858 		}
859 
860 		if (dev->flags & IFF_UP)
861 			dev_activate(dev);
862 	} else {
863 		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
864 
865 		err = -EOPNOTSUPP;
866 		if (cops && cops->graft) {
867 			unsigned long cl = cops->get(parent, classid);
868 			if (cl) {
869 				err = cops->graft(parent, cl, new, &old);
870 				cops->put(parent, cl);
871 			} else
872 				err = -ENOENT;
873 		}
874 		if (!err)
875 			notify_and_destroy(net, skb, n, classid, old, new);
876 	}
877 	return err;
878 }
879 
880 /* lockdep annotation is needed for ingress; egress gets it only for name */
881 static struct lock_class_key qdisc_tx_lock;
882 static struct lock_class_key qdisc_rx_lock;
883 
884 /*
885    Allocate and initialize a new qdisc.
886 
887    Parameters are passed via opt.
888  */
889 
890 static struct Qdisc *
891 qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
892 	     struct Qdisc *p, u32 parent, u32 handle,
893 	     struct nlattr **tca, int *errp)
894 {
895 	int err;
896 	struct nlattr *kind = tca[TCA_KIND];
897 	struct Qdisc *sch;
898 	struct Qdisc_ops *ops;
899 	struct qdisc_size_table *stab;
900 
901 	ops = qdisc_lookup_ops(kind);
902 #ifdef CONFIG_MODULES
903 	if (ops == NULL && kind != NULL) {
904 		char name[IFNAMSIZ];
905 		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
906 			/* We dropped the RTNL semaphore in order to
907 			 * perform the module load.  So, even if we
908 			 * succeeded in loading the module we have to
909 			 * tell the caller to replay the request.  We
910 			 * indicate this using -EAGAIN.
911 			 * We replay the request because the device may
912  * go away in the meantime.
913 			 */
914 			rtnl_unlock();
915 			request_module("sch_%s", name);
916 			rtnl_lock();
917 			ops = qdisc_lookup_ops(kind);
918 			if (ops != NULL) {
919 			/* We will try qdisc_lookup_ops again,
920 				 * so don't keep a reference.
921 				 */
922 				module_put(ops->owner);
923 				err = -EAGAIN;
924 				goto err_out;
925 			}
926 		}
927 	}
928 #endif
929 
930 	err = -ENOENT;
931 	if (ops == NULL)
932 		goto err_out;
933 
934 	sch = qdisc_alloc(dev_queue, ops);
935 	if (IS_ERR(sch)) {
936 		err = PTR_ERR(sch);
937 		goto err_out2;
938 	}
939 
940 	sch->parent = parent;
941 
942 	if (handle == TC_H_INGRESS) {
943 		sch->flags |= TCQ_F_INGRESS;
944 		handle = TC_H_MAKE(TC_H_INGRESS, 0);
945 		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
946 	} else {
947 		if (handle == 0) {
948 			handle = qdisc_alloc_handle(dev);
949 			err = -ENOMEM;
950 			if (handle == 0)
951 				goto err_out3;
952 		}
953 		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
954 		if (!netif_is_multiqueue(dev))
955 			sch->flags |= TCQ_F_ONETXQUEUE;
956 	}
957 
958 	sch->handle = handle;
959 
960 	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
961 		if (qdisc_is_percpu_stats(sch)) {
962 			sch->cpu_bstats =
963 				netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
964 			if (!sch->cpu_bstats)
965 				goto err_out4;
966 
967 			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
968 			if (!sch->cpu_qstats)
969 				goto err_out4;
970 		}
971 
972 		if (tca[TCA_STAB]) {
973 			stab = qdisc_get_stab(tca[TCA_STAB]);
974 			if (IS_ERR(stab)) {
975 				err = PTR_ERR(stab);
976 				goto err_out4;
977 			}
978 			rcu_assign_pointer(sch->stab, stab);
979 		}
980 		if (tca[TCA_RATE]) {
981 			spinlock_t *root_lock;
982 
983 			err = -EOPNOTSUPP;
984 			if (sch->flags & TCQ_F_MQROOT)
985 				goto err_out4;
986 
987 			if ((sch->parent != TC_H_ROOT) &&
988 			    !(sch->flags & TCQ_F_INGRESS) &&
989 			    (!p || !(p->flags & TCQ_F_MQROOT)))
990 				root_lock = qdisc_root_sleeping_lock(sch);
991 			else
992 				root_lock = qdisc_lock(sch);
993 
994 			err = gen_new_estimator(&sch->bstats,
995 						sch->cpu_bstats,
996 						&sch->rate_est,
997 						root_lock,
998 						tca[TCA_RATE]);
999 			if (err)
1000 				goto err_out4;
1001 		}
1002 
1003 		qdisc_list_add(sch);
1004 
1005 		return sch;
1006 	}
1007 err_out3:
1008 	dev_put(dev);
1009 	kfree((char *) sch - sch->padded);
1010 err_out2:
1011 	module_put(ops->owner);
1012 err_out:
1013 	*errp = err;
1014 	return NULL;
1015 
1016 err_out4:
1017 	free_percpu(sch->cpu_bstats);
1018 	free_percpu(sch->cpu_qstats);
1019 	/*
1020 	 * Any broken qdiscs that would require an ops->reset() here?
1021 	 * The qdisc was never in action so it shouldn't be necessary.
1022 	 */
1023 	qdisc_put_stab(rtnl_dereference(sch->stab));
1024 	if (ops->destroy)
1025 		ops->destroy(sch);
1026 	goto err_out3;
1027 }
1028 
1029 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
1030 {
1031 	struct qdisc_size_table *ostab, *stab = NULL;
1032 	int err = 0;
1033 
1034 	if (tca[TCA_OPTIONS]) {
1035 		if (sch->ops->change == NULL)
1036 			return -EINVAL;
1037 		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
1038 		if (err)
1039 			return err;
1040 	}
1041 
1042 	if (tca[TCA_STAB]) {
1043 		stab = qdisc_get_stab(tca[TCA_STAB]);
1044 		if (IS_ERR(stab))
1045 			return PTR_ERR(stab);
1046 	}
1047 
1048 	ostab = rtnl_dereference(sch->stab);
1049 	rcu_assign_pointer(sch->stab, stab);
1050 	qdisc_put_stab(ostab);
1051 
1052 	if (tca[TCA_RATE]) {
1053 		/* NB: ignores errors from replace_estimator
1054 		   because change can't be undone. */
1055 		if (sch->flags & TCQ_F_MQROOT)
1056 			goto out;
1057 		gen_replace_estimator(&sch->bstats,
1058 				      sch->cpu_bstats,
1059 				      &sch->rate_est,
1060 				      qdisc_root_sleeping_lock(sch),
1061 				      tca[TCA_RATE]);
1062 	}
1063 out:
1064 	return 0;
1065 }
1066 
1067 struct check_loop_arg {
1068 	struct qdisc_walker	w;
1069 	struct Qdisc		*p;
1070 	int			depth;
1071 };
1072 
1073 static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
1074 
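/* Walk the class hierarchy below qdisc q and return -ELOOP if qdisc p is
 * found among its descendant leaves (or the hierarchy is nested more than
 * seven levels deep); used to reject grafts that would make a qdisc an
 * ancestor of itself.
 */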
1075 static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1076 {
1077 	struct check_loop_arg	arg;
1078 
1079 	if (q->ops->cl_ops == NULL)
1080 		return 0;
1081 
1082 	arg.w.stop = arg.w.skip = arg.w.count = 0;
1083 	arg.w.fn = check_loop_fn;
1084 	arg.depth = depth;
1085 	arg.p = p;
1086 	q->ops->cl_ops->walk(q, &arg.w);
1087 	return arg.w.stop ? -ELOOP : 0;
1088 }
1089 
1090 static int
1091 check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1092 {
1093 	struct Qdisc *leaf;
1094 	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1095 	struct check_loop_arg *arg = (struct check_loop_arg *)w;
1096 
1097 	leaf = cops->leaf(q, cl);
1098 	if (leaf) {
1099 		if (leaf == arg->p || arg->depth > 7)
1100 			return -ELOOP;
1101 		return check_loop(leaf, arg->p, arg->depth + 1);
1102 	}
1103 	return 0;
1104 }
1105 
1106 /*
1107  * Delete/get qdisc.
1108  */
1109 
1110 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
1111 {
1112 	struct net *net = sock_net(skb->sk);
1113 	struct tcmsg *tcm = nlmsg_data(n);
1114 	struct nlattr *tca[TCA_MAX + 1];
1115 	struct net_device *dev;
1116 	u32 clid;
1117 	struct Qdisc *q = NULL;
1118 	struct Qdisc *p = NULL;
1119 	int err;
1120 
1121 	if ((n->nlmsg_type != RTM_GETQDISC) &&
1122 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1123 		return -EPERM;
1124 
1125 	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1126 	if (err < 0)
1127 		return err;
1128 
1129 	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1130 	if (!dev)
1131 		return -ENODEV;
1132 
1133 	clid = tcm->tcm_parent;
1134 	if (clid) {
1135 		if (clid != TC_H_ROOT) {
1136 			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1137 				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1138 				if (!p)
1139 					return -ENOENT;
1140 				q = qdisc_leaf(p, clid);
1141 			} else if (dev_ingress_queue(dev)) {
1142 				q = dev_ingress_queue(dev)->qdisc_sleeping;
1143 			}
1144 		} else {
1145 			q = dev->qdisc;
1146 		}
1147 		if (!q)
1148 			return -ENOENT;
1149 
1150 		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
1151 			return -EINVAL;
1152 	} else {
1153 		q = qdisc_lookup(dev, tcm->tcm_handle);
1154 		if (!q)
1155 			return -ENOENT;
1156 	}
1157 
1158 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1159 		return -EINVAL;
1160 
1161 	if (n->nlmsg_type == RTM_DELQDISC) {
1162 		if (!clid)
1163 			return -EINVAL;
1164 		if (q->handle == 0)
1165 			return -ENOENT;
1166 		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
1167 		if (err != 0)
1168 			return err;
1169 	} else {
1170 		qdisc_notify(net, skb, n, clid, NULL, q);
1171 	}
1172 	return 0;
1173 }
1174 
1175 /*
1176  * Create/change qdisc.
1177  */
1178 
1179 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
1180 {
1181 	struct net *net = sock_net(skb->sk);
1182 	struct tcmsg *tcm;
1183 	struct nlattr *tca[TCA_MAX + 1];
1184 	struct net_device *dev;
1185 	u32 clid;
1186 	struct Qdisc *q, *p;
1187 	int err;
1188 
1189 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1190 		return -EPERM;
1191 
1192 replay:
1193 	/* Reinit, just in case something touches this. */
1194 	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1195 	if (err < 0)
1196 		return err;
1197 
1198 	tcm = nlmsg_data(n);
1199 	clid = tcm->tcm_parent;
1200 	q = p = NULL;
1201 
1202 	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1203 	if (!dev)
1204 		return -ENODEV;
1205 
1206 
1207 	if (clid) {
1208 		if (clid != TC_H_ROOT) {
1209 			if (clid != TC_H_INGRESS) {
1210 				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1211 				if (!p)
1212 					return -ENOENT;
1213 				q = qdisc_leaf(p, clid);
1214 			} else if (dev_ingress_queue_create(dev)) {
1215 				q = dev_ingress_queue(dev)->qdisc_sleeping;
1216 			}
1217 		} else {
1218 			q = dev->qdisc;
1219 		}
1220 
1221 		/* It may be the default qdisc; ignore it */
1222 		if (q && q->handle == 0)
1223 			q = NULL;
1224 
1225 		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1226 			if (tcm->tcm_handle) {
1227 				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
1228 					return -EEXIST;
1229 				if (TC_H_MIN(tcm->tcm_handle))
1230 					return -EINVAL;
1231 				q = qdisc_lookup(dev, tcm->tcm_handle);
1232 				if (!q)
1233 					goto create_n_graft;
1234 				if (n->nlmsg_flags & NLM_F_EXCL)
1235 					return -EEXIST;
1236 				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1237 					return -EINVAL;
1238 				if (q == p ||
1239 				    (p && check_loop(q, p, 0)))
1240 					return -ELOOP;
1241 				atomic_inc(&q->refcnt);
1242 				goto graft;
1243 			} else {
1244 				if (!q)
1245 					goto create_n_graft;
1246 
1247 				/* This magic test requires explanation.
1248 				 *
1249 				 *   We know that some child q is already
1250 				 *   attached to this parent and we have a choice:
1251 				 *   either to change it or to create/graft a new one.
1252 				 *
1253 				 *   1. We are allowed to create/graft only
1254 				 *   if the CREATE and REPLACE flags are set.
1255 				 *
1256 				 *   2. If EXCL is set, the requestor wanted to say
1257 				 *   that the qdisc tcm_handle is not expected
1258 				 *   to exist, so we choose create/graft too.
1259 				 *
1260 				 *   3. The last case is when no flags are set.
1261 				 *   Alas, it is a sort of hole in the API; we
1262 				 *   cannot decide what to do unambiguously.
1263 				 *   For now we select create/graft if the
1264 				 *   user gave a KIND that does not match the existing one.
1265 				 */
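				/* For example (illustrative): "tc qdisc replace
				 * dev eth0 root fq_codel" arrives here with CREATE
				 * and REPLACE set and no handle; if the existing
				 * root is already an fq_codel it is changed in
				 * place below, while a differing KIND (say the
				 * root is pfifo_fast) sends us to create_n_graft.
				 */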
1266 				if ((n->nlmsg_flags & NLM_F_CREATE) &&
1267 				    (n->nlmsg_flags & NLM_F_REPLACE) &&
1268 				    ((n->nlmsg_flags & NLM_F_EXCL) ||
1269 				     (tca[TCA_KIND] &&
1270 				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
1271 					goto create_n_graft;
1272 			}
1273 		}
1274 	} else {
1275 		if (!tcm->tcm_handle)
1276 			return -EINVAL;
1277 		q = qdisc_lookup(dev, tcm->tcm_handle);
1278 	}
1279 
1280 	/* Change qdisc parameters */
1281 	if (q == NULL)
1282 		return -ENOENT;
1283 	if (n->nlmsg_flags & NLM_F_EXCL)
1284 		return -EEXIST;
1285 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
1286 		return -EINVAL;
1287 	err = qdisc_change(q, tca);
1288 	if (err == 0)
1289 		qdisc_notify(net, skb, n, clid, NULL, q);
1290 	return err;
1291 
1292 create_n_graft:
1293 	if (!(n->nlmsg_flags & NLM_F_CREATE))
1294 		return -ENOENT;
1295 	if (clid == TC_H_INGRESS) {
1296 		if (dev_ingress_queue(dev))
1297 			q = qdisc_create(dev, dev_ingress_queue(dev), p,
1298 					 tcm->tcm_parent, tcm->tcm_parent,
1299 					 tca, &err);
1300 		else
1301 			err = -ENOENT;
1302 	} else {
1303 		struct netdev_queue *dev_queue;
1304 
1305 		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1306 			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1307 		else if (p)
1308 			dev_queue = p->dev_queue;
1309 		else
1310 			dev_queue = netdev_get_tx_queue(dev, 0);
1311 
1312 		q = qdisc_create(dev, dev_queue, p,
1313 				 tcm->tcm_parent, tcm->tcm_handle,
1314 				 tca, &err);
1315 	}
1316 	if (q == NULL) {
1317 		if (err == -EAGAIN)
1318 			goto replay;
1319 		return err;
1320 	}
1321 
1322 graft:
1323 	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
1324 	if (err) {
1325 		if (q)
1326 			qdisc_destroy(q);
1327 		return err;
1328 	}
1329 
1330 	return 0;
1331 }
1332 
1333 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1334 			 u32 portid, u32 seq, u16 flags, int event)
1335 {
1336 	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
1337 	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
1338 	struct tcmsg *tcm;
1339 	struct nlmsghdr  *nlh;
1340 	unsigned char *b = skb_tail_pointer(skb);
1341 	struct gnet_dump d;
1342 	struct qdisc_size_table *stab;
1343 	__u32 qlen;
1344 
1345 	cond_resched();
1346 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1347 	if (!nlh)
1348 		goto out_nlmsg_trim;
1349 	tcm = nlmsg_data(nlh);
1350 	tcm->tcm_family = AF_UNSPEC;
1351 	tcm->tcm__pad1 = 0;
1352 	tcm->tcm__pad2 = 0;
1353 	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1354 	tcm->tcm_parent = clid;
1355 	tcm->tcm_handle = q->handle;
1356 	tcm->tcm_info = atomic_read(&q->refcnt);
1357 	if (nla_put_string(skb, TCA_KIND, q->ops->id))
1358 		goto nla_put_failure;
1359 	if (q->ops->dump && q->ops->dump(q, skb) < 0)
1360 		goto nla_put_failure;
1361 	qlen = q->q.qlen;
1362 
1363 	stab = rtnl_dereference(q->stab);
1364 	if (stab && qdisc_dump_stab(skb, stab) < 0)
1365 		goto nla_put_failure;
1366 
1367 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1368 					 qdisc_root_sleeping_lock(q), &d) < 0)
1369 		goto nla_put_failure;
1370 
1371 	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
1372 		goto nla_put_failure;
1373 
1374 	if (qdisc_is_percpu_stats(q)) {
1375 		cpu_bstats = q->cpu_bstats;
1376 		cpu_qstats = q->cpu_qstats;
1377 	}
1378 
1379 	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
1380 	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
1381 	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
1382 		goto nla_put_failure;
1383 
1384 	if (gnet_stats_finish_copy(&d) < 0)
1385 		goto nla_put_failure;
1386 
1387 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1388 	return skb->len;
1389 
1390 out_nlmsg_trim:
1391 nla_put_failure:
1392 	nlmsg_trim(skb, b);
1393 	return -1;
1394 }
1395 
1396 static bool tc_qdisc_dump_ignore(struct Qdisc *q)
1397 {
1398 	return (q->flags & TCQ_F_BUILTIN) ? true : false;
1399 }
1400 
1401 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1402 			struct nlmsghdr *n, u32 clid,
1403 			struct Qdisc *old, struct Qdisc *new)
1404 {
1405 	struct sk_buff *skb;
1406 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1407 
1408 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1409 	if (!skb)
1410 		return -ENOBUFS;
1411 
1412 	if (old && !tc_qdisc_dump_ignore(old)) {
1413 		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
1414 				  0, RTM_DELQDISC) < 0)
1415 			goto err_out;
1416 	}
1417 	if (new && !tc_qdisc_dump_ignore(new)) {
1418 		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
1419 				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
1420 			goto err_out;
1421 	}
1422 
1423 	if (skb->len)
1424 		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1425 				      n->nlmsg_flags & NLM_F_ECHO);
1426 
1427 err_out:
1428 	kfree_skb(skb);
1429 	return -EINVAL;
1430 }
1431 
1432 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1433 			      struct netlink_callback *cb,
1434 			      int *q_idx_p, int s_q_idx)
1435 {
1436 	int ret = 0, q_idx = *q_idx_p;
1437 	struct Qdisc *q;
1438 
1439 	if (!root)
1440 		return 0;
1441 
1442 	q = root;
1443 	if (q_idx < s_q_idx) {
1444 		q_idx++;
1445 	} else {
1446 		if (!tc_qdisc_dump_ignore(q) &&
1447 		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1448 				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1449 			goto done;
1450 		q_idx++;
1451 	}
1452 	list_for_each_entry(q, &root->list, list) {
1453 		if (q_idx < s_q_idx) {
1454 			q_idx++;
1455 			continue;
1456 		}
1457 		if (!tc_qdisc_dump_ignore(q) &&
1458 		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1459 				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1460 			goto done;
1461 		q_idx++;
1462 	}
1463 
1464 out:
1465 	*q_idx_p = q_idx;
1466 	return ret;
1467 done:
1468 	ret = -1;
1469 	goto out;
1470 }
1471 
1472 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1473 {
1474 	struct net *net = sock_net(skb->sk);
1475 	int idx, q_idx;
1476 	int s_idx, s_q_idx;
1477 	struct net_device *dev;
1478 
1479 	s_idx = cb->args[0];
1480 	s_q_idx = q_idx = cb->args[1];
1481 
1482 	idx = 0;
1483 	ASSERT_RTNL();
1484 	for_each_netdev(net, dev) {
1485 		struct netdev_queue *dev_queue;
1486 
1487 		if (idx < s_idx)
1488 			goto cont;
1489 		if (idx > s_idx)
1490 			s_q_idx = 0;
1491 		q_idx = 0;
1492 
1493 		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
1494 			goto done;
1495 
1496 		dev_queue = dev_ingress_queue(dev);
1497 		if (dev_queue &&
1498 		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
1499 				       &q_idx, s_q_idx) < 0)
1500 			goto done;
1501 
1502 cont:
1503 		idx++;
1504 	}
1505 
1506 done:
1507 	cb->args[0] = idx;
1508 	cb->args[1] = q_idx;
1509 
1510 	return skb->len;
1511 }
1512 
1513 
1514 
1515 /************************************************
1516  *	Traffic classes manipulation.		*
1517  ************************************************/
1518 
1519 
1520 
1521 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
1522 {
1523 	struct net *net = sock_net(skb->sk);
1524 	struct tcmsg *tcm = nlmsg_data(n);
1525 	struct nlattr *tca[TCA_MAX + 1];
1526 	struct net_device *dev;
1527 	struct Qdisc *q = NULL;
1528 	const struct Qdisc_class_ops *cops;
1529 	unsigned long cl = 0;
1530 	unsigned long new_cl;
1531 	u32 portid;
1532 	u32 clid;
1533 	u32 qid;
1534 	int err;
1535 
1536 	if ((n->nlmsg_type != RTM_GETTCLASS) &&
1537 	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1538 		return -EPERM;
1539 
1540 	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1541 	if (err < 0)
1542 		return err;
1543 
1544 	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1545 	if (!dev)
1546 		return -ENODEV;
1547 
1548 	/*
1549 	   parent == TC_H_UNSPEC - unspecified parent.
1550 	   parent == TC_H_ROOT   - class is root, which has no parent.
1551 	   parent == X:0	 - parent is root class.
1552 	   parent == X:Y	 - parent is a node in hierarchy.
1553 	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.
1554 
1555 	   handle == 0:0	 - generate handle from kernel pool.
1556 	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
1557 	   handle == X:Y	 - clear.
1558 	   handle == X:0	 - root class.
1559 	 */
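	/* For example (illustrative only): tcm_parent == 1:0 with
	 * tcm_handle == 1:10 addresses class 1:10 directly under the root
	 * class of qdisc 1:, while tcm_handle == 0:0 asks for the class
	 * identifier to be generated from the kernel pool.
	 */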
1560 
1561 	/* Step 1. Determine qdisc handle X:0 */
1562 
1563 	portid = tcm->tcm_parent;
1564 	clid = tcm->tcm_handle;
1565 	qid = TC_H_MAJ(clid);
1566 
1567 	if (portid != TC_H_ROOT) {
1568 		u32 qid1 = TC_H_MAJ(portid);
1569 
1570 		if (qid && qid1) {
1571 			/* If both majors are known, they must be identical. */
1572 			if (qid != qid1)
1573 				return -EINVAL;
1574 		} else if (qid1) {
1575 			qid = qid1;
1576 		} else if (qid == 0)
1577 			qid = dev->qdisc->handle;
1578 
1579 		/* Now qid is a genuine qdisc handle consistent
1580 		 * with both parent and child.
1581 		 *
1582 		 * TC_H_MAJ(portid) may still be unspecified; complete it now.
1583 		 */
1584 		if (portid)
1585 			portid = TC_H_MAKE(qid, portid);
1586 	} else {
1587 		if (qid == 0)
1588 			qid = dev->qdisc->handle;
1589 	}
1590 
1591 	/* OK. Locate qdisc */
1592 	q = qdisc_lookup(dev, qid);
1593 	if (!q)
1594 		return -ENOENT;
1595 
1596 	/* And check that it supports classes */
1597 	cops = q->ops->cl_ops;
1598 	if (cops == NULL)
1599 		return -EINVAL;
1600 
1601 	/* Now try to get class */
1602 	if (clid == 0) {
1603 		if (portid == TC_H_ROOT)
1604 			clid = qid;
1605 	} else
1606 		clid = TC_H_MAKE(qid, clid);
1607 
1608 	if (clid)
1609 		cl = cops->get(q, clid);
1610 
1611 	if (cl == 0) {
1612 		err = -ENOENT;
1613 		if (n->nlmsg_type != RTM_NEWTCLASS ||
1614 		    !(n->nlmsg_flags & NLM_F_CREATE))
1615 			goto out;
1616 	} else {
1617 		switch (n->nlmsg_type) {
1618 		case RTM_NEWTCLASS:
1619 			err = -EEXIST;
1620 			if (n->nlmsg_flags & NLM_F_EXCL)
1621 				goto out;
1622 			break;
1623 		case RTM_DELTCLASS:
1624 			err = -EOPNOTSUPP;
1625 			if (cops->delete)
1626 				err = cops->delete(q, cl);
1627 			if (err == 0)
1628 				tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
1629 			goto out;
1630 		case RTM_GETTCLASS:
1631 			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
1632 			goto out;
1633 		default:
1634 			err = -EINVAL;
1635 			goto out;
1636 		}
1637 	}
1638 
1639 	new_cl = cl;
1640 	err = -EOPNOTSUPP;
1641 	if (cops->change)
1642 		err = cops->change(q, clid, portid, tca, &new_cl);
1643 	if (err == 0)
1644 		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
1645 
1646 out:
1647 	if (cl)
1648 		cops->put(q, cl);
1649 
1650 	return err;
1651 }
1652 
1653 
1654 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1655 			  unsigned long cl,
1656 			  u32 portid, u32 seq, u16 flags, int event)
1657 {
1658 	struct tcmsg *tcm;
1659 	struct nlmsghdr  *nlh;
1660 	unsigned char *b = skb_tail_pointer(skb);
1661 	struct gnet_dump d;
1662 	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1663 
1664 	cond_resched();
1665 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1666 	if (!nlh)
1667 		goto out_nlmsg_trim;
1668 	tcm = nlmsg_data(nlh);
1669 	tcm->tcm_family = AF_UNSPEC;
1670 	tcm->tcm__pad1 = 0;
1671 	tcm->tcm__pad2 = 0;
1672 	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1673 	tcm->tcm_parent = q->handle;
1674 	tcm->tcm_handle = q->handle;
1675 	tcm->tcm_info = 0;
1676 	if (nla_put_string(skb, TCA_KIND, q->ops->id))
1677 		goto nla_put_failure;
1678 	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1679 		goto nla_put_failure;
1680 
1681 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1682 					 qdisc_root_sleeping_lock(q), &d) < 0)
1683 		goto nla_put_failure;
1684 
1685 	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1686 		goto nla_put_failure;
1687 
1688 	if (gnet_stats_finish_copy(&d) < 0)
1689 		goto nla_put_failure;
1690 
1691 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1692 	return skb->len;
1693 
1694 out_nlmsg_trim:
1695 nla_put_failure:
1696 	nlmsg_trim(skb, b);
1697 	return -1;
1698 }
1699 
1700 static int tclass_notify(struct net *net, struct sk_buff *oskb,
1701 			 struct nlmsghdr *n, struct Qdisc *q,
1702 			 unsigned long cl, int event)
1703 {
1704 	struct sk_buff *skb;
1705 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1706 
1707 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1708 	if (!skb)
1709 		return -ENOBUFS;
1710 
1711 	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
1712 		kfree_skb(skb);
1713 		return -EINVAL;
1714 	}
1715 
1716 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1717 			      n->nlmsg_flags & NLM_F_ECHO);
1718 }
1719 
1720 struct qdisc_dump_args {
1721 	struct qdisc_walker	w;
1722 	struct sk_buff		*skb;
1723 	struct netlink_callback	*cb;
1724 };
1725 
1726 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
1727 {
1728 	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1729 
1730 	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
1731 			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
1732 }
1733 
1734 static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
1735 				struct tcmsg *tcm, struct netlink_callback *cb,
1736 				int *t_p, int s_t)
1737 {
1738 	struct qdisc_dump_args arg;
1739 
1740 	if (tc_qdisc_dump_ignore(q) ||
1741 	    *t_p < s_t || !q->ops->cl_ops ||
1742 	    (tcm->tcm_parent &&
1743 	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
1744 		(*t_p)++;
1745 		return 0;
1746 	}
1747 	if (*t_p > s_t)
1748 		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1749 	arg.w.fn = qdisc_class_dump;
1750 	arg.skb = skb;
1751 	arg.cb = cb;
1752 	arg.w.stop  = 0;
1753 	arg.w.skip = cb->args[1];
1754 	arg.w.count = 0;
1755 	q->ops->cl_ops->walk(q, &arg.w);
1756 	cb->args[1] = arg.w.count;
1757 	if (arg.w.stop)
1758 		return -1;
1759 	(*t_p)++;
1760 	return 0;
1761 }
1762 
1763 static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1764 			       struct tcmsg *tcm, struct netlink_callback *cb,
1765 			       int *t_p, int s_t)
1766 {
1767 	struct Qdisc *q;
1768 
1769 	if (!root)
1770 		return 0;
1771 
1772 	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
1773 		return -1;
1774 
1775 	list_for_each_entry(q, &root->list, list) {
1776 		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1777 			return -1;
1778 	}
1779 
1780 	return 0;
1781 }
1782 
1783 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1784 {
1785 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
1786 	struct net *net = sock_net(skb->sk);
1787 	struct netdev_queue *dev_queue;
1788 	struct net_device *dev;
1789 	int t, s_t;
1790 
1791 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
1792 		return 0;
1793 	dev = dev_get_by_index(net, tcm->tcm_ifindex);
1794 	if (!dev)
1795 		return 0;
1796 
1797 	s_t = cb->args[0];
1798 	t = 0;
1799 
1800 	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
1801 		goto done;
1802 
1803 	dev_queue = dev_ingress_queue(dev);
1804 	if (dev_queue &&
1805 	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
1806 				&t, s_t) < 0)
1807 		goto done;
1808 
1809 done:
1810 	cb->args[0] = t;
1811 
1812 	dev_put(dev);
1813 	return skb->len;
1814 }
1815 
1816 /* Main classifier routine: scans the classifier chain attached
1817  * to this qdisc, (optionally) tests for the protocol and asks
1818  * specific classifiers.
1819  */
1820 int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1821 		struct tcf_result *res, bool compat_mode)
1822 {
1823 	__be16 protocol = tc_skb_protocol(skb);
1824 #ifdef CONFIG_NET_CLS_ACT
1825 	const struct tcf_proto *old_tp = tp;
1826 	int limit = 0;
1827 
1828 reclassify:
1829 #endif
1830 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
1831 		int err;
1832 
1833 		if (tp->protocol != protocol &&
1834 		    tp->protocol != htons(ETH_P_ALL))
1835 			continue;
1836 
1837 		err = tp->classify(skb, tp, res);
1838 #ifdef CONFIG_NET_CLS_ACT
1839 		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode))
1840 			goto reset;
1841 #endif
1842 		if (err >= 0)
1843 			return err;
1844 	}
1845 
1846 	return TC_ACT_UNSPEC; /* signal: continue lookup */
1847 #ifdef CONFIG_NET_CLS_ACT
1848 reset:
1849 	if (unlikely(limit++ >= MAX_REC_LOOP)) {
1850 		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
1851 				       tp->q->ops->id, tp->prio & 0xffff,
1852 				       ntohs(tp->protocol));
1853 		return TC_ACT_SHOT;
1854 	}
1855 
1856 	tp = old_tp;
1857 	protocol = tc_skb_protocol(skb);
1858 	goto reclassify;
1859 #endif
1860 }
1861 EXPORT_SYMBOL(tc_classify);
1862 
1863 bool tcf_destroy(struct tcf_proto *tp, bool force)
1864 {
1865 	if (tp->ops->destroy(tp, force)) {
1866 		module_put(tp->ops->owner);
1867 		kfree_rcu(tp, rcu);
1868 		return true;
1869 	}
1870 
1871 	return false;
1872 }
1873 
1874 void tcf_destroy_chain(struct tcf_proto __rcu **fl)
1875 {
1876 	struct tcf_proto *tp;
1877 
1878 	while ((tp = rtnl_dereference(*fl)) != NULL) {
1879 		RCU_INIT_POINTER(*fl, tp->next);
1880 		tcf_destroy(tp, true);
1881 	}
1882 }
1883 EXPORT_SYMBOL(tcf_destroy_chain);
1884 
1885 #ifdef CONFIG_PROC_FS
1886 static int psched_show(struct seq_file *seq, void *v)
1887 {
1888 	seq_printf(seq, "%08x %08x %08x %08x\n",
1889 		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
1890 		   1000000,
1891 		   (u32)NSEC_PER_SEC / hrtimer_resolution);
1892 
1893 	return 0;
1894 }
1895 
1896 static int psched_open(struct inode *inode, struct file *file)
1897 {
1898 	return single_open(file, psched_show, NULL);
1899 }
1900 
1901 static const struct file_operations psched_fops = {
1902 	.owner = THIS_MODULE,
1903 	.open = psched_open,
1904 	.read  = seq_read,
1905 	.llseek = seq_lseek,
1906 	.release = single_release,
1907 };
1908 
1909 static int __net_init psched_net_init(struct net *net)
1910 {
1911 	struct proc_dir_entry *e;
1912 
1913 	e = proc_create("psched", 0, net->proc_net, &psched_fops);
1914 	if (e == NULL)
1915 		return -ENOMEM;
1916 
1917 	return 0;
1918 }
1919 
1920 static void __net_exit psched_net_exit(struct net *net)
1921 {
1922 	remove_proc_entry("psched", net->proc_net);
1923 }
1924 #else
1925 static int __net_init psched_net_init(struct net *net)
1926 {
1927 	return 0;
1928 }
1929 
1930 static void __net_exit psched_net_exit(struct net *net)
1931 {
1932 }
1933 #endif
1934 
1935 static struct pernet_operations psched_net_ops = {
1936 	.init = psched_net_init,
1937 	.exit = psched_net_exit,
1938 };
1939 
1940 static int __init pktsched_init(void)
1941 {
1942 	int err;
1943 
1944 	err = register_pernet_subsys(&psched_net_ops);
1945 	if (err) {
1946 		pr_err("pktsched_init: "
1947 		       "cannot initialize per netns operations\n");
1948 		return err;
1949 	}
1950 
1951 	register_qdisc(&pfifo_fast_ops);
1952 	register_qdisc(&pfifo_qdisc_ops);
1953 	register_qdisc(&bfifo_qdisc_ops);
1954 	register_qdisc(&pfifo_head_drop_qdisc_ops);
1955 	register_qdisc(&mq_qdisc_ops);
1956 	register_qdisc(&noqueue_qdisc_ops);
1957 
1958 	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
1959 	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
1960 	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
1961 	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
1962 	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
1963 	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);
1964 
1965 	return 0;
1966 }
1967 
1968 subsys_initcall(pktsched_init);
1969