/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in an order and at times
   determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (see cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on.

   The goal of the routines in this file is to translate
   the information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to perform some
   sanity checks and the part of the work which is common to all
   qdiscs, and to provide rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns a skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it just means that the
   discipline does not want to send anything at this time.
   The queue is really empty only if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not
   the real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code.
   NET_XMIT_DROP 	- this packet was dropped
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN	 	- this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore

   Auxiliary routines:

   ---peek

   like dequeue but without removing a packet from the queue

   ---reset

   returns the qdisc to its initial state: purges all buffers, clears all
   timers, counters (except statistics), etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys the resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
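
/*
 * To make the contract above concrete, here is a minimal, purely
 * illustrative sketch of a "queue"-category discipline built on the skb
 * list embedded in struct Qdisc (hypothetical code, not part of this
 * file; sch_fifo.c is a real example). enqueue reports success or a
 * NET_XMIT_* code; dequeue may return NULL while q->q.qlen stays valid:
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 *
 *	static struct sk_buff *example_dequeue(struct Qdisc *sch)
 *	{
 *		return qdisc_dequeue_head(sch);
 *	}
 */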

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->find && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);
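
/*
 * A typical caller registers its ops from module_init() and unregisters
 * them from module_exit(). A hedged sketch with hypothetical names (not
 * part of this file; see e.g. sch_fq_codel.c for a real module):
 *
 *	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *		.id		= "example",
 *		.priv_size	= sizeof(struct example_sched_data),
 *		.enqueue	= example_enqueue,
 *		.dequeue	= example_dequeue,
 *		.peek		= qdisc_peek_dequeued,
 *		.init		= example_init,
 *		.reset		= example_reset,
 *		.destroy	= example_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return register_qdisc(&example_qdisc_ops);
 *	}
 *	module_init(example_module_init);
 */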

int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);

/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}

/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}

#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
{
	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif

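/*
 * At runtime the same default can be chosen through the
 * net.core.default_qdisc sysctl, whose handler ends up calling
 * qdisc_set_default(). An illustrative shell session (values are
 * examples only):
 *
 *	# sysctl -w net.core.default_qdisc=fq_codel
 *	# cat /proc/sys/net/core/default_qdisc
 *	fq_codel
 *
 * Newly attached default root qdiscs then use this ops (via
 * default_qdisc_ops) instead of pfifo_fast.
 */
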
/* We know the handle. Find the qdisc among all qdiscs attached to the
 * device (the root qdisc, all its children, children of children, etc.)
 * Note: the caller either uses rtnl or rcu_read_lock()
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!qdisc_dev(root))
		return (root->handle == handle ? root : NULL);

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
		if (invisible)
			q->flags |= TCQ_F_INVISIBLE;
	}
}
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_del_rcu(&q->hash);
	}
}
EXPORT_SYMBOL(qdisc_hash_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	if (!handle)
		return NULL;
	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}

struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
{
	struct netdev_queue *nq;
	struct Qdisc *q;

	if (!handle)
		return NULL;
	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	nq = dev_ingress_queue_rcu(dev);
	if (nq)
		q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
out:
	return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->find(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	return leaf;
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}

/* The linklayer setting was not transferred from iproute2, in older
 * versions, and the rate table lookup system has been dropped from
 * the kernel. To stay backward compatible with older iproute2 tc
 * utils, we detect the linklayer setting by checking whether the rate
 * table was modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, thus some table entries will contain the same value.  The
 * mpu (min packet unit) is also encoded into the old rate table, thus
 * starting from the mpu, we find the low and high table entries for
 * mapping this cell.  If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding the mpu to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, calculating the table entry one
 * below, and comparing.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}
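
/*
 * Worked example of the detection math above (illustrative numbers,
 * assuming mpu = 0 and cell_log = 3, i.e. one rtab cell per 8 bytes):
 *
 *	low       = roundup(0, 48)  = 0
 *	high      = roundup(1, 48)  = 48
 *	cell_low  = 0 >> 3          = 0
 *	cell_high = (48 >> 3) - 1   = 5
 *
 * An ATM-aligned table repeats one value for all sizes inside a 48-byte
 * cell, so rtab[0] == rtab[5] and we report TC_LINKLAYER_ATM; a plain
 * Ethernet table grows every 8 bytes, so the two entries differ.
 */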

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE) {
		NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
		return NULL;
	}

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	} else {
		NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);

static LIST_HEAD(qdisc_stab_list);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
					       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, extack);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE]) {
		NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
		return ERR_PTR(-EINVAL);
	}

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA]) {
			NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
			return ERR_PTR(-EINVAL);
		}
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0)) {
		NL_SET_ERR_MSG(extack, "Invalid size of size table");
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		return stab;
	}

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	list_add_tail(&stab->list, &qdisc_stab_list);

	return stab;
}

static void stab_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct qdisc_size_table, rcu));
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
	}
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}

void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
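
/*
 * Worked example for the size-table lookup above (illustrative numbers):
 * with overhead = 24, cell_align = 0, cell_log = 6, size_log = 6 and a
 * 64-byte skb,
 *
 *	pkt_len = 64 + 24 = 88
 *	slot    = 88 >> 6 = 1
 *	pkt_len = stab->data[1] << 6
 *
 * so with stab->data[1] == 2 the packet is accounted as 128 bytes; the
 * out-of-range branch extrapolates from the last table entry instead.
 */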

void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
				 clockid_t clockid)
{
	hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init_clockid);

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (wd->last_expires == expires)
		return;

	wd->last_expires = expires;
	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
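
/*
 * Typical watchdog usage in a shaping qdisc, sketched with hypothetical
 * names (real users include sch_tbf.c and sch_fq.c): when ->dequeue()
 * decides the head packet is not yet due, it arms the watchdog so the
 * device is rescheduled once the deadline passes:
 *
 *	static struct sk_buff *example_dequeue(struct Qdisc *sch)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *		u64 now = ktime_get_ns();
 *
 *		if (q->next_send_time > now) {
 *			qdisc_watchdog_schedule_ns(&q->watchdog,
 *						   q->next_send_time);
 *			return NULL;
 *		}
 *		...
 *	}
 *
 * ->init() calls qdisc_watchdog_init() and ->destroy() must call
 * qdisc_watchdog_cancel() before the qdisc memory goes away.
 */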

static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	struct hlist_head *h;
	unsigned int i;

	h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash     = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	kvfree(ohash);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (!clhash->hash)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	kvfree(clhash->hash);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
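
/*
 * How a classful qdisc typically uses these helpers, sketched with
 * hypothetical names (sch_htb.c is a real user): each class embeds a
 * struct Qdisc_class_common, the table is set up in ->init() and grown
 * opportunistically whenever a class is added:
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;
 *		...
 *	};
 *
 *	->init():    qdisc_class_hash_init(&q->clhash);
 *	new class:   cl->common.classid = classid;
 *		     qdisc_class_hash_insert(&q->clhash, &cl->common);
 *		     qdisc_class_hash_grow(sch, &q->clhash);
 *	lookup:      qdisc_class_find(&q->clhash, classid);
 *	->destroy(): qdisc_class_hash_destroy(&q->clhash);
 */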

/* Allocate a unique handle from the space managed by the kernel.
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while	(--i > 0);

	return 0;
}
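
/*
 * For reference, a handle is a 32-bit value split as major:minor, with
 * TC_H_MAJ() selecting the upper and TC_H_MIN() the lower 16 bits.
 * A small illustration (numbers only, not taken from a live system):
 *
 *	TC_H_MAKE(0x8001 << 16, 0)	handle  8001:	(a root qdisc)
 *	TC_H_MAKE(0x8001 << 16, 0x10)	classid 8001:10	(a class of it)
 *
 * which is why the allocator above steps in increments of 0x10000: it
 * only hands out majors, leaving the minor space to classes.
 */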

void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
			       unsigned int len)
{
	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	bool notify;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);
	rcu_read_lock();
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			break;

		if (sch->flags & TCQ_F_NOPARENT)
			break;
		/* Notify parent qdisc only if child qdisc becomes empty.
		 *
		 * If the child was empty even before the update then the
		 * backlog counter is screwed and we skip the notification
		 * because the parent class is already passive.
		 *
		 * If the original child was offloaded then it is allowed
		 * to be seen as empty, so the parent is notified anyway.
		 */
		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
						       !qdisc_is_offloaded);
		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON_ONCE(parentid != TC_H_ROOT);
			break;
		}
		cops = sch->ops->cl_ops;
		if (notify && cops->qlen_notify) {
			cl = cops->find(sch, parentid);
			cops->qlen_notify(sch, cl);
		}
		sch->q.qlen -= n;
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
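
/*
 * A typical caller drops packets from a child qdisc and then propagates
 * the accounting upwards. A hedged sketch (hypothetical context;
 * qdisc_pkt_len() and rtnl_qdisc_drop() are the usual helpers):
 *
 *	unsigned int len = qdisc_pkt_len(skb);
 *
 *	rtnl_qdisc_drop(skb, child);
 *	qdisc_tree_reduce_backlog(child, 1, len);
 *
 * so every ancestor sees its qlen shrink by one packet and its backlog
 * by len bytes, and a parent whose class just became empty gets a
 * qlen_notify() callback.
 */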

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	u32 block_index;
	__u32 qlen;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = refcount_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->ingress_block_get) {
		block_index = q->ops->ingress_block_get(q);
		if (block_index &&
		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
			goto nla_put_failure;
	}
	if (q->ops->egress_block_get) {
		block_index = q->ops->egress_block_get(q);
		if (block_index &&
		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
			goto nla_put_failure;
	}
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
		goto nla_put_failure;
	qlen = qdisc_qlen_sum(q);

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
				  &d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
	if (q->flags & TCQ_F_BUILTIN)
		return true;
	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
		return true;

	return false;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old, false)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new, false)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_put(old);
}

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate, send a netlink notification using 'skb'
 * and 'n'.
 *
 * On success, destroy the old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old,
		       struct netlink_ext_ack *extack)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev)) {
				NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
				return -ENOENT;
			}
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				qdisc_refcount_inc(new);

			if (!ingress)
				qdisc_put(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				qdisc_refcount_inc(new);
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		/* Only support running class lockless if parent is lockless */
		if (new && (new->flags & TCQ_F_NOLOCK) &&
		    parent && !(parent->flags & TCQ_F_NOLOCK))
			new->flags &= ~TCQ_F_NOLOCK;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->find(parent, classid);

			if (cl) {
				err = cops->graft(parent, cl, new, &old,
						  extack);
			} else {
				NL_SET_ERR_MSG(extack, "Specified class not found");
				err = -ENOENT;
			}
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}

static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
				   struct netlink_ext_ack *extack)
{
	u32 block_index;

	if (tca[TCA_INGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);

		if (!block_index) {
			NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
			return -EINVAL;
		}
		if (!sch->ops->ingress_block_set) {
			NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
			return -EOPNOTSUPP;
		}
		sch->ops->ingress_block_set(sch, block_index);
	}
	if (tca[TCA_EGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);

		if (!block_index) {
			NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
			return -EINVAL;
		}
		if (!sch->ops->egress_block_set) {
			NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
			return -EOPNOTSUPP;
		}
		sch->ops->egress_block_set(sch, block_index);
	}
	return 0;
}

/*
   Allocate and initialize a new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *qdisc_create(struct net_device *dev,
				  struct netdev_queue *dev_queue,
				  struct Qdisc *p, u32 parent, u32 handle,
				  struct nlattr **tca, int *errp,
				  struct netlink_ext_ack *extack)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load.  So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request.  We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the meantime.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try qdisc_lookup_ops again,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (!ops) {
		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
		goto err_out;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	/* This exists to keep backward compatibility with a userspace
	 * loophole that allowed userspace to get the IFF_NO_QUEUE
	 * facility on older kernels by setting tx_queue_len=0 (prior
	 * to qdisc init), and then forgetting to reinit tx_queue_len
	 * before again attaching a qdisc.
	 */
	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
	}

	err = qdisc_block_indexes_set(sch, tca, extack);
	if (err)
		goto err_out3;

	if (ops->init) {
		err = ops->init(sch, tca[TCA_OPTIONS], extack);
		if (err != 0)
			goto err_out5;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
		if (IS_ERR(stab)) {
			err = PTR_ERR(stab);
			goto err_out4;
		}
		rcu_assign_pointer(sch->stab, stab);
	}
	if (tca[TCA_RATE]) {
		seqcount_t *running;

		err = -EOPNOTSUPP;
		if (sch->flags & TCQ_F_MQROOT) {
			NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
			goto err_out4;
		}

		if (sch->parent != TC_H_ROOT &&
		    !(sch->flags & TCQ_F_INGRESS) &&
		    (!p || !(p->flags & TCQ_F_MQROOT)))
			running = qdisc_root_sleeping_running(sch);
		else
			running = &sch->running;

		err = gen_new_estimator(&sch->bstats,
					sch->cpu_bstats,
					&sch->rate_est,
					NULL,
					running,
					tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
			goto err_out4;
		}
	}

	qdisc_hash_add(sch, false);

	return sch;

err_out5:
	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
	if (ops->destroy)
		ops->destroy(sch);
err_out3:
	dev_put(dev);
	qdisc_free(sch);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
			struct netlink_ext_ack *extack)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (!sch->ops->change) {
			NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
			return -EINVAL;
		}
		if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
			NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
			return -EOPNOTSUPP;
		}
		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      NULL,
				      qdisc_root_sleeping_running(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}

struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl,
			 struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}

/*
 * Delete/get qdisc.
 */

const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
	[TCA_KIND]		= { .type = NLA_STRING },
	[TCA_OPTIONS]		= { .type = NLA_NESTED },
	[TCA_RATE]		= { .type = NLA_BINARY,
				    .len = sizeof(struct tc_estimator) },
	[TCA_STAB]		= { .type = NLA_NESTED },
	[TCA_DUMP_INVISIBLE]	= { .type = NLA_FLAG },
	[TCA_CHAIN]		= { .type = NLA_U32 },
	[TCA_INGRESS_BLOCK]	= { .type = NLA_U32 },
	[TCA_EGRESS_BLOCK]	= { .type = NLA_U32 },
};

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
			  extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p) {
					NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
					return -ENOENT;
				}
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q) {
			NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
			return -ENOENT;
		}

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
			NL_SET_ERR_MSG(extack, "Invalid handle");
			return -EINVAL;
		}
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q) {
			NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
			return -ENOENT;
		}
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
		return -EINVAL;
	}

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid) {
			NL_SET_ERR_MSG(extack, "Classid cannot be zero");
			return -EINVAL;
		}
		if (q->handle == 0) {
			NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
			return -ENOENT;
		}
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}

/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
			  extack);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p) {
					NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
					return -ENOENT;
				}
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		/* It may be the default qdisc; ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
					NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
					return -EEXIST;
				}
				if (TC_H_MIN(tcm->tcm_handle)) {
					NL_SET_ERR_MSG(extack, "Invalid minor handle");
					return -EINVAL;
				}
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL) {
					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
					return -EEXIST;
				}
				if (tca[TCA_KIND] &&
				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
					NL_SET_ERR_MSG(extack, "Invalid qdisc name");
					return -EINVAL;
				}
				if (q == p ||
				    (p && check_loop(q, p, 0))) {
					NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
					return -ELOOP;
				}
				qdisc_refcount_inc(q);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know that some child q is already
				 *   attached to this parent and we have a choice:
				 *   either to change it or to create/graft a new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if CREATE and REPLACE flags are set.
				 *
				 *   2. If EXCL is set, the requestor wanted to say
				 *   that the qdisc tcm_handle is not expected
				 *   to exist, so we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is sort of a hole in the API; we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft if the
				 *   user gave a KIND which does not match the existing one.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle) {
			NL_SET_ERR_MSG(extack, "Handle cannot be zero");
			return -EINVAL;
		}
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (!q) {
		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
		return -ENOENT;
	}
	if (n->nlmsg_flags & NLM_F_EXCL) {
		NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
		return -EEXIST;
	}
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
		return -EINVAL;
	}
	err = qdisc_change(q, tca, extack);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE)) {
		NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
		return -ENOENT;
	}
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev)) {
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err, extack);
		} else {
			NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
			err = -ENOENT;
		}
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err, extack);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
	if (err) {
		if (q)
			qdisc_put(q);
		return err;
	}

	return 0;
}

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx, bool recur,
			      bool dump_invisible)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}
	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
	 * itself has already been dumped.
	 *
	 * If we've already dumped the top-level (ingress) qdisc above and the
	 * global qdisc hashtable, we don't want to hit it again.
	 */
	if (!qdisc_dev(root) || !recur)
		goto out;

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;
	const struct nlmsghdr *nlh = cb->nlh;
	struct nlattr *tca[TCA_MAX + 1];
	int err;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();

	err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
			  rtm_tca_policy, cb->extack);
	if (err < 0)
		return err;

	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
				       true, tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx, false,
				       tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}



/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/

static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tclass_del_notify(struct net *net,
			     const struct Qdisc_class_ops *cops,
			     struct sk_buff *oskb, struct nlmsghdr *n,
			     struct Qdisc *q, unsigned long cl)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct sk_buff *skb;
	int err = 0;

	if (!cops->delete)
		return -EOPNOTSUPP;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
			   RTM_DELTCLASS) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = cops->delete(q, cl);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

#ifdef CONFIG_NET_CLS

struct tcf_bind_args {
	struct tcf_walker w;
	u32 classid;
	unsigned long cl;
};

static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_bind_args *a = (void *)arg;

	if (tp->ops->bind_class) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		sch_tree_lock(q);
		tp->ops->bind_class(n, a->classid, a->cl);
		sch_tree_unlock(q);
	}
	return 0;
}

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tcf_block *block;
	struct tcf_chain *chain;
	unsigned long cl;

	cl = cops->find(q, portid);
	if (!cl)
		return;
	block = cops->tcf_block(q, cl, NULL);
	if (!block)
		return;
	list_for_each_entry(chain, &block->chain_list, list) {
		struct tcf_proto *tp;

		for (tp = rtnl_dereference(chain->filter_chain);
		     tp; tp = rtnl_dereference(tp->next)) {
			struct tcf_bind_args arg = {};

			arg.w.fn = tcf_node_bind;
			arg.classid = clid;
			arg.cl = new_cl;
			tp->ops->walk(tp, &arg.w);
		}
	}
}

#else

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
}

#endif

static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
			  extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */

	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is a genuine qdisc handle consistent with
		 * both parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->find(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = tclass_del_notify(net, cops, skb, n, q, cl);
			/* Unbind the class from its filters by binding them to 0 */
			tc_bind_tclass(q, portid, clid, 0);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
		return -EOPNOTSUPP;
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl, extack);
	if (err == 0) {
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
		/* We just created a new class; we need to do the reverse binding. */
		if (cl != new_cl)
			tc_bind_tclass(q, portid, clid, new_cl);
	}
out:
	return err;
}

struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
			    struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			      RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q, false) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop  = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	if (!qdisc_dev(root))
		return 0;

	if (tcm->tcm_parent) {
		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
		if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
		return 0;
	}
	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}

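/*
 * For reference, the four fields of /proc/net/psched printed above are
 * the nanoseconds per microsecond, the nanoseconds per psched tick, the
 * legacy 1 MHz clock rate and the timer frequency in Hz. A plausible
 * reading on a system with 1 ns hrtimer resolution (illustrative only):
 *
 *	$ cat /proc/net/psched
 *	000003e8 00000040 000f4240 3b9aca00
 */
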
static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create_single("psched", 0, net->proc_net, psched_show);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};

static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);
	register_qdisc(&noqueue_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
		      0);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
		      0);

	return 0;
}

subsys_initcall(pktsched_init);