xref: /openbmc/linux/net/sched/sch_api.c (revision 4ebdac060e5e24a89a7b3ec33ec46a41621e57fe)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/sch_api.c	Packet scheduler API.
4  *
5  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  *
7  * Fixes:
8  *
9  * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
10  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
11  * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
12  */
13 
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <linux/skbuff.h>
20 #include <linux/init.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/kmod.h>
24 #include <linux/list.h>
25 #include <linux/hrtimer.h>
26 #include <linux/slab.h>
27 #include <linux/hashtable.h>
28 
29 #include <net/net_namespace.h>
30 #include <net/sock.h>
31 #include <net/netlink.h>
32 #include <net/pkt_sched.h>
33 #include <net/pkt_cls.h>
34 #include <net/tc_wrapper.h>
35 
36 #include <trace/events/qdisc.h>
37 
38 /*
39 
40    Short review.
41    -------------
42 
43    This file consists of two interrelated parts:
44 
45    1. queueing disciplines manager frontend.
46    2. traffic classes manager frontend.
47 
48    Generally, a queueing discipline ("qdisc") is a black box
49    that can enqueue packets and dequeue them (when the
50    device is ready to send something) in an order and at times
51    determined by the algorithm hidden inside it.
52 
53    qdiscs are divided into two categories:
54    - "queues", which have no internal structure visible from outside.
55    - "schedulers", which split all the packets into "traffic classes",
56      using "packet classifiers" (see cls_api.c)
57 
58    In turn, classes may have child qdiscs (as a rule, queues)
59    attached to them, etc. etc. etc.
60 
61    The goal of the routines in this file is to translate
62    the information supplied by the user in the form of handles
63    into a form more intelligible to the kernel, to perform sanity
64    checks and the part of the work that is common to all qdiscs,
65    and to provide rtnetlink notifications.
66 
67    All real intelligent work is done inside qdisc modules.
68 
69 
70 
71    Every discipline has two major routines: enqueue and dequeue.
72 
73    ---dequeue
74 
75    dequeue usually returns an skb to send. It is allowed to return NULL,
76    but that does not mean the queue is empty; it only means the
77    discipline does not want to send anything right now.
78    The queue is really empty if q->q.qlen == 0.
79    For complicated disciplines with multiple queues, q->q is not the
80    real packet queue, but q->q.qlen must nevertheless be valid.
81 
82    ---enqueue
83 
84    enqueue returns 0 if the packet was enqueued successfully.
85    If a packet (this one or another one) was dropped, it returns
86    a non-zero error code.
87    NET_XMIT_DROP 	- this packet was dropped
88      Expected action: do not back off, but wait until the queue clears.
89    NET_XMIT_CN	 	- probably this packet was enqueued, but another one was dropped.
90      Expected action: back off or ignore
91 
92    Auxiliary routines:
93 
94    ---peek
95 
96    like dequeue but without removing a packet from the queue
97 
98    ---reset
99 
100    returns the qdisc to its initial state: purges all buffers, clears all
101    timers and counters (except statistics), etc.
102 
103    ---init
104 
105    initializes a newly created qdisc.
106 
107    ---destroy
108 
109    destroys resources allocated by init and during the lifetime of the qdisc.
110 
111    ---change
112 
113    changes qdisc parameters.
114  */
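/*
 * Illustrative sketch (not part of this file): a hypothetical minimal
 * FIFO showing the enqueue/dequeue contract described above.  The
 * example_* names are made up; the helpers are the generic ones from
 * include/net/sch_generic.h.
 */
#if 0
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
		return qdisc_enqueue_tail(skb, sch);	/* NET_XMIT_SUCCESS */

	/* Tail drop: return a non-zero code so callers can react. */
	return qdisc_drop(skb, sch, to_free);		/* NET_XMIT_CN */
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* A shaping qdisc may return NULL while q.qlen != 0; this
	 * trivial FIFO returns NULL only when it is really empty.
	 */
	return qdisc_dequeue_head(sch);
}
#endif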
115 
116 /* Protects the list of registered TC modules. It is a pure SMP lock. */
117 static DEFINE_RWLOCK(qdisc_mod_lock);
118 
119 
120 /************************************************
121  *	Queueing disciplines manipulation.	*
122  ************************************************/
123 
124 
125 /* The list of all installed queueing disciplines. */
126 
127 static struct Qdisc_ops *qdisc_base;
128 
129 /* Register/unregister queueing discipline */
130 
131 int register_qdisc(struct Qdisc_ops *qops)
132 {
133 	struct Qdisc_ops *q, **qp;
134 	int rc = -EEXIST;
135 
136 	write_lock(&qdisc_mod_lock);
137 	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
138 		if (!strcmp(qops->id, q->id))
139 			goto out;
140 
141 	if (qops->enqueue == NULL)
142 		qops->enqueue = noop_qdisc_ops.enqueue;
143 	if (qops->peek == NULL) {
144 		if (qops->dequeue == NULL)
145 			qops->peek = noop_qdisc_ops.peek;
146 		else
147 			goto out_einval;
148 	}
149 	if (qops->dequeue == NULL)
150 		qops->dequeue = noop_qdisc_ops.dequeue;
151 
152 	if (qops->cl_ops) {
153 		const struct Qdisc_class_ops *cops = qops->cl_ops;
154 
155 		if (!(cops->find && cops->walk && cops->leaf))
156 			goto out_einval;
157 
158 		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
159 			goto out_einval;
160 	}
161 
162 	qops->next = NULL;
163 	*qp = qops;
164 	rc = 0;
165 out:
166 	write_unlock(&qdisc_mod_lock);
167 	return rc;
168 
169 out_einval:
170 	rc = -EINVAL;
171 	goto out;
172 }
173 EXPORT_SYMBOL(register_qdisc);
174 
175 void unregister_qdisc(struct Qdisc_ops *qops)
176 {
177 	struct Qdisc_ops *q, **qp;
178 	int err = -ENOENT;
179 
180 	write_lock(&qdisc_mod_lock);
181 	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
182 		if (q == qops)
183 			break;
184 	if (q) {
185 		*qp = q->next;
186 		q->next = NULL;
187 		err = 0;
188 	}
189 	write_unlock(&qdisc_mod_lock);
190 
191 	WARN(err, "unregister qdisc(%s) failed\n", qops->id);
192 }
193 EXPORT_SYMBOL(unregister_qdisc);
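/*
 * Illustrative sketch (hypothetical module, not part of this file): how
 * a qdisc module typically pairs register_qdisc()/unregister_qdisc(),
 * reusing the example_* hooks sketched above.
 */
#if 0
static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example",
	.enqueue	= example_enqueue,
	.dequeue	= example_dequeue,
	.peek		= qdisc_peek_head,	/* required when dequeue is set */
	.owner		= THIS_MODULE,
};

static int __init example_module_init(void)
{
	return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
	unregister_qdisc(&example_qdisc_ops);
}
module_init(example_module_init);
module_exit(example_module_exit);
#endif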
194 
195 /* Get default qdisc if not otherwise specified */
196 void qdisc_get_default(char *name, size_t len)
197 {
198 	read_lock(&qdisc_mod_lock);
199 	strscpy(name, default_qdisc_ops->id, len);
200 	read_unlock(&qdisc_mod_lock);
201 }
202 
203 static struct Qdisc_ops *qdisc_lookup_default(const char *name)
204 {
205 	struct Qdisc_ops *q = NULL;
206 
207 	for (q = qdisc_base; q; q = q->next) {
208 		if (!strcmp(name, q->id)) {
209 			if (!try_module_get(q->owner))
210 				q = NULL;
211 			break;
212 		}
213 	}
214 
215 	return q;
216 }
217 
218 /* Set new default qdisc to use */
219 int qdisc_set_default(const char *name)
220 {
221 	const struct Qdisc_ops *ops;
222 
223 	if (!capable(CAP_NET_ADMIN))
224 		return -EPERM;
225 
226 	write_lock(&qdisc_mod_lock);
227 	ops = qdisc_lookup_default(name);
228 	if (!ops) {
229 		/* Not found, drop lock and try to load module */
230 		write_unlock(&qdisc_mod_lock);
231 		request_module("sch_%s", name);
232 		write_lock(&qdisc_mod_lock);
233 
234 		ops = qdisc_lookup_default(name);
235 	}
236 
237 	if (ops) {
238 		/* Set new default */
239 		module_put(default_qdisc_ops->owner);
240 		default_qdisc_ops = ops;
241 	}
242 	write_unlock(&qdisc_mod_lock);
243 
244 	return ops ? 0 : -ENOENT;
245 }
246 
247 #ifdef CONFIG_NET_SCH_DEFAULT
248 /* Set default value from kernel config */
249 static int __init sch_default_qdisc(void)
250 {
251 	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
252 }
253 late_initcall(sch_default_qdisc);
254 #endif
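/* At runtime the same knob is reachable through the net.core.default_qdisc
 * sysctl, whose handler ends up calling qdisc_set_default(), e.g.
 * "sysctl -w net.core.default_qdisc=fq".
 */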
255 
256 /* We know the handle. Find the qdisc among all qdiscs attached to the device
257  * (root qdisc, all its children, children of children, etc.)
258  * Note: caller either uses rtnl or rcu_read_lock()
259  */
260 
261 static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
262 {
263 	struct Qdisc *q;
264 
265 	if (!qdisc_dev(root))
266 		return (root->handle == handle ? root : NULL);
267 
268 	if (!(root->flags & TCQ_F_BUILTIN) &&
269 	    root->handle == handle)
270 		return root;
271 
272 	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
273 				   lockdep_rtnl_is_held()) {
274 		if (q->handle == handle)
275 			return q;
276 	}
277 	return NULL;
278 }
279 
280 void qdisc_hash_add(struct Qdisc *q, bool invisible)
281 {
282 	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
283 		ASSERT_RTNL();
284 		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
285 		if (invisible)
286 			q->flags |= TCQ_F_INVISIBLE;
287 	}
288 }
289 EXPORT_SYMBOL(qdisc_hash_add);
290 
291 void qdisc_hash_del(struct Qdisc *q)
292 {
293 	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
294 		ASSERT_RTNL();
295 		hash_del_rcu(&q->hash);
296 	}
297 }
298 EXPORT_SYMBOL(qdisc_hash_del);
299 
300 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
301 {
302 	struct Qdisc *q;
303 
304 	if (!handle)
305 		return NULL;
306 	q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
307 	if (q)
308 		goto out;
309 
310 	if (dev_ingress_queue(dev))
311 		q = qdisc_match_from_root(
312 			rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping),
313 			handle);
314 out:
315 	return q;
316 }
317 
318 struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
319 {
320 	struct netdev_queue *nq;
321 	struct Qdisc *q;
322 
323 	if (!handle)
324 		return NULL;
325 	q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
326 	if (q)
327 		goto out;
328 
329 	nq = dev_ingress_queue_rcu(dev);
330 	if (nq)
331 		q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),
332 					  handle);
333 out:
334 	return q;
335 }
336 
337 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
338 {
339 	unsigned long cl;
340 	const struct Qdisc_class_ops *cops = p->ops->cl_ops;
341 
342 	if (cops == NULL)
343 		return NULL;
344 	cl = cops->find(p, classid);
345 
346 	if (cl == 0)
347 		return NULL;
348 	return cops->leaf(p, cl);
349 }
350 
351 /* Find queueing discipline by name */
352 
353 static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
354 {
355 	struct Qdisc_ops *q = NULL;
356 
357 	if (kind) {
358 		read_lock(&qdisc_mod_lock);
359 		for (q = qdisc_base; q; q = q->next) {
360 			if (nla_strcmp(kind, q->id) == 0) {
361 				if (!try_module_get(q->owner))
362 					q = NULL;
363 				break;
364 			}
365 		}
366 		read_unlock(&qdisc_mod_lock);
367 	}
368 	return q;
369 }
370 
371 /* The linklayer setting was not transferred from iproute2, in older
372  * versions, and the rate table lookup system has been dropped from
373  * the kernel. To stay backward compatible with older iproute2 tc
374  * utilities, we detect the linklayer setting by checking whether the
375  * rate table was modified.
376  *
377  * For linklayer ATM table entries, the rate table will be aligned to
378  * 48 bytes, thus some table entries will contain the same value.  The
379  * mpu (min packet unit) is also encoded into the old rate table, thus
380  * starting from the mpu, we find the low and high table entries for
381  * mapping this cell.  If these entries contain the same value, then
382  * the rate table has been modified for linklayer ATM.
383  *
384  * This is done by rounding mpu up to the nearest 48-byte cell/entry,
385  * then rounding up to the next cell, calculating the table entry one
386  * below, and comparing the two.
387  */
388 static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
389 {
390 	int low       = roundup(r->mpu, 48);
391 	int high      = roundup(low+1, 48);
392 	int cell_low  = low >> r->cell_log;
393 	int cell_high = (high >> r->cell_log) - 1;
394 
395 	/* rtab is too inaccurate at rates > 100Mbit/s */
396 	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
397 		pr_debug("TC linklayer: Giving up ATM detection\n");
398 		return TC_LINKLAYER_ETHERNET;
399 	}
400 
401 	if ((cell_high > cell_low) && (cell_high < 256)
402 	    && (rtab[cell_low] == rtab[cell_high])) {
403 		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
404 			 cell_low, cell_high, rtab[cell_high]);
405 		return TC_LINKLAYER_ATM;
406 	}
407 	return TC_LINKLAYER_ETHERNET;
408 }
409 
410 static struct qdisc_rate_table *qdisc_rtab_list;
411 
412 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
413 					struct nlattr *tab,
414 					struct netlink_ext_ack *extack)
415 {
416 	struct qdisc_rate_table *rtab;
417 
418 	if (tab == NULL || r->rate == 0 ||
419 	    r->cell_log == 0 || r->cell_log >= 32 ||
420 	    nla_len(tab) != TC_RTAB_SIZE) {
421 		NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
422 		return NULL;
423 	}
424 
425 	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
426 		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
427 		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
428 			rtab->refcnt++;
429 			return rtab;
430 		}
431 	}
432 
433 	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
434 	if (rtab) {
435 		rtab->rate = *r;
436 		rtab->refcnt = 1;
437 		memcpy(rtab->data, nla_data(tab), 1024);
438 		if (r->linklayer == TC_LINKLAYER_UNAWARE)
439 			r->linklayer = __detect_linklayer(r, rtab->data);
440 		rtab->next = qdisc_rtab_list;
441 		qdisc_rtab_list = rtab;
442 	} else {
443 		NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
444 	}
445 	return rtab;
446 }
447 EXPORT_SYMBOL(qdisc_get_rtab);
448 
449 void qdisc_put_rtab(struct qdisc_rate_table *tab)
450 {
451 	struct qdisc_rate_table *rtab, **rtabp;
452 
453 	if (!tab || --tab->refcnt)
454 		return;
455 
456 	for (rtabp = &qdisc_rtab_list;
457 	     (rtab = *rtabp) != NULL;
458 	     rtabp = &rtab->next) {
459 		if (rtab == tab) {
460 			*rtabp = rtab->next;
461 			kfree(rtab);
462 			return;
463 		}
464 	}
465 }
466 EXPORT_SYMBOL(qdisc_put_rtab);
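/*
 * Illustrative sketch (hypothetical qdisc, made-up names): rate tables
 * are shared and reference counted, so a qdisc pairs qdisc_get_rtab()
 * in its setup path with qdisc_put_rtab() in ->destroy().
 */
#if 0
struct example_rate_sched {
	struct qdisc_rate_table *rtab;
};

static int example_parse_rate(struct Qdisc *sch, struct tc_ratespec *r,
			      struct nlattr *rtab_attr,
			      struct netlink_ext_ack *extack)
{
	struct example_rate_sched *q = qdisc_priv(sch);

	q->rtab = qdisc_get_rtab(r, rtab_attr, extack);
	return q->rtab ? 0 : -EINVAL;
}

static void example_destroy(struct Qdisc *sch)
{
	struct example_rate_sched *q = qdisc_priv(sch);

	qdisc_put_rtab(q->rtab);	/* NULL-safe */
}
#endif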
467 
468 static LIST_HEAD(qdisc_stab_list);
469 
470 static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
471 	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
472 	[TCA_STAB_DATA] = { .type = NLA_BINARY },
473 };
474 
475 static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
476 					       struct netlink_ext_ack *extack)
477 {
478 	struct nlattr *tb[TCA_STAB_MAX + 1];
479 	struct qdisc_size_table *stab;
480 	struct tc_sizespec *s;
481 	unsigned int tsize = 0;
482 	u16 *tab = NULL;
483 	int err;
484 
485 	err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
486 					  extack);
487 	if (err < 0)
488 		return ERR_PTR(err);
489 	if (!tb[TCA_STAB_BASE]) {
490 		NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
491 		return ERR_PTR(-EINVAL);
492 	}
493 
494 	s = nla_data(tb[TCA_STAB_BASE]);
495 
496 	if (s->tsize > 0) {
497 		if (!tb[TCA_STAB_DATA]) {
498 			NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
499 			return ERR_PTR(-EINVAL);
500 		}
501 		tab = nla_data(tb[TCA_STAB_DATA]);
502 		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
503 	}
504 
505 	if (tsize != s->tsize || (!tab && tsize > 0)) {
506 		NL_SET_ERR_MSG(extack, "Invalid size of size table");
507 		return ERR_PTR(-EINVAL);
508 	}
509 
510 	list_for_each_entry(stab, &qdisc_stab_list, list) {
511 		if (memcmp(&stab->szopts, s, sizeof(*s)))
512 			continue;
513 		if (tsize > 0 &&
514 		    memcmp(stab->data, tab, flex_array_size(stab, data, tsize)))
515 			continue;
516 		stab->refcnt++;
517 		return stab;
518 	}
519 
520 	if (s->size_log > STAB_SIZE_LOG_MAX ||
521 	    s->cell_log > STAB_SIZE_LOG_MAX) {
522 		NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
523 		return ERR_PTR(-EINVAL);
524 	}
525 
526 	stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL);
527 	if (!stab)
528 		return ERR_PTR(-ENOMEM);
529 
530 	stab->refcnt = 1;
531 	stab->szopts = *s;
532 	if (tsize > 0)
533 		memcpy(stab->data, tab, flex_array_size(stab, data, tsize));
534 
535 	list_add_tail(&stab->list, &qdisc_stab_list);
536 
537 	return stab;
538 }
539 
540 void qdisc_put_stab(struct qdisc_size_table *tab)
541 {
542 	if (!tab)
543 		return;
544 
545 	if (--tab->refcnt == 0) {
546 		list_del(&tab->list);
547 		kfree_rcu(tab, rcu);
548 	}
549 }
550 EXPORT_SYMBOL(qdisc_put_stab);
551 
552 static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
553 {
554 	struct nlattr *nest;
555 
556 	nest = nla_nest_start_noflag(skb, TCA_STAB);
557 	if (nest == NULL)
558 		goto nla_put_failure;
559 	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
560 		goto nla_put_failure;
561 	nla_nest_end(skb, nest);
562 
563 	return skb->len;
564 
565 nla_put_failure:
566 	return -1;
567 }
568 
569 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
570 			       const struct qdisc_size_table *stab)
571 {
572 	int pkt_len, slot;
573 
574 	pkt_len = skb->len + stab->szopts.overhead;
575 	if (unlikely(!stab->szopts.tsize))
576 		goto out;
577 
578 	slot = pkt_len + stab->szopts.cell_align;
579 	if (unlikely(slot < 0))
580 		slot = 0;
581 
582 	slot >>= stab->szopts.cell_log;
583 	if (likely(slot < stab->szopts.tsize))
584 		pkt_len = stab->data[slot];
585 	else
586 		pkt_len = stab->data[stab->szopts.tsize - 1] *
587 				(slot / stab->szopts.tsize) +
588 				stab->data[slot % stab->szopts.tsize];
589 
590 	pkt_len <<= stab->szopts.size_log;
591 out:
592 	if (unlikely(pkt_len < 1))
593 		pkt_len = 1;
594 	qdisc_skb_cb(skb)->pkt_len = pkt_len;
595 }
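/* Worked example (illustrative numbers): with overhead 0, cell_align 0,
 * cell_log 6, size_log 6 and tsize 16, a 1000 byte skb maps to slot
 * 1000 >> 6 = 15, so pkt_len becomes stab->data[15] << 6.
 */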
596 
597 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
598 {
599 	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
600 		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
601 			txt, qdisc->ops->id, qdisc->handle >> 16);
602 		qdisc->flags |= TCQ_F_WARN_NONWC;
603 	}
604 }
605 EXPORT_SYMBOL(qdisc_warn_nonwc);
606 
607 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
608 {
609 	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
610 						 timer);
611 
612 	rcu_read_lock();
613 	__netif_schedule(qdisc_root(wd->qdisc));
614 	rcu_read_unlock();
615 
616 	return HRTIMER_NORESTART;
617 }
618 
619 void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
620 				 clockid_t clockid)
621 {
622 	hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
623 	wd->timer.function = qdisc_watchdog;
624 	wd->qdisc = qdisc;
625 }
626 EXPORT_SYMBOL(qdisc_watchdog_init_clockid);
627 
628 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
629 {
630 	qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
631 }
632 EXPORT_SYMBOL(qdisc_watchdog_init);
633 
634 void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
635 				      u64 delta_ns)
636 {
637 	bool deactivated;
638 
639 	rcu_read_lock();
640 	deactivated = test_bit(__QDISC_STATE_DEACTIVATED,
641 			       &qdisc_root_sleeping(wd->qdisc)->state);
642 	rcu_read_unlock();
643 	if (deactivated)
644 		return;
645 
646 	if (hrtimer_is_queued(&wd->timer)) {
647 		u64 softexpires;
648 
649 		softexpires = ktime_to_ns(hrtimer_get_softexpires(&wd->timer));
650 		/* If timer is already set in [expires, expires + delta_ns],
651 		 * do not reprogram it.
652 		 */
653 		if (softexpires - expires <= delta_ns)
654 			return;
655 	}
656 
657 	hrtimer_start_range_ns(&wd->timer,
658 			       ns_to_ktime(expires),
659 			       delta_ns,
660 			       HRTIMER_MODE_ABS_PINNED);
661 }
662 EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);
663 
664 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
665 {
666 	hrtimer_cancel(&wd->timer);
667 }
668 EXPORT_SYMBOL(qdisc_watchdog_cancel);
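/*
 * Illustrative sketch (hypothetical shaper, not part of this file): the
 * usual watchdog pattern - init in ->init(), arm from ->dequeue() when
 * the head packet is not yet eligible, cancel in ->destroy().
 */
#if 0
struct example_shaper {
	struct qdisc_watchdog	watchdog;	/* qdisc_watchdog_init() in ->init() */
	u64			next_send_time;
};

static struct sk_buff *example_shaper_dequeue(struct Qdisc *sch)
{
	struct example_shaper *q = qdisc_priv(sch);

	if (q->next_send_time > ktime_get_ns()) {
		/* Not eligible yet: arm the watchdog so the qdisc gets
		 * rescheduled at the right time, and return NULL even
		 * though the queue may be non-empty.
		 */
		qdisc_watchdog_schedule_ns(&q->watchdog, q->next_send_time);
		return NULL;
	}
	return qdisc_dequeue_head(sch);
}
#endif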
669 
670 static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
671 {
672 	struct hlist_head *h;
673 	unsigned int i;
674 
675 	h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
676 
677 	if (h != NULL) {
678 		for (i = 0; i < n; i++)
679 			INIT_HLIST_HEAD(&h[i]);
680 	}
681 	return h;
682 }
683 
684 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
685 {
686 	struct Qdisc_class_common *cl;
687 	struct hlist_node *next;
688 	struct hlist_head *nhash, *ohash;
689 	unsigned int nsize, nmask, osize;
690 	unsigned int i, h;
691 
692 	/* Rehash when load factor exceeds 0.75 */
693 	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
694 		return;
695 	nsize = clhash->hashsize * 2;
696 	nmask = nsize - 1;
697 	nhash = qdisc_class_hash_alloc(nsize);
698 	if (nhash == NULL)
699 		return;
700 
701 	ohash = clhash->hash;
702 	osize = clhash->hashsize;
703 
704 	sch_tree_lock(sch);
705 	for (i = 0; i < osize; i++) {
706 		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
707 			h = qdisc_class_hash(cl->classid, nmask);
708 			hlist_add_head(&cl->hnode, &nhash[h]);
709 		}
710 	}
711 	clhash->hash     = nhash;
712 	clhash->hashsize = nsize;
713 	clhash->hashmask = nmask;
714 	sch_tree_unlock(sch);
715 
716 	kvfree(ohash);
717 }
718 EXPORT_SYMBOL(qdisc_class_hash_grow);
719 
720 int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
721 {
722 	unsigned int size = 4;
723 
724 	clhash->hash = qdisc_class_hash_alloc(size);
725 	if (!clhash->hash)
726 		return -ENOMEM;
727 	clhash->hashsize  = size;
728 	clhash->hashmask  = size - 1;
729 	clhash->hashelems = 0;
730 	return 0;
731 }
732 EXPORT_SYMBOL(qdisc_class_hash_init);
733 
734 void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
735 {
736 	kvfree(clhash->hash);
737 }
738 EXPORT_SYMBOL(qdisc_class_hash_destroy);
739 
740 void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
741 			     struct Qdisc_class_common *cl)
742 {
743 	unsigned int h;
744 
745 	INIT_HLIST_NODE(&cl->hnode);
746 	h = qdisc_class_hash(cl->classid, clhash->hashmask);
747 	hlist_add_head(&cl->hnode, &clhash->hash[h]);
748 	clhash->hashelems++;
749 }
750 EXPORT_SYMBOL(qdisc_class_hash_insert);
751 
752 void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
753 			     struct Qdisc_class_common *cl)
754 {
755 	hlist_del(&cl->hnode);
756 	clhash->hashelems--;
757 }
758 EXPORT_SYMBOL(qdisc_class_hash_remove);
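/*
 * Illustrative sketch (hypothetical classful qdisc): classes embed a
 * struct Qdisc_class_common, are looked up with qdisc_class_find(), and
 * the table is grown opportunistically after insertions.
 */
#if 0
struct example_classful_sched {
	struct Qdisc_class_hash clhash;
};

struct example_class {
	struct Qdisc_class_common common;	/* must be embedded */
};

static unsigned long example_find(struct Qdisc *sch, u32 classid)
{
	struct example_classful_sched *q = qdisc_priv(sch);

	return (unsigned long)qdisc_class_find(&q->clhash, classid);
}

/* On class creation (under RTNL):
 *	cl->common.classid = classid;
 *	qdisc_class_hash_insert(&q->clhash, &cl->common);
 *	qdisc_class_hash_grow(sch, &q->clhash);
 */
#endif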
759 
760 /* Allocate a unique handle from the space managed by the kernel.
761  * The possible range is [8000-FFFF]:0000 (0x8000 values)
762  */
763 static u32 qdisc_alloc_handle(struct net_device *dev)
764 {
765 	int i = 0x8000;
766 	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
767 
768 	do {
769 		autohandle += TC_H_MAKE(0x10000U, 0);
770 		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
771 			autohandle = TC_H_MAKE(0x80000000U, 0);
772 		if (!qdisc_lookup(dev, autohandle))
773 			return autohandle;
774 		cond_resched();
775 	} while	(--i > 0);
776 
777 	return 0;
778 }
779 
780 void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
781 {
782 	const struct Qdisc_class_ops *cops;
783 	unsigned long cl;
784 	u32 parentid;
785 	bool notify;
786 	int drops;
787 
788 	drops = max_t(int, n, 0);
789 	rcu_read_lock();
790 	while ((parentid = sch->parent)) {
791 		if (parentid == TC_H_ROOT)
792 			break;
793 
794 		if (sch->flags & TCQ_F_NOPARENT)
795 			break;
796 		/* Notify parent qdisc only if child qdisc becomes empty. */
797 		notify = !sch->q.qlen;
798 		/* TODO: perform the search on a per txq basis */
799 		sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid));
800 		if (sch == NULL) {
801 			WARN_ON_ONCE(parentid != TC_H_ROOT);
802 			break;
803 		}
804 		cops = sch->ops->cl_ops;
805 		if (notify && cops->qlen_notify) {
806 			/* Note that qlen_notify must be idempotent as it may get called
807 			 * multiple times.
808 			 */
809 			cl = cops->find(sch, parentid);
810 			cops->qlen_notify(sch, cl);
811 		}
812 		sch->q.qlen -= n;
813 		sch->qstats.backlog -= len;
814 		__qdisc_qstats_drop(sch, drops);
815 	}
816 	rcu_read_unlock();
817 }
818 EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
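/*
 * Illustrative sketch (hypothetical ->change() handler): after trimming
 * its own queue, a qdisc reports the freed qlen/backlog upwards so the
 * counters of all ancestor qdiscs stay consistent.
 */
#if 0
static int example_change(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	unsigned int qlen = sch->q.qlen;
	unsigned int backlog = sch->qstats.backlog;
	struct sk_buff *skb;

	sch->limit = 128;	/* pretend this was parsed from @opt */

	while (sch->q.qlen > sch->limit) {
		skb = qdisc_dequeue_head(sch);	/* updates qlen/backlog */
		if (!skb)
			break;
		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
				  backlog - sch->qstats.backlog);
	return 0;
}
#endif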
819 
820 int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
821 			      void *type_data)
822 {
823 	struct net_device *dev = qdisc_dev(sch);
824 	int err;
825 
826 	sch->flags &= ~TCQ_F_OFFLOADED;
827 	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
828 		return 0;
829 
830 	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
831 	if (err == -EOPNOTSUPP)
832 		return 0;
833 
834 	if (!err)
835 		sch->flags |= TCQ_F_OFFLOADED;
836 
837 	return err;
838 }
839 EXPORT_SYMBOL(qdisc_offload_dump_helper);
840 
841 void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
842 				struct Qdisc *new, struct Qdisc *old,
843 				enum tc_setup_type type, void *type_data,
844 				struct netlink_ext_ack *extack)
845 {
846 	bool any_qdisc_is_offloaded;
847 	int err;
848 
849 	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
850 		return;
851 
852 	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
853 
854 	/* Don't report error if the graft is part of destroy operation. */
855 	if (!err || !new || new == &noop_qdisc)
856 		return;
857 
858 	/* Don't report error if the parent, the old child and the new
859 	 * one are not offloaded.
860 	 */
861 	any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
862 	any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
863 	any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;
864 
865 	if (any_qdisc_is_offloaded)
866 		NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
867 }
868 EXPORT_SYMBOL(qdisc_offload_graft_helper);
869 
870 void qdisc_offload_query_caps(struct net_device *dev,
871 			      enum tc_setup_type type,
872 			      void *caps, size_t caps_len)
873 {
874 	const struct net_device_ops *ops = dev->netdev_ops;
875 	struct tc_query_caps_base base = {
876 		.type = type,
877 		.caps = caps,
878 	};
879 
880 	memset(caps, 0, caps_len);
881 
882 	if (ops->ndo_setup_tc)
883 		ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
884 }
885 EXPORT_SYMBOL(qdisc_offload_query_caps);
886 
887 static void qdisc_offload_graft_root(struct net_device *dev,
888 				     struct Qdisc *new, struct Qdisc *old,
889 				     struct netlink_ext_ack *extack)
890 {
891 	struct tc_root_qopt_offload graft_offload = {
892 		.command	= TC_ROOT_GRAFT,
893 		.handle		= new ? new->handle : 0,
894 		.ingress	= (new && new->flags & TCQ_F_INGRESS) ||
895 				  (old && old->flags & TCQ_F_INGRESS),
896 	};
897 
898 	qdisc_offload_graft_helper(dev, NULL, new, old,
899 				   TC_SETUP_ROOT_QDISC, &graft_offload, extack);
900 }
901 
902 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
903 			 u32 portid, u32 seq, u16 flags, int event,
904 			 struct netlink_ext_ack *extack)
905 {
906 	struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
907 	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
908 	struct tcmsg *tcm;
909 	struct nlmsghdr  *nlh;
910 	unsigned char *b = skb_tail_pointer(skb);
911 	struct gnet_dump d;
912 	struct qdisc_size_table *stab;
913 	u32 block_index;
914 	__u32 qlen;
915 
916 	cond_resched();
917 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
918 	if (!nlh)
919 		goto out_nlmsg_trim;
920 	tcm = nlmsg_data(nlh);
921 	tcm->tcm_family = AF_UNSPEC;
922 	tcm->tcm__pad1 = 0;
923 	tcm->tcm__pad2 = 0;
924 	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
925 	tcm->tcm_parent = clid;
926 	tcm->tcm_handle = q->handle;
927 	tcm->tcm_info = refcount_read(&q->refcnt);
928 	if (nla_put_string(skb, TCA_KIND, q->ops->id))
929 		goto nla_put_failure;
930 	if (q->ops->ingress_block_get) {
931 		block_index = q->ops->ingress_block_get(q);
932 		if (block_index &&
933 		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
934 			goto nla_put_failure;
935 	}
936 	if (q->ops->egress_block_get) {
937 		block_index = q->ops->egress_block_get(q);
938 		if (block_index &&
939 		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
940 			goto nla_put_failure;
941 	}
942 	if (q->ops->dump && q->ops->dump(q, skb) < 0)
943 		goto nla_put_failure;
944 	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
945 		goto nla_put_failure;
946 	qlen = qdisc_qlen_sum(q);
947 
948 	stab = rtnl_dereference(q->stab);
949 	if (stab && qdisc_dump_stab(skb, stab) < 0)
950 		goto nla_put_failure;
951 
952 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
953 					 NULL, &d, TCA_PAD) < 0)
954 		goto nla_put_failure;
955 
956 	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
957 		goto nla_put_failure;
958 
959 	if (qdisc_is_percpu_stats(q)) {
960 		cpu_bstats = q->cpu_bstats;
961 		cpu_qstats = q->cpu_qstats;
962 	}
963 
964 	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 ||
965 	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
966 	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
967 		goto nla_put_failure;
968 
969 	if (gnet_stats_finish_copy(&d) < 0)
970 		goto nla_put_failure;
971 
972 	if (extack && extack->_msg &&
973 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
974 		goto out_nlmsg_trim;
975 
976 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
977 
978 	return skb->len;
979 
980 out_nlmsg_trim:
981 nla_put_failure:
982 	nlmsg_trim(skb, b);
983 	return -1;
984 }
985 
986 static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
987 {
988 	if (q->flags & TCQ_F_BUILTIN)
989 		return true;
990 	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
991 		return true;
992 
993 	return false;
994 }
995 
996 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
997 			struct nlmsghdr *n, u32 clid,
998 			struct Qdisc *old, struct Qdisc *new,
999 			struct netlink_ext_ack *extack)
1000 {
1001 	struct sk_buff *skb;
1002 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1003 
1004 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1005 	if (!skb)
1006 		return -ENOBUFS;
1007 
1008 	if (old && !tc_qdisc_dump_ignore(old, false)) {
1009 		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
1010 				  0, RTM_DELQDISC, extack) < 0)
1011 			goto err_out;
1012 	}
1013 	if (new && !tc_qdisc_dump_ignore(new, false)) {
1014 		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
1015 				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC, extack) < 0)
1016 			goto err_out;
1017 	}
1018 
1019 	if (skb->len)
1020 		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1021 				      n->nlmsg_flags & NLM_F_ECHO);
1022 
1023 err_out:
1024 	kfree_skb(skb);
1025 	return -EINVAL;
1026 }
1027 
1028 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
1029 			       struct nlmsghdr *n, u32 clid,
1030 			       struct Qdisc *old, struct Qdisc *new,
1031 			       struct netlink_ext_ack *extack)
1032 {
1033 	if (new || old)
1034 		qdisc_notify(net, skb, n, clid, old, new, extack);
1035 
1036 	if (old)
1037 		qdisc_put(old);
1038 }
1039 
1040 static void qdisc_clear_nolock(struct Qdisc *sch)
1041 {
1042 	sch->flags &= ~TCQ_F_NOLOCK;
1043 	if (!(sch->flags & TCQ_F_CPUSTATS))
1044 		return;
1045 
1046 	free_percpu(sch->cpu_bstats);
1047 	free_percpu(sch->cpu_qstats);
1048 	sch->cpu_bstats = NULL;
1049 	sch->cpu_qstats = NULL;
1050 	sch->flags &= ~TCQ_F_CPUSTATS;
1051 }
1052 
1053 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
1054  * to device "dev".
1055  *
1056  * When appropriate, send a netlink notification using 'skb'
1057  * and 'n'.
1058  *
1059  * On success, destroy the old qdisc.
1060  */
1061 
1062 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
1063 		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
1064 		       struct Qdisc *new, struct Qdisc *old,
1065 		       struct netlink_ext_ack *extack)
1066 {
1067 	struct Qdisc *q = old;
1068 	struct net *net = dev_net(dev);
1069 
1070 	if (parent == NULL) {
1071 		unsigned int i, num_q, ingress;
1072 		struct netdev_queue *dev_queue;
1073 
1074 		ingress = 0;
1075 		num_q = dev->num_tx_queues;
1076 		if ((q && q->flags & TCQ_F_INGRESS) ||
1077 		    (new && new->flags & TCQ_F_INGRESS)) {
1078 			ingress = 1;
1079 			dev_queue = dev_ingress_queue(dev);
1080 			if (!dev_queue) {
1081 				NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
1082 				return -ENOENT;
1083 			}
1084 
1085 			q = rtnl_dereference(dev_queue->qdisc_sleeping);
1086 
1087 			/* This is the counterpart of that qdisc_refcount_inc_nz() call in
1088 			 * __tcf_qdisc_find() for filter requests.
1089 			 */
1090 			if (!qdisc_refcount_dec_if_one(q)) {
1091 				NL_SET_ERR_MSG(extack,
1092 					       "Current ingress or clsact Qdisc has ongoing filter requests");
1093 				return -EBUSY;
1094 			}
1095 		}
1096 
1097 		if (dev->flags & IFF_UP)
1098 			dev_deactivate(dev);
1099 
1100 		qdisc_offload_graft_root(dev, new, old, extack);
1101 
1102 		if (new && new->ops->attach && !ingress)
1103 			goto skip;
1104 
1105 		if (!ingress) {
1106 			for (i = 0; i < num_q; i++) {
1107 				dev_queue = netdev_get_tx_queue(dev, i);
1108 				old = dev_graft_qdisc(dev_queue, new);
1109 
1110 				if (new && i > 0)
1111 					qdisc_refcount_inc(new);
1112 				qdisc_put(old);
1113 			}
1114 		} else {
1115 			old = dev_graft_qdisc(dev_queue, NULL);
1116 
1117 			/* {ingress,clsact}_destroy() @old before grafting @new to avoid
1118 			 * unprotected concurrent accesses to net_device::miniq_{in,e}gress
1119 			 * pointer(s) in mini_qdisc_pair_swap().
1120 			 */
1121 			qdisc_notify(net, skb, n, classid, old, new, extack);
1122 			qdisc_destroy(old);
1123 
1124 			dev_graft_qdisc(dev_queue, new);
1125 		}
1126 
1127 skip:
1128 		if (!ingress) {
1129 			old = rtnl_dereference(dev->qdisc);
1130 			if (new && !new->ops->attach)
1131 				qdisc_refcount_inc(new);
1132 			rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
1133 
1134 			notify_and_destroy(net, skb, n, classid, old, new, extack);
1135 
1136 			if (new && new->ops->attach)
1137 				new->ops->attach(new);
1138 		}
1139 
1140 		if (dev->flags & IFF_UP)
1141 			dev_activate(dev);
1142 	} else {
1143 		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
1144 		unsigned long cl;
1145 		int err;
1146 
1147 		/* Only support running class lockless if parent is lockless */
1148 		if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
1149 			qdisc_clear_nolock(new);
1150 
1151 		if (!cops || !cops->graft)
1152 			return -EOPNOTSUPP;
1153 
1154 		cl = cops->find(parent, classid);
1155 		if (!cl) {
1156 			NL_SET_ERR_MSG(extack, "Specified class not found");
1157 			return -ENOENT;
1158 		}
1159 
1160 		if (new && new->ops == &noqueue_qdisc_ops) {
1161 			NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
1162 			return -EINVAL;
1163 		}
1164 
1165 		if (new &&
1166 		    !(parent->flags & TCQ_F_MQROOT) &&
1167 		    rcu_access_pointer(new->stab)) {
1168 			NL_SET_ERR_MSG(extack, "STAB not supported on a non root");
1169 			return -EINVAL;
1170 		}
1171 		err = cops->graft(parent, cl, new, &old, extack);
1172 		if (err)
1173 			return err;
1174 		notify_and_destroy(net, skb, n, classid, old, new, extack);
1175 	}
1176 	return 0;
1177 }
1178 
1179 static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
1180 				   struct netlink_ext_ack *extack)
1181 {
1182 	u32 block_index;
1183 
1184 	if (tca[TCA_INGRESS_BLOCK]) {
1185 		block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
1186 
1187 		if (!block_index) {
1188 			NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
1189 			return -EINVAL;
1190 		}
1191 		if (!sch->ops->ingress_block_set) {
1192 			NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
1193 			return -EOPNOTSUPP;
1194 		}
1195 		sch->ops->ingress_block_set(sch, block_index);
1196 	}
1197 	if (tca[TCA_EGRESS_BLOCK]) {
1198 		block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
1199 
1200 		if (!block_index) {
1201 			NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
1202 			return -EINVAL;
1203 		}
1204 		if (!sch->ops->egress_block_set) {
1205 			NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
1206 			return -EOPNOTSUPP;
1207 		}
1208 		sch->ops->egress_block_set(sch, block_index);
1209 	}
1210 	return 0;
1211 }
1212 
1213 /*
1214    Allocate and initialize a new qdisc.
1215 
1216    Parameters are passed via opt.
1217  */
1218 
1219 static struct Qdisc *qdisc_create(struct net_device *dev,
1220 				  struct netdev_queue *dev_queue,
1221 				  u32 parent, u32 handle,
1222 				  struct nlattr **tca, int *errp,
1223 				  struct netlink_ext_ack *extack)
1224 {
1225 	int err;
1226 	struct nlattr *kind = tca[TCA_KIND];
1227 	struct Qdisc *sch;
1228 	struct Qdisc_ops *ops;
1229 	struct qdisc_size_table *stab;
1230 
1231 	ops = qdisc_lookup_ops(kind);
1232 #ifdef CONFIG_MODULES
1233 	if (ops == NULL && kind != NULL) {
1234 		char name[IFNAMSIZ];
1235 		if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
1236 			/* We dropped the RTNL semaphore in order to
1237 			 * perform the module load.  So, even if we
1238 			 * succeeded in loading the module we have to
1239 			 * tell the caller to replay the request.  We
1240 			 * indicate this using -EAGAIN.
1241 			 * We replay the request because the device may
1242 			 * go away in the mean time.
1243 			 */
1244 			rtnl_unlock();
1245 			request_module("sch_%s", name);
1246 			rtnl_lock();
1247 			ops = qdisc_lookup_ops(kind);
1248 			if (ops != NULL) {
1249 			/* We will try qdisc_lookup_ops again,
1250 				 * so don't keep a reference.
1251 				 */
1252 				module_put(ops->owner);
1253 				err = -EAGAIN;
1254 				goto err_out;
1255 			}
1256 		}
1257 	}
1258 #endif
1259 
1260 	err = -ENOENT;
1261 	if (!ops) {
1262 		NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
1263 		goto err_out;
1264 	}
1265 
1266 	sch = qdisc_alloc(dev_queue, ops, extack);
1267 	if (IS_ERR(sch)) {
1268 		err = PTR_ERR(sch);
1269 		goto err_out2;
1270 	}
1271 
1272 	sch->parent = parent;
1273 
1274 	if (handle == TC_H_INGRESS) {
1275 		if (!(sch->flags & TCQ_F_INGRESS)) {
1276 			NL_SET_ERR_MSG(extack,
1277 				       "Specified parent ID is reserved for ingress and clsact Qdiscs");
1278 			err = -EINVAL;
1279 			goto err_out3;
1280 		}
1281 		handle = TC_H_MAKE(TC_H_INGRESS, 0);
1282 	} else {
1283 		if (handle == 0) {
1284 			handle = qdisc_alloc_handle(dev);
1285 			if (handle == 0) {
1286 				NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
1287 				err = -ENOSPC;
1288 				goto err_out3;
1289 			}
1290 		}
1291 		if (!netif_is_multiqueue(dev))
1292 			sch->flags |= TCQ_F_ONETXQUEUE;
1293 	}
1294 
1295 	sch->handle = handle;
1296 
1297 	/* This exists to keep backward compatibility with a userspace
1298 	 * loophole that allowed userspace to get the IFF_NO_QUEUE
1299 	 * facility on older kernels by setting tx_queue_len=0 (prior
1300 	 * to qdisc init) and then forgetting to reinit tx_queue_len
1301 	 * before attaching a qdisc again.
1302 	 */
1303 	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
1304 		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
1305 		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
1306 	}
1307 
1308 	err = qdisc_block_indexes_set(sch, tca, extack);
1309 	if (err)
1310 		goto err_out3;
1311 
1312 	if (tca[TCA_STAB]) {
1313 		stab = qdisc_get_stab(tca[TCA_STAB], extack);
1314 		if (IS_ERR(stab)) {
1315 			err = PTR_ERR(stab);
1316 			goto err_out3;
1317 		}
1318 		rcu_assign_pointer(sch->stab, stab);
1319 	}
1320 
1321 	if (ops->init) {
1322 		err = ops->init(sch, tca[TCA_OPTIONS], extack);
1323 		if (err != 0)
1324 			goto err_out4;
1325 	}
1326 
1327 	if (tca[TCA_RATE]) {
1328 		err = -EOPNOTSUPP;
1329 		if (sch->flags & TCQ_F_MQROOT) {
1330 			NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
1331 			goto err_out4;
1332 		}
1333 
1334 		err = gen_new_estimator(&sch->bstats,
1335 					sch->cpu_bstats,
1336 					&sch->rate_est,
1337 					NULL,
1338 					true,
1339 					tca[TCA_RATE]);
1340 		if (err) {
1341 			NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
1342 			goto err_out4;
1343 		}
1344 	}
1345 
1346 	qdisc_hash_add(sch, false);
1347 	trace_qdisc_create(ops, dev, parent);
1348 
1349 	return sch;
1350 
1351 err_out4:
1352 	/* Even if ops->init() failed, we call ops->destroy()
1353 	 * like qdisc_create_dflt().
1354 	 */
1355 	if (ops->destroy)
1356 		ops->destroy(sch);
1357 	qdisc_put_stab(rtnl_dereference(sch->stab));
1358 err_out3:
1359 	lockdep_unregister_key(&sch->root_lock_key);
1360 	netdev_put(dev, &sch->dev_tracker);
1361 	qdisc_free(sch);
1362 err_out2:
1363 	module_put(ops->owner);
1364 err_out:
1365 	*errp = err;
1366 	return NULL;
1367 }
1368 
1369 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
1370 			struct netlink_ext_ack *extack)
1371 {
1372 	struct qdisc_size_table *ostab, *stab = NULL;
1373 	int err = 0;
1374 
1375 	if (tca[TCA_OPTIONS]) {
1376 		if (!sch->ops->change) {
1377 			NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
1378 			return -EINVAL;
1379 		}
1380 		if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
1381 			NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
1382 			return -EOPNOTSUPP;
1383 		}
1384 		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
1385 		if (err)
1386 			return err;
1387 	}
1388 
1389 	if (tca[TCA_STAB]) {
1390 		stab = qdisc_get_stab(tca[TCA_STAB], extack);
1391 		if (IS_ERR(stab))
1392 			return PTR_ERR(stab);
1393 	}
1394 
1395 	ostab = rtnl_dereference(sch->stab);
1396 	rcu_assign_pointer(sch->stab, stab);
1397 	qdisc_put_stab(ostab);
1398 
1399 	if (tca[TCA_RATE]) {
1400 		/* NB: ignores errors from replace_estimator
1401 		   because change can't be undone. */
1402 		if (sch->flags & TCQ_F_MQROOT)
1403 			goto out;
1404 		gen_replace_estimator(&sch->bstats,
1405 				      sch->cpu_bstats,
1406 				      &sch->rate_est,
1407 				      NULL,
1408 				      true,
1409 				      tca[TCA_RATE]);
1410 	}
1411 out:
1412 	return 0;
1413 }
1414 
1415 struct check_loop_arg {
1416 	struct qdisc_walker	w;
1417 	struct Qdisc		*p;
1418 	int			depth;
1419 };
1420 
1421 static int check_loop_fn(struct Qdisc *q, unsigned long cl,
1422 			 struct qdisc_walker *w);
1423 
1424 static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1425 {
1426 	struct check_loop_arg	arg;
1427 
1428 	if (q->ops->cl_ops == NULL)
1429 		return 0;
1430 
1431 	arg.w.stop = arg.w.skip = arg.w.count = 0;
1432 	arg.w.fn = check_loop_fn;
1433 	arg.depth = depth;
1434 	arg.p = p;
1435 	q->ops->cl_ops->walk(q, &arg.w);
1436 	return arg.w.stop ? -ELOOP : 0;
1437 }
1438 
1439 static int
1440 check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1441 {
1442 	struct Qdisc *leaf;
1443 	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1444 	struct check_loop_arg *arg = (struct check_loop_arg *)w;
1445 
1446 	leaf = cops->leaf(q, cl);
1447 	if (leaf) {
1448 		if (leaf == arg->p || arg->depth > 7)
1449 			return -ELOOP;
1450 		return check_loop(leaf, arg->p, arg->depth + 1);
1451 	}
1452 	return 0;
1453 }
1454 
1455 const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
1456 	[TCA_KIND]		= { .type = NLA_STRING },
1457 	[TCA_RATE]		= { .type = NLA_BINARY,
1458 				    .len = sizeof(struct tc_estimator) },
1459 	[TCA_STAB]		= { .type = NLA_NESTED },
1460 	[TCA_DUMP_INVISIBLE]	= { .type = NLA_FLAG },
1461 	[TCA_CHAIN]		= { .type = NLA_U32 },
1462 	[TCA_INGRESS_BLOCK]	= { .type = NLA_U32 },
1463 	[TCA_EGRESS_BLOCK]	= { .type = NLA_U32 },
1464 };
1465 
1466 /*
1467  * Delete/get qdisc.
1468  */
1469 
1470 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1471 			struct netlink_ext_ack *extack)
1472 {
1473 	struct net *net = sock_net(skb->sk);
1474 	struct tcmsg *tcm = nlmsg_data(n);
1475 	struct nlattr *tca[TCA_MAX + 1];
1476 	struct net_device *dev;
1477 	u32 clid;
1478 	struct Qdisc *q = NULL;
1479 	struct Qdisc *p = NULL;
1480 	int err;
1481 
1482 	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1483 				     rtm_tca_policy, extack);
1484 	if (err < 0)
1485 		return err;
1486 
1487 	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1488 	if (!dev)
1489 		return -ENODEV;
1490 
1491 	clid = tcm->tcm_parent;
1492 	if (clid) {
1493 		if (clid != TC_H_ROOT) {
1494 			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1495 				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1496 				if (!p) {
1497 					NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
1498 					return -ENOENT;
1499 				}
1500 				q = qdisc_leaf(p, clid);
1501 			} else if (dev_ingress_queue(dev)) {
1502 				q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
1503 			}
1504 		} else {
1505 			q = rtnl_dereference(dev->qdisc);
1506 		}
1507 		if (!q) {
1508 			NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
1509 			return -ENOENT;
1510 		}
1511 
1512 		if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
1513 			NL_SET_ERR_MSG(extack, "Invalid handle");
1514 			return -EINVAL;
1515 		}
1516 	} else {
1517 		q = qdisc_lookup(dev, tcm->tcm_handle);
1518 		if (!q) {
1519 			NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
1520 			return -ENOENT;
1521 		}
1522 	}
1523 
1524 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1525 		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1526 		return -EINVAL;
1527 	}
1528 
1529 	if (n->nlmsg_type == RTM_DELQDISC) {
1530 		if (!clid) {
1531 			NL_SET_ERR_MSG(extack, "Classid cannot be zero");
1532 			return -EINVAL;
1533 		}
1534 		if (q->handle == 0) {
1535 			NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
1536 			return -ENOENT;
1537 		}
1538 		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
1539 		if (err != 0)
1540 			return err;
1541 	} else {
1542 		qdisc_notify(net, skb, n, clid, NULL, q, NULL);
1543 	}
1544 	return 0;
1545 }
1546 
1547 static bool req_create_or_replace(struct nlmsghdr *n)
1548 {
1549 	return (n->nlmsg_flags & NLM_F_CREATE &&
1550 		n->nlmsg_flags & NLM_F_REPLACE);
1551 }
1552 
1553 static bool req_create_exclusive(struct nlmsghdr *n)
1554 {
1555 	return (n->nlmsg_flags & NLM_F_CREATE &&
1556 		n->nlmsg_flags & NLM_F_EXCL);
1557 }
1558 
1559 static bool req_change(struct nlmsghdr *n)
1560 {
1561 	return (!(n->nlmsg_flags & NLM_F_CREATE) &&
1562 		!(n->nlmsg_flags & NLM_F_REPLACE) &&
1563 		!(n->nlmsg_flags & NLM_F_EXCL));
1564 }
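/* For reference (iproute2 behaviour): "tc qdisc add" sends
 * NLM_F_CREATE|NLM_F_EXCL, "replace" sends NLM_F_CREATE|NLM_F_REPLACE,
 * and "change" sends none of the three, matching the helpers above.
 */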
1565 
1566 /*
1567  * Create/change qdisc.
1568  */
1569 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1570 			   struct netlink_ext_ack *extack)
1571 {
1572 	struct net *net = sock_net(skb->sk);
1573 	struct tcmsg *tcm;
1574 	struct nlattr *tca[TCA_MAX + 1];
1575 	struct net_device *dev;
1576 	u32 clid;
1577 	struct Qdisc *q, *p;
1578 	int err;
1579 
1580 replay:
1581 	/* Reinit, just in case something touches this. */
1582 	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1583 				     rtm_tca_policy, extack);
1584 	if (err < 0)
1585 		return err;
1586 
1587 	tcm = nlmsg_data(n);
1588 	clid = tcm->tcm_parent;
1589 	q = p = NULL;
1590 
1591 	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1592 	if (!dev)
1593 		return -ENODEV;
1594 
1595 
1596 	if (clid) {
1597 		if (clid != TC_H_ROOT) {
1598 			if (clid != TC_H_INGRESS) {
1599 				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1600 				if (!p) {
1601 					NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
1602 					return -ENOENT;
1603 				}
1604 				q = qdisc_leaf(p, clid);
1605 			} else if (dev_ingress_queue_create(dev)) {
1606 				q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
1607 			}
1608 		} else {
1609 			q = rtnl_dereference(dev->qdisc);
1610 		}
1611 
1612 		/* It may be the default qdisc; ignore it */
1613 		if (q && q->handle == 0)
1614 			q = NULL;
1615 
1616 		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1617 			if (tcm->tcm_handle) {
1618 				if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
1619 					NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
1620 					return -EEXIST;
1621 				}
1622 				if (TC_H_MIN(tcm->tcm_handle)) {
1623 					NL_SET_ERR_MSG(extack, "Invalid minor handle");
1624 					return -EINVAL;
1625 				}
1626 				q = qdisc_lookup(dev, tcm->tcm_handle);
1627 				if (!q)
1628 					goto create_n_graft;
1629 				if (q->parent != tcm->tcm_parent) {
1630 					NL_SET_ERR_MSG(extack, "Cannot move an existing qdisc to a different parent");
1631 					return -EINVAL;
1632 				}
1633 				if (n->nlmsg_flags & NLM_F_EXCL) {
1634 					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
1635 					return -EEXIST;
1636 				}
1637 				if (tca[TCA_KIND] &&
1638 				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1639 					NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1640 					return -EINVAL;
1641 				}
1642 				if (q->flags & TCQ_F_INGRESS) {
1643 					NL_SET_ERR_MSG(extack,
1644 						       "Cannot regraft ingress or clsact Qdiscs");
1645 					return -EINVAL;
1646 				}
1647 				if (q == p ||
1648 				    (p && check_loop(q, p, 0))) {
1649 					NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
1650 					return -ELOOP;
1651 				}
1652 				if (clid == TC_H_INGRESS) {
1653 					NL_SET_ERR_MSG(extack, "Ingress cannot graft directly");
1654 					return -EINVAL;
1655 				}
1656 				qdisc_refcount_inc(q);
1657 				goto graft;
1658 			} else {
1659 				if (!q)
1660 					goto create_n_graft;
1661 
1662 				/* This magic test requires explanation.
1663 				 *
1664 				 *   We know that some child q is already
1665 				 *   attached to this parent and we have a choice:
1666 				 *   1) change it or 2) create/graft a new one.
1667 				 *   If the requested qdisc kind is different
1668 				 *   from the existing one, then we choose graft.
1669 				 *   If they are the same, then this is a "change"
1670 				 *   operation - just let it fall through..
1671 				 *
1672 				 *   1. We are allowed to create/graft only
1673 				 *   if the request explicitly states
1674 				 *   "please create if it doesn't exist".
1675 				 *
1676 				 *   2. If the request is an exclusive create,
1677 				 *   then the qdisc tcm_handle is not expected
1678 				 *   to exist, so we choose create/graft too.
1679 				 *
1680 				 *   3. The last case is when no flags are set.
1681 				 *   This will happen when, for example, the tc
1682 				 *   utility issues a "change" command.
1683 				 *   Alas, this is a sort of hole in the API; we
1684 				 *   cannot decide what to do unambiguously.
1685 				 *   For now we select create/graft.
1686 				 */
1687 				if (tca[TCA_KIND] &&
1688 				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1689 					if (req_create_or_replace(n) ||
1690 					    req_create_exclusive(n))
1691 						goto create_n_graft;
1692 					else if (req_change(n))
1693 						goto create_n_graft2;
1694 				}
1695 			}
1696 		}
1697 	} else {
1698 		if (!tcm->tcm_handle) {
1699 			NL_SET_ERR_MSG(extack, "Handle cannot be zero");
1700 			return -EINVAL;
1701 		}
1702 		q = qdisc_lookup(dev, tcm->tcm_handle);
1703 	}
1704 
1705 	/* Change qdisc parameters */
1706 	if (!q) {
1707 		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1708 		return -ENOENT;
1709 	}
1710 	if (n->nlmsg_flags & NLM_F_EXCL) {
1711 		NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
1712 		return -EEXIST;
1713 	}
1714 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1715 		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1716 		return -EINVAL;
1717 	}
1718 	err = qdisc_change(q, tca, extack);
1719 	if (err == 0)
1720 		qdisc_notify(net, skb, n, clid, NULL, q, extack);
1721 	return err;
1722 
1723 create_n_graft:
1724 	if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1725 		NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
1726 		return -ENOENT;
1727 	}
1728 create_n_graft2:
1729 	if (clid == TC_H_INGRESS) {
1730 		if (dev_ingress_queue(dev)) {
1731 			q = qdisc_create(dev, dev_ingress_queue(dev),
1732 					 tcm->tcm_parent, tcm->tcm_parent,
1733 					 tca, &err, extack);
1734 		} else {
1735 			NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
1736 			err = -ENOENT;
1737 		}
1738 	} else {
1739 		struct netdev_queue *dev_queue;
1740 
1741 		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1742 			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1743 		else if (p)
1744 			dev_queue = p->dev_queue;
1745 		else
1746 			dev_queue = netdev_get_tx_queue(dev, 0);
1747 
1748 		q = qdisc_create(dev, dev_queue,
1749 				 tcm->tcm_parent, tcm->tcm_handle,
1750 				 tca, &err, extack);
1751 	}
1752 	if (q == NULL) {
1753 		if (err == -EAGAIN)
1754 			goto replay;
1755 		return err;
1756 	}
1757 
1758 graft:
1759 	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
1760 	if (err) {
1761 		if (q)
1762 			qdisc_put(q);
1763 		return err;
1764 	}
1765 
1766 	return 0;
1767 }
1768 
1769 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1770 			      struct netlink_callback *cb,
1771 			      int *q_idx_p, int s_q_idx, bool recur,
1772 			      bool dump_invisible)
1773 {
1774 	int ret = 0, q_idx = *q_idx_p;
1775 	struct Qdisc *q;
1776 	int b;
1777 
1778 	if (!root)
1779 		return 0;
1780 
1781 	q = root;
1782 	if (q_idx < s_q_idx) {
1783 		q_idx++;
1784 	} else {
1785 		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1786 		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1787 				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
1788 				  RTM_NEWQDISC, NULL) <= 0)
1789 			goto done;
1790 		q_idx++;
1791 	}
1792 
1793 	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
1794 	 * itself has already been dumped.
1795 	 *
1796 	 * If we've already dumped the top-level (ingress) qdisc above, we don't
1797 	 * want to hit it again when walking the global qdisc hashtable.
1798 	 */
1799 	if (!qdisc_dev(root) || !recur)
1800 		goto out;
1801 
1802 	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1803 		if (q_idx < s_q_idx) {
1804 			q_idx++;
1805 			continue;
1806 		}
1807 		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1808 		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1809 				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
1810 				  RTM_NEWQDISC, NULL) <= 0)
1811 			goto done;
1812 		q_idx++;
1813 	}
1814 
1815 out:
1816 	*q_idx_p = q_idx;
1817 	return ret;
1818 done:
1819 	ret = -1;
1820 	goto out;
1821 }
1822 
1823 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1824 {
1825 	struct net *net = sock_net(skb->sk);
1826 	int idx, q_idx;
1827 	int s_idx, s_q_idx;
1828 	struct net_device *dev;
1829 	const struct nlmsghdr *nlh = cb->nlh;
1830 	struct nlattr *tca[TCA_MAX + 1];
1831 	int err;
1832 
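	/* Dump resume state: cb->args[0] is the index of the last device
	 * visited, cb->args[1] the qdisc index within that device.
	 */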
1833 	s_idx = cb->args[0];
1834 	s_q_idx = q_idx = cb->args[1];
1835 
1836 	idx = 0;
1837 	ASSERT_RTNL();
1838 
1839 	err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
1840 				     rtm_tca_policy, cb->extack);
1841 	if (err < 0)
1842 		return err;
1843 
1844 	for_each_netdev(net, dev) {
1845 		struct netdev_queue *dev_queue;
1846 
1847 		if (idx < s_idx)
1848 			goto cont;
1849 		if (idx > s_idx)
1850 			s_q_idx = 0;
1851 		q_idx = 0;
1852 
1853 		if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
1854 				       skb, cb, &q_idx, s_q_idx,
1855 				       true, tca[TCA_DUMP_INVISIBLE]) < 0)
1856 			goto done;
1857 
1858 		dev_queue = dev_ingress_queue(dev);
1859 		if (dev_queue &&
1860 		    tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping),
1861 				       skb, cb, &q_idx, s_q_idx, false,
1862 				       tca[TCA_DUMP_INVISIBLE]) < 0)
1863 			goto done;
1864 
1865 cont:
1866 		idx++;
1867 	}
1868 
1869 done:
1870 	cb->args[0] = idx;
1871 	cb->args[1] = q_idx;
1872 
1873 	return skb->len;
1874 }
1875 
1876 
1877 
1878 /************************************************
1879  *	Traffic classes manipulation.		*
1880  ************************************************/
1881 
1882 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1883 			  unsigned long cl, u32 portid, u32 seq, u16 flags,
1884 			  int event, struct netlink_ext_ack *extack)
1885 {
1886 	struct tcmsg *tcm;
1887 	struct nlmsghdr  *nlh;
1888 	unsigned char *b = skb_tail_pointer(skb);
1889 	struct gnet_dump d;
1890 	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1891 
1892 	cond_resched();
1893 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1894 	if (!nlh)
1895 		goto out_nlmsg_trim;
1896 	tcm = nlmsg_data(nlh);
1897 	tcm->tcm_family = AF_UNSPEC;
1898 	tcm->tcm__pad1 = 0;
1899 	tcm->tcm__pad2 = 0;
1900 	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1901 	tcm->tcm_parent = q->handle;
1902 	tcm->tcm_handle = q->handle;
1903 	tcm->tcm_info = 0;
1904 	if (nla_put_string(skb, TCA_KIND, q->ops->id))
1905 		goto nla_put_failure;
1906 	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1907 		goto nla_put_failure;
1908 
1909 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1910 					 NULL, &d, TCA_PAD) < 0)
1911 		goto nla_put_failure;
1912 
1913 	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1914 		goto nla_put_failure;
1915 
1916 	if (gnet_stats_finish_copy(&d) < 0)
1917 		goto nla_put_failure;
1918 
1919 	if (extack && extack->_msg &&
1920 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
1921 		goto out_nlmsg_trim;
1922 
1923 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1924 
1925 	return skb->len;
1926 
1927 out_nlmsg_trim:
1928 nla_put_failure:
1929 	nlmsg_trim(skb, b);
1930 	return -1;
1931 }
1932 
1933 static int tclass_notify(struct net *net, struct sk_buff *oskb,
1934 			 struct nlmsghdr *n, struct Qdisc *q,
1935 			 unsigned long cl, int event, struct netlink_ext_ack *extack)
1936 {
1937 	struct sk_buff *skb;
1938 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1939 
1940 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1941 	if (!skb)
1942 		return -ENOBUFS;
1943 
1944 	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event, extack) < 0) {
1945 		kfree_skb(skb);
1946 		return -EINVAL;
1947 	}
1948 
1949 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1950 			      n->nlmsg_flags & NLM_F_ECHO);
1951 }
1952 
1953 static int tclass_del_notify(struct net *net,
1954 			     const struct Qdisc_class_ops *cops,
1955 			     struct sk_buff *oskb, struct nlmsghdr *n,
1956 			     struct Qdisc *q, unsigned long cl,
1957 			     struct netlink_ext_ack *extack)
1958 {
1959 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1960 	struct sk_buff *skb;
1961 	int err = 0;
1962 
1963 	if (!cops->delete)
1964 		return -EOPNOTSUPP;
1965 
1966 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1967 	if (!skb)
1968 		return -ENOBUFS;
1969 
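	/* Build the notification before ->delete() runs; afterwards the
	 * class, and with it the state needed to dump it, is gone.
	 */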
1970 	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
1971 			   RTM_DELTCLASS, extack) < 0) {
1972 		kfree_skb(skb);
1973 		return -EINVAL;
1974 	}
1975 
1976 	err = cops->delete(q, cl, extack);
1977 	if (err) {
1978 		kfree_skb(skb);
1979 		return err;
1980 	}
1981 
1982 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1983 			     n->nlmsg_flags & NLM_F_ECHO);
1984 	return err;
1985 }
1986 
1987 #ifdef CONFIG_NET_CLS
1988 
1989 struct tcf_bind_args {
1990 	struct tcf_walker w;
1991 	unsigned long base;
1992 	unsigned long cl;
1993 	u32 classid;
1994 };
1995 
1996 static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
1997 {
1998 	struct tcf_bind_args *a = (void *)arg;
1999 
2000 	if (n && tp->ops->bind_class) {
2001 		struct Qdisc *q = tcf_block_q(tp->chain->block);
2002 
2003 		sch_tree_lock(q);
2004 		tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
2005 		sch_tree_unlock(q);
2006 	}
2007 	return 0;
2008 }
2009 
2010 struct tc_bind_class_args {
2011 	struct qdisc_walker w;
2012 	unsigned long new_cl;
2013 	u32 portid;
2014 	u32 clid;
2015 };
2016 
2017 static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
2018 				struct qdisc_walker *w)
2019 {
2020 	struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
2021 	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
2022 	struct tcf_block *block;
2023 	struct tcf_chain *chain;
2024 
2025 	block = cops->tcf_block(q, cl, NULL);
2026 	if (!block)
2027 		return 0;
2028 	for (chain = tcf_get_next_chain(block, NULL);
2029 	     chain;
2030 	     chain = tcf_get_next_chain(block, chain)) {
2031 		struct tcf_proto *tp;
2032 
2033 		for (tp = tcf_get_next_proto(chain, NULL);
2034 		     tp; tp = tcf_get_next_proto(chain, tp)) {
2035 			struct tcf_bind_args arg = {};
2036 
2037 			arg.w.fn = tcf_node_bind;
2038 			arg.classid = a->clid;
2039 			arg.base = cl;
2040 			arg.cl = a->new_cl;
2041 			tp->ops->walk(tp, &arg.w, true);
2042 		}
2043 	}
2044 
2045 	return 0;
2046 }
2047 
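/* Rebind every filter currently pointing at class clid on qdisc q to
 * new_cl, leaving the classid match to each classifier's ->bind_class().
 * Called with new_cl == 0 when a class is deleted, and with the fresh
 * class after a create (see tc_ctl_tclass() below).
 */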
2048 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
2049 			   unsigned long new_cl)
2050 {
2051 	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
2052 	struct tc_bind_class_args args = {};
2053 
2054 	if (!cops->tcf_block)
2055 		return;
2056 	args.portid = portid;
2057 	args.clid = clid;
2058 	args.new_cl = new_cl;
2059 	args.w.fn = tc_bind_class_walker;
2060 	q->ops->cl_ops->walk(q, &args.w);
2061 }
2062 
2063 #else
2064 
2065 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
2066 			   unsigned long new_cl)
2067 {
2068 }
2069 
2070 #endif
2071 
2072 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
2073 			 struct netlink_ext_ack *extack)
2074 {
2075 	struct net *net = sock_net(skb->sk);
2076 	struct tcmsg *tcm = nlmsg_data(n);
2077 	struct nlattr *tca[TCA_MAX + 1];
2078 	struct net_device *dev;
2079 	struct Qdisc *q = NULL;
2080 	const struct Qdisc_class_ops *cops;
2081 	unsigned long cl = 0;
2082 	unsigned long new_cl;
2083 	u32 portid;
2084 	u32 clid;
2085 	u32 qid;
2086 	int err;
2087 
2088 	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
2089 				     rtm_tca_policy, extack);
2090 	if (err < 0)
2091 		return err;
2092 
2093 	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2094 	if (!dev)
2095 		return -ENODEV;
2096 
2097 	/*
2098 	   parent == TC_H_UNSPEC - unspecified parent.
2099 	   parent == TC_H_ROOT   - class is root, which has no parent.
2100 	   parent == X:0	 - parent is root class.
2101 	   parent == X:Y	 - parent is a node in hierarchy.
2102 	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.
2103 
2104 	   handle == 0:0	 - generate handle from kernel pool.
2105 	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
2106 	   handle == X:Y	 - class is exactly X:Y.
2107 	   handle == X:0	 - root class.
2108 	 */
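	/* Illustration: a handle packs the major (qdisc) number into the
	 * upper 16 bits and the minor (class) number into the lower 16,
	 * so tc's "1:2" is the handle 0x00010002:
	 *
	 *	u32 h = TC_H_MAKE(0x00010000U, 0x2);	// "1:2" == 0x00010002
	 *	TC_H_MAJ(h);				// 0x00010000  ("1:")
	 *	TC_H_MIN(h);				// 0x00000002  (":2")
	 */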
2109 
2110 	/* Step 1. Determine qdisc handle X:0 */
2111 
2112 	portid = tcm->tcm_parent;
2113 	clid = tcm->tcm_handle;
2114 	qid = TC_H_MAJ(clid);
2115 
2116 	if (portid != TC_H_ROOT) {
2117 		u32 qid1 = TC_H_MAJ(portid);
2118 
2119 		if (qid && qid1) {
2120 			/* If both majors are known, they must be identical. */
2121 			if (qid != qid1)
2122 				return -EINVAL;
2123 		} else if (qid1) {
2124 			qid = qid1;
2125 		} else if (qid == 0)
2126 			qid = rtnl_dereference(dev->qdisc)->handle;
2127 
2128 		/* Now qid is a genuine qdisc handle, consistent
2129 		 * with both parent and child.
2130 		 *
2131 		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
2132 		 */
2133 		if (portid)
2134 			portid = TC_H_MAKE(qid, portid);
2135 	} else {
2136 		if (qid == 0)
2137 			qid = rtnl_dereference(dev->qdisc)->handle;
2138 	}
2139 
2140 	/* OK. Locate qdisc */
2141 	q = qdisc_lookup(dev, qid);
2142 	if (!q)
2143 		return -ENOENT;
2144 
2145 	/* And check that it supports classes */
2146 	cops = q->ops->cl_ops;
2147 	if (cops == NULL)
2148 		return -EINVAL;
2149 
2150 	/* Now try to get class */
2151 	if (clid == 0) {
2152 		if (portid == TC_H_ROOT)
2153 			clid = qid;
2154 	} else
2155 		clid = TC_H_MAKE(qid, clid);
2156 
2157 	if (clid)
2158 		cl = cops->find(q, clid);
2159 
2160 	if (cl == 0) {
2161 		err = -ENOENT;
2162 		if (n->nlmsg_type != RTM_NEWTCLASS ||
2163 		    !(n->nlmsg_flags & NLM_F_CREATE))
2164 			goto out;
2165 	} else {
2166 		switch (n->nlmsg_type) {
2167 		case RTM_NEWTCLASS:
2168 			err = -EEXIST;
2169 			if (n->nlmsg_flags & NLM_F_EXCL)
2170 				goto out;
2171 			break;
2172 		case RTM_DELTCLASS:
2173 			err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
2174 			/* Unbind filters from the deleted class by rebinding them to class 0 */
2175 			tc_bind_tclass(q, portid, clid, 0);
2176 			goto out;
2177 		case RTM_GETTCLASS:
2178 			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS, extack);
2179 			goto out;
2180 		default:
2181 			err = -EINVAL;
2182 			goto out;
2183 		}
2184 	}
2185 
2186 	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
2187 		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
2188 		return -EOPNOTSUPP;
2189 	}
2190 
2191 	/* Prevent creation of traffic classes with classid TC_H_ROOT */
2192 	if (clid == TC_H_ROOT) {
2193 		NL_SET_ERR_MSG(extack, "Cannot create traffic class with classid TC_H_ROOT");
2194 		return -EINVAL;
2195 	}
2196 
2197 	new_cl = cl;
2198 	err = -EOPNOTSUPP;
2199 	if (cops->change)
2200 		err = cops->change(q, clid, portid, tca, &new_cl, extack);
2201 	if (err == 0) {
2202 		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS, extack);
2203 		/* We just created a new class; rebind matching filters to it. */
2204 		if (cl != new_cl)
2205 			tc_bind_tclass(q, portid, clid, new_cl);
2206 	}
2207 out:
2208 	return err;
2209 }
2210 
2211 struct qdisc_dump_args {
2212 	struct qdisc_walker	w;
2213 	struct sk_buff		*skb;
2214 	struct netlink_callback	*cb;
2215 };
2216 
2217 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
2218 			    struct qdisc_walker *arg)
2219 {
2220 	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
2221 
2222 	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
2223 			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2224 			      RTM_NEWTCLASS, NULL);
2225 }
2226 
2227 static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
2228 				struct tcmsg *tcm, struct netlink_callback *cb,
2229 				int *t_p, int s_t)
2230 {
2231 	struct qdisc_dump_args arg;
2232 
2233 	if (tc_qdisc_dump_ignore(q, false) ||
2234 	    *t_p < s_t || !q->ops->cl_ops ||
2235 	    (tcm->tcm_parent &&
2236 	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
2237 		(*t_p)++;
2238 		return 0;
2239 	}
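	/* Once past the resume point, clear the class-walk cursor kept in
	 * cb->args[1] and the scratch slots behind it.
	 */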
2240 	if (*t_p > s_t)
2241 		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
2242 	arg.w.fn = qdisc_class_dump;
2243 	arg.skb = skb;
2244 	arg.cb = cb;
2245 	arg.w.stop  = 0;
2246 	arg.w.skip = cb->args[1];
2247 	arg.w.count = 0;
2248 	q->ops->cl_ops->walk(q, &arg.w);
2249 	cb->args[1] = arg.w.count;
2250 	if (arg.w.stop)
2251 		return -1;
2252 	(*t_p)++;
2253 	return 0;
2254 }
2255 
2256 static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
2257 			       struct tcmsg *tcm, struct netlink_callback *cb,
2258 			       int *t_p, int s_t, bool recur)
2259 {
2260 	struct Qdisc *q;
2261 	int b;
2262 
2263 	if (!root)
2264 		return 0;
2265 
2266 	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
2267 		return -1;
2268 
2269 	if (!qdisc_dev(root) || !recur)
2270 		return 0;
2271 
2272 	if (tcm->tcm_parent) {
2273 		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
2274 		if (q && q != root &&
2275 		    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2276 			return -1;
2277 		return 0;
2278 	}
2279 	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
2280 		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2281 			return -1;
2282 	}
2283 
2284 	return 0;
2285 }
2286 
2287 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
2288 {
2289 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2290 	struct net *net = sock_net(skb->sk);
2291 	struct netdev_queue *dev_queue;
2292 	struct net_device *dev;
2293 	int t, s_t;
2294 
2295 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2296 		return 0;
2297 	dev = dev_get_by_index(net, tcm->tcm_ifindex);
2298 	if (!dev)
2299 		return 0;
2300 
2301 	s_t = cb->args[0];
2302 	t = 0;
2303 
2304 	if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
2305 				skb, tcm, cb, &t, s_t, true) < 0)
2306 		goto done;
2307 
2308 	dev_queue = dev_ingress_queue(dev);
2309 	if (dev_queue &&
2310 	    tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping),
2311 				skb, tcm, cb, &t, s_t, false) < 0)
2312 		goto done;
2313 
2314 done:
2315 	cb->args[0] = t;
2316 
2317 	dev_put(dev);
2318 	return skb->len;
2319 }
2320 
2321 #ifdef CONFIG_PROC_FS
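/* /proc/net/psched exports four hex fields that tc(8) uses to convert
 * time units.  A typical line on a high-resolution-timer kernel (the
 * exact values vary with configuration) would be:
 *
 *	000003e8 00000040 000f4240 3b9aca00
 *
 * i.e. nanoseconds per microsecond, nanoseconds per psched tick,
 * the constant 1000000, and NSEC_PER_SEC / hrtimer_resolution.
 */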
2322 static int psched_show(struct seq_file *seq, void *v)
2323 {
2324 	seq_printf(seq, "%08x %08x %08x %08x\n",
2325 		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
2326 		   1000000,
2327 		   (u32)NSEC_PER_SEC / hrtimer_resolution);
2328 
2329 	return 0;
2330 }
2331 
2332 static int __net_init psched_net_init(struct net *net)
2333 {
2334 	struct proc_dir_entry *e;
2335 
2336 	e = proc_create_single("psched", 0, net->proc_net, psched_show);
2337 	if (e == NULL)
2338 		return -ENOMEM;
2339 
2340 	return 0;
2341 }
2342 
2343 static void __net_exit psched_net_exit(struct net *net)
2344 {
2345 	remove_proc_entry("psched", net->proc_net);
2346 }
2347 #else
2348 static int __net_init psched_net_init(struct net *net)
2349 {
2350 	return 0;
2351 }
2352 
2353 static void __net_exit psched_net_exit(struct net *net)
2354 {
2355 }
2356 #endif
2357 
2358 static struct pernet_operations psched_net_ops = {
2359 	.init = psched_net_init,
2360 	.exit = psched_net_exit,
2361 };
2362 
2363 #if IS_ENABLED(CONFIG_RETPOLINE)
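/* Consulted by the direct-call wrappers in <net/tc_wrapper.h>: once set,
 * they skip the if-ladder of direct calls and use a plain indirect call.
 */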
2364 DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
2365 #endif
2366 
2367 static int __init pktsched_init(void)
2368 {
2369 	int err;
2370 
2371 	err = register_pernet_subsys(&psched_net_ops);
2372 	if (err) {
2373 		pr_err("pktsched_init: cannot initialize per netns operations\n");
2375 		return err;
2376 	}
2377 
2378 	register_qdisc(&pfifo_fast_ops);
2379 	register_qdisc(&pfifo_qdisc_ops);
2380 	register_qdisc(&bfifo_qdisc_ops);
2381 	register_qdisc(&pfifo_head_drop_qdisc_ops);
2382 	register_qdisc(&mq_qdisc_ops);
2383 	register_qdisc(&noqueue_qdisc_ops);
2384 
2385 	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
2386 	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
2387 	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
2388 		      0);
2389 	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
2390 	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
2391 	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
2392 		      0);
2393 
2394 	tc_wrapper_init();
2395 
2396 	return 0;
2397 }
2398 
2399 subsys_initcall(pktsched_init);
2400