xref: /openbmc/linux/net/sched/sch_api.c (revision fd37b884)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/sch_api.c	Packet scheduler API.
4  *
5  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  *
7  * Fixes:
8  *
9  * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
10  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
11  * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
12  */
13 
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <linux/skbuff.h>
20 #include <linux/init.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/kmod.h>
24 #include <linux/list.h>
25 #include <linux/hrtimer.h>
26 #include <linux/slab.h>
27 #include <linux/hashtable.h>
28 
29 #include <net/net_namespace.h>
30 #include <net/sock.h>
31 #include <net/netlink.h>
32 #include <net/pkt_sched.h>
33 #include <net/pkt_cls.h>
34 #include <net/tc_wrapper.h>
35 
36 #include <trace/events/qdisc.h>
37 
38 /*
39 
40    Short review.
41    -------------
42 
43    This file consists of two interrelated parts:
44 
45    1. queueing disciplines manager frontend.
46    2. traffic classes manager frontend.
47 
48    Generally, a queueing discipline ("qdisc") is a black box
49    which is able to enqueue packets and to dequeue them (when
50    the device is ready to send something) in an order and at times
51    determined by the algorithm hidden inside it.
52 
53    qdiscs are divided into two categories:
54    - "queues", which have no internal structure visible from outside.
55    - "schedulers", which split all the packets into "traffic classes",
56      using "packet classifiers" (see cls_api.c).
57 
58    In turn, classes may have child qdiscs (as a rule, queues)
59    attached to them, and so on.
60 
61    The goal of the routines in this file is to translate
62    the information supplied by the user in the form of handles
63    into a form more intelligible to the kernel, to perform some sanity
64    checks and the part of the work which is common to all qdiscs,
65    and to provide rtnetlink notifications.
66 
67    All real intelligent work is done inside qdisc modules.
68 
69 
70 
71    Every discipline has two major routines: enqueue and dequeue.
72 
73    ---dequeue
74 
75    dequeue usually returns an skb to send. It is allowed to return NULL,
76    but that does not mean the queue is empty; it just means that the
77    discipline does not want to send anything at this time.
78    The queue is really empty if q->q.qlen == 0.
79    For complicated disciplines with multiple queues, q->q is not the
80    real packet queue, but q->q.qlen must nevertheless be valid.
81 
82    ---enqueue
83 
84    enqueue returns 0 if the packet was enqueued successfully.
85    If a packet (this one or another one) was dropped, it returns
86    a non-zero error code.
87    NET_XMIT_DROP 	- this packet was dropped.
88      Expected action: do not back off, but wait until the queue clears.
89    NET_XMIT_CN	 	- this packet was probably enqueued, but another one was dropped.
90      Expected action: back off or ignore.
91 
92    Auxiliary routines:
93 
94    ---peek
95 
96    like dequeue, but without removing the packet from the queue
97 
98    ---reset
99 
100    returns the qdisc to its initial state: purges all buffers, clears all
101    timers and counters (except for statistics), etc.
102 
103    ---init
104 
105    initializes a newly created qdisc.
106 
107    ---destroy
108 
109    destroys the resources allocated by init and during the lifetime of the qdisc.
110 
111    ---change
112 
113    changes qdisc parameters.
114  */
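/* Illustrative sketch (not part of the original source): the enqueue/dequeue
 * contract described above, in the shape of a trivial FIFO-style qdisc.
 * It mirrors the pfifo logic in sch_fifo.c; the example_* names are
 * hypothetical.  qdisc_enqueue_tail() returns NET_XMIT_SUCCESS (0) and
 * qdisc_drop() returns NET_XMIT_DROP, matching the enqueue rules above,
 * while dequeue may legitimately return NULL without the queue being empty.
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *				   struct sk_buff **to_free)
 *	{
 *		if (likely(sch->q.qlen < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch, to_free);
 *	}
 *
 *	static struct sk_buff *example_dequeue(struct Qdisc *sch)
 *	{
 *		return qdisc_dequeue_head(sch);
 *	}
 */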
115 
116 /* Protects the list of registered TC modules. It is a pure SMP lock. */
117 static DEFINE_RWLOCK(qdisc_mod_lock);
118 
119 
120 /************************************************
121  *	Queueing disciplines manipulation.	*
122  ************************************************/
123 
124 
125 /* The list of all installed queueing disciplines. */
126 
127 static struct Qdisc_ops *qdisc_base;
128 
129 /* Register/unregister queueing discipline */
130 
131 int register_qdisc(struct Qdisc_ops *qops)
132 {
133 	struct Qdisc_ops *q, **qp;
134 	int rc = -EEXIST;
135 
136 	write_lock(&qdisc_mod_lock);
137 	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
138 		if (!strcmp(qops->id, q->id))
139 			goto out;
140 
141 	if (qops->enqueue == NULL)
142 		qops->enqueue = noop_qdisc_ops.enqueue;
143 	if (qops->peek == NULL) {
144 		if (qops->dequeue == NULL)
145 			qops->peek = noop_qdisc_ops.peek;
146 		else
147 			goto out_einval;
148 	}
149 	if (qops->dequeue == NULL)
150 		qops->dequeue = noop_qdisc_ops.dequeue;
151 
152 	if (qops->cl_ops) {
153 		const struct Qdisc_class_ops *cops = qops->cl_ops;
154 
155 		if (!(cops->find && cops->walk && cops->leaf))
156 			goto out_einval;
157 
158 		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
159 			goto out_einval;
160 	}
161 
162 	qops->next = NULL;
163 	*qp = qops;
164 	rc = 0;
165 out:
166 	write_unlock(&qdisc_mod_lock);
167 	return rc;
168 
169 out_einval:
170 	rc = -EINVAL;
171 	goto out;
172 }
173 EXPORT_SYMBOL(register_qdisc);
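/* Illustrative sketch (an assumption, not taken from this file): a qdisc
 * module typically pairs register_qdisc()/unregister_qdisc() with its
 * module init/exit hooks.  The "example" ops and functions below are
 * hypothetical.
 *
 *	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *		.id		= "example",
 *		.enqueue	= example_enqueue,
 *		.dequeue	= example_dequeue,
 *		.peek		= qdisc_peek_dequeued,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return register_qdisc(&example_qdisc_ops);
 *	}
 *
 *	static void __exit example_module_exit(void)
 *	{
 *		unregister_qdisc(&example_qdisc_ops);
 *	}
 *	module_init(example_module_init);
 *	module_exit(example_module_exit);
 */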
174 
175 void unregister_qdisc(struct Qdisc_ops *qops)
176 {
177 	struct Qdisc_ops *q, **qp;
178 	int err = -ENOENT;
179 
180 	write_lock(&qdisc_mod_lock);
181 	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
182 		if (q == qops)
183 			break;
184 	if (q) {
185 		*qp = q->next;
186 		q->next = NULL;
187 		err = 0;
188 	}
189 	write_unlock(&qdisc_mod_lock);
190 
191 	WARN(err, "unregister qdisc(%s) failed\n", qops->id);
192 }
193 EXPORT_SYMBOL(unregister_qdisc);
194 
195 /* Get default qdisc if not otherwise specified */
196 void qdisc_get_default(char *name, size_t len)
197 {
198 	read_lock(&qdisc_mod_lock);
199 	strscpy(name, default_qdisc_ops->id, len);
200 	read_unlock(&qdisc_mod_lock);
201 }
202 
203 static struct Qdisc_ops *qdisc_lookup_default(const char *name)
204 {
205 	struct Qdisc_ops *q = NULL;
206 
207 	for (q = qdisc_base; q; q = q->next) {
208 		if (!strcmp(name, q->id)) {
209 			if (!try_module_get(q->owner))
210 				q = NULL;
211 			break;
212 		}
213 	}
214 
215 	return q;
216 }
217 
218 /* Set new default qdisc to use */
219 int qdisc_set_default(const char *name)
220 {
221 	const struct Qdisc_ops *ops;
222 
223 	if (!capable(CAP_NET_ADMIN))
224 		return -EPERM;
225 
226 	write_lock(&qdisc_mod_lock);
227 	ops = qdisc_lookup_default(name);
228 	if (!ops) {
229 		/* Not found, drop lock and try to load module */
230 		write_unlock(&qdisc_mod_lock);
231 		request_module("sch_%s", name);
232 		write_lock(&qdisc_mod_lock);
233 
234 		ops = qdisc_lookup_default(name);
235 	}
236 
237 	if (ops) {
238 		/* Set new default */
239 		module_put(default_qdisc_ops->owner);
240 		default_qdisc_ops = ops;
241 	}
242 	write_unlock(&qdisc_mod_lock);
243 
244 	return ops ? 0 : -ENOENT;
245 }
246 
247 #ifdef CONFIG_NET_SCH_DEFAULT
248 /* Set default value from kernel config */
249 static int __init sch_default_qdisc(void)
250 {
251 	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
252 }
253 late_initcall(sch_default_qdisc);
254 #endif
255 
256 /* We know the handle. Find the qdisc among all qdiscs attached to the device
257  * (the root qdisc, all its children, children of children, etc.).
258  * Note: the caller must hold either the RTNL lock or rcu_read_lock().
259  */
260 
261 static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
262 {
263 	struct Qdisc *q;
264 
265 	if (!qdisc_dev(root))
266 		return (root->handle == handle ? root : NULL);
267 
268 	if (!(root->flags & TCQ_F_BUILTIN) &&
269 	    root->handle == handle)
270 		return root;
271 
272 	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
273 				   lockdep_rtnl_is_held()) {
274 		if (q->handle == handle)
275 			return q;
276 	}
277 	return NULL;
278 }
279 
280 void qdisc_hash_add(struct Qdisc *q, bool invisible)
281 {
282 	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
283 		ASSERT_RTNL();
284 		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
285 		if (invisible)
286 			q->flags |= TCQ_F_INVISIBLE;
287 	}
288 }
289 EXPORT_SYMBOL(qdisc_hash_add);
290 
291 void qdisc_hash_del(struct Qdisc *q)
292 {
293 	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
294 		ASSERT_RTNL();
295 		hash_del_rcu(&q->hash);
296 	}
297 }
298 EXPORT_SYMBOL(qdisc_hash_del);
299 
300 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
301 {
302 	struct Qdisc *q;
303 
304 	if (!handle)
305 		return NULL;
306 	q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
307 	if (q)
308 		goto out;
309 
310 	if (dev_ingress_queue(dev))
311 		q = qdisc_match_from_root(
312 			rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping),
313 			handle);
314 out:
315 	return q;
316 }
317 
318 struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
319 {
320 	struct netdev_queue *nq;
321 	struct Qdisc *q;
322 
323 	if (!handle)
324 		return NULL;
325 	q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
326 	if (q)
327 		goto out;
328 
329 	nq = dev_ingress_queue_rcu(dev);
330 	if (nq)
331 		q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),
332 					  handle);
333 out:
334 	return q;
335 }
336 
337 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
338 {
339 	unsigned long cl;
340 	const struct Qdisc_class_ops *cops = p->ops->cl_ops;
341 
342 	if (cops == NULL)
343 		return NULL;
344 	cl = cops->find(p, classid);
345 
346 	if (cl == 0)
347 		return NULL;
348 	return cops->leaf(p, cl);
349 }
350 
351 /* Find queueing discipline by name */
352 
353 static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
354 {
355 	struct Qdisc_ops *q = NULL;
356 
357 	if (kind) {
358 		read_lock(&qdisc_mod_lock);
359 		for (q = qdisc_base; q; q = q->next) {
360 			if (nla_strcmp(kind, q->id) == 0) {
361 				if (!try_module_get(q->owner))
362 					q = NULL;
363 				break;
364 			}
365 		}
366 		read_unlock(&qdisc_mod_lock);
367 	}
368 	return q;
369 }
370 
371 /* The linklayer setting was not transferred from older iproute2
372  * versions, and the rate table lookup system has been dropped from
373  * the kernel. To stay backward compatible with older iproute2 tc
374  * utilities, we detect the linklayer setting by checking whether the
375  * rate table was modified.
376  *
377  * For linklayer ATM table entries, the rate table will be aligned to
378  * 48 bytes, thus some table entries will contain the same value.  The
379  * mpu (min packet unit) is also encoded into the old rate table, thus
380  * starting from the mpu, we find the low and high table entries that
381  * map this cell.  If these entries contain the same value, then the
382  * rate table has been modified for linklayer ATM.
383  *
384  * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
385  * then rounding up to the next cell, calculating the table entry one
386  * below, and comparing the two.
387  */
388 static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
389 {
390 	int low       = roundup(r->mpu, 48);
391 	int high      = roundup(low+1, 48);
392 	int cell_low  = low >> r->cell_log;
393 	int cell_high = (high >> r->cell_log) - 1;
394 
395 	/* rtab is too inaccurate at rates > 100Mbit/s */
396 	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
397 		pr_debug("TC linklayer: Giving up ATM detection\n");
398 		return TC_LINKLAYER_ETHERNET;
399 	}
400 
401 	if ((cell_high > cell_low) && (cell_high < 256)
402 	    && (rtab[cell_low] == rtab[cell_high])) {
403 		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
404 			 cell_low, cell_high, rtab[cell_high]);
405 		return TC_LINKLAYER_ATM;
406 	}
407 	return TC_LINKLAYER_ETHERNET;
408 }
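/* Worked example of the detection above (illustrative values): with mpu = 0
 * and cell_log = 3, low = roundup(0, 48) = 0 and high = roundup(1, 48) = 48,
 * giving cell_low = 0 and cell_high = 5.  An ATM-adjusted table charges every
 * size up to 47 bytes as a single ATM cell, so rtab[0] == rtab[5] and
 * TC_LINKLAYER_ATM is returned; an unmodified Ethernet table normally has
 * rtab[0] != rtab[5].
 */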
409 
410 static struct qdisc_rate_table *qdisc_rtab_list;
411 
412 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
413 					struct nlattr *tab,
414 					struct netlink_ext_ack *extack)
415 {
416 	struct qdisc_rate_table *rtab;
417 
418 	if (tab == NULL || r->rate == 0 ||
419 	    r->cell_log == 0 || r->cell_log >= 32 ||
420 	    nla_len(tab) != TC_RTAB_SIZE) {
421 		NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
422 		return NULL;
423 	}
424 
425 	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
426 		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
427 		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
428 			rtab->refcnt++;
429 			return rtab;
430 		}
431 	}
432 
433 	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
434 	if (rtab) {
435 		rtab->rate = *r;
436 		rtab->refcnt = 1;
437 		memcpy(rtab->data, nla_data(tab), 1024);
438 		if (r->linklayer == TC_LINKLAYER_UNAWARE)
439 			r->linklayer = __detect_linklayer(r, rtab->data);
440 		rtab->next = qdisc_rtab_list;
441 		qdisc_rtab_list = rtab;
442 	} else {
443 		NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
444 	}
445 	return rtab;
446 }
447 EXPORT_SYMBOL(qdisc_get_rtab);
448 
449 void qdisc_put_rtab(struct qdisc_rate_table *tab)
450 {
451 	struct qdisc_rate_table *rtab, **rtabp;
452 
453 	if (!tab || --tab->refcnt)
454 		return;
455 
456 	for (rtabp = &qdisc_rtab_list;
457 	     (rtab = *rtabp) != NULL;
458 	     rtabp = &rtab->next) {
459 		if (rtab == tab) {
460 			*rtabp = rtab->next;
461 			kfree(rtab);
462 			return;
463 		}
464 	}
465 }
466 EXPORT_SYMBOL(qdisc_put_rtab);
467 
468 static LIST_HEAD(qdisc_stab_list);
469 
470 static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
471 	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
472 	[TCA_STAB_DATA] = { .type = NLA_BINARY },
473 };
474 
475 static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
476 					       struct netlink_ext_ack *extack)
477 {
478 	struct nlattr *tb[TCA_STAB_MAX + 1];
479 	struct qdisc_size_table *stab;
480 	struct tc_sizespec *s;
481 	unsigned int tsize = 0;
482 	u16 *tab = NULL;
483 	int err;
484 
485 	err = nla_parse_nested_deprecated(tb, TCA_STAB_MAX, opt, stab_policy,
486 					  extack);
487 	if (err < 0)
488 		return ERR_PTR(err);
489 	if (!tb[TCA_STAB_BASE]) {
490 		NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
491 		return ERR_PTR(-EINVAL);
492 	}
493 
494 	s = nla_data(tb[TCA_STAB_BASE]);
495 
496 	if (s->tsize > 0) {
497 		if (!tb[TCA_STAB_DATA]) {
498 			NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
499 			return ERR_PTR(-EINVAL);
500 		}
501 		tab = nla_data(tb[TCA_STAB_DATA]);
502 		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
503 	}
504 
505 	if (tsize != s->tsize || (!tab && tsize > 0)) {
506 		NL_SET_ERR_MSG(extack, "Invalid size of size table");
507 		return ERR_PTR(-EINVAL);
508 	}
509 
510 	list_for_each_entry(stab, &qdisc_stab_list, list) {
511 		if (memcmp(&stab->szopts, s, sizeof(*s)))
512 			continue;
513 		if (tsize > 0 &&
514 		    memcmp(stab->data, tab, flex_array_size(stab, data, tsize)))
515 			continue;
516 		stab->refcnt++;
517 		return stab;
518 	}
519 
520 	if (s->size_log > STAB_SIZE_LOG_MAX ||
521 	    s->cell_log > STAB_SIZE_LOG_MAX) {
522 		NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
523 		return ERR_PTR(-EINVAL);
524 	}
525 
526 	stab = kmalloc(struct_size(stab, data, tsize), GFP_KERNEL);
527 	if (!stab)
528 		return ERR_PTR(-ENOMEM);
529 
530 	stab->refcnt = 1;
531 	stab->szopts = *s;
532 	if (tsize > 0)
533 		memcpy(stab->data, tab, flex_array_size(stab, data, tsize));
534 
535 	list_add_tail(&stab->list, &qdisc_stab_list);
536 
537 	return stab;
538 }
539 
540 void qdisc_put_stab(struct qdisc_size_table *tab)
541 {
542 	if (!tab)
543 		return;
544 
545 	if (--tab->refcnt == 0) {
546 		list_del(&tab->list);
547 		kfree_rcu(tab, rcu);
548 	}
549 }
550 EXPORT_SYMBOL(qdisc_put_stab);
551 
552 static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
553 {
554 	struct nlattr *nest;
555 
556 	nest = nla_nest_start_noflag(skb, TCA_STAB);
557 	if (nest == NULL)
558 		goto nla_put_failure;
559 	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
560 		goto nla_put_failure;
561 	nla_nest_end(skb, nest);
562 
563 	return skb->len;
564 
565 nla_put_failure:
566 	return -1;
567 }
568 
569 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
570 			       const struct qdisc_size_table *stab)
571 {
572 	int pkt_len, slot;
573 
574 	pkt_len = skb->len + stab->szopts.overhead;
575 	if (unlikely(!stab->szopts.tsize))
576 		goto out;
577 
578 	slot = pkt_len + stab->szopts.cell_align;
579 	if (unlikely(slot < 0))
580 		slot = 0;
581 
582 	slot >>= stab->szopts.cell_log;
583 	if (likely(slot < stab->szopts.tsize))
584 		pkt_len = stab->data[slot];
585 	else
586 		pkt_len = stab->data[stab->szopts.tsize - 1] *
587 				(slot / stab->szopts.tsize) +
588 				stab->data[slot % stab->szopts.tsize];
589 
590 	pkt_len <<= stab->szopts.size_log;
591 out:
592 	if (unlikely(pkt_len < 1))
593 		pkt_len = 1;
594 	qdisc_skb_cb(skb)->pkt_len = pkt_len;
595 }
596 EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
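/* Worked example (hypothetical stab contents, for illustration only): with
 * overhead = 0, cell_align = 0, cell_log = 6, size_log = 0 and tsize = 512,
 * a 1000-byte skb gives slot = 1000 >> 6 = 15, so pkt_len becomes
 * stab->data[15].  A table generated by tc for linklayer atm could hold the
 * ATM-adjusted length of that size bucket there, e.g. 22 cells * 53 bytes =
 * 1166, which is then stored in qdisc_skb_cb(skb)->pkt_len.
 */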
597 
598 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
599 {
600 	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
601 		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
602 			txt, qdisc->ops->id, qdisc->handle >> 16);
603 		qdisc->flags |= TCQ_F_WARN_NONWC;
604 	}
605 }
606 EXPORT_SYMBOL(qdisc_warn_nonwc);
607 
608 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
609 {
610 	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
611 						 timer);
612 
613 	rcu_read_lock();
614 	__netif_schedule(qdisc_root(wd->qdisc));
615 	rcu_read_unlock();
616 
617 	return HRTIMER_NORESTART;
618 }
619 
620 void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
621 				 clockid_t clockid)
622 {
623 	hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
624 	wd->timer.function = qdisc_watchdog;
625 	wd->qdisc = qdisc;
626 }
627 EXPORT_SYMBOL(qdisc_watchdog_init_clockid);
628 
629 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
630 {
631 	qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
632 }
633 EXPORT_SYMBOL(qdisc_watchdog_init);
634 
635 void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
636 				      u64 delta_ns)
637 {
638 	bool deactivated;
639 
640 	rcu_read_lock();
641 	deactivated = test_bit(__QDISC_STATE_DEACTIVATED,
642 			       &qdisc_root_sleeping(wd->qdisc)->state);
643 	rcu_read_unlock();
644 	if (deactivated)
645 		return;
646 
647 	if (hrtimer_is_queued(&wd->timer)) {
648 		u64 softexpires;
649 
650 		softexpires = ktime_to_ns(hrtimer_get_softexpires(&wd->timer));
651 		/* If timer is already set in [expires, expires + delta_ns],
652 		 * do not reprogram it.
653 		 */
654 		if (softexpires - expires <= delta_ns)
655 			return;
656 	}
657 
658 	hrtimer_start_range_ns(&wd->timer,
659 			       ns_to_ktime(expires),
660 			       delta_ns,
661 			       HRTIMER_MODE_ABS_PINNED);
662 }
663 EXPORT_SYMBOL(qdisc_watchdog_schedule_range_ns);
664 
665 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
666 {
667 	hrtimer_cancel(&wd->timer);
668 }
669 EXPORT_SYMBOL(qdisc_watchdog_cancel);
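/* Typical watchdog usage (a sketch with hypothetical q/time_next_packet
 * fields, not taken from this file): a shaping qdisc arms the watchdog when
 * the next packet may not be sent yet and cancels it on reset/destroy.
 *
 *	qdisc_watchdog_init(&q->watchdog, sch);			(in ->init())
 *
 *	if (now < q->time_next_packet) {			(in ->dequeue())
 *		qdisc_watchdog_schedule_ns(&q->watchdog, q->time_next_packet);
 *		return NULL;
 *	}
 *
 *	qdisc_watchdog_cancel(&q->watchdog);			(in ->reset()/->destroy())
 */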
670 
671 static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
672 {
673 	struct hlist_head *h;
674 	unsigned int i;
675 
676 	h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);
677 
678 	if (h != NULL) {
679 		for (i = 0; i < n; i++)
680 			INIT_HLIST_HEAD(&h[i]);
681 	}
682 	return h;
683 }
684 
685 void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
686 {
687 	struct Qdisc_class_common *cl;
688 	struct hlist_node *next;
689 	struct hlist_head *nhash, *ohash;
690 	unsigned int nsize, nmask, osize;
691 	unsigned int i, h;
692 
693 	/* Rehash when load factor exceeds 0.75 */
694 	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
695 		return;
696 	nsize = clhash->hashsize * 2;
697 	nmask = nsize - 1;
698 	nhash = qdisc_class_hash_alloc(nsize);
699 	if (nhash == NULL)
700 		return;
701 
702 	ohash = clhash->hash;
703 	osize = clhash->hashsize;
704 
705 	sch_tree_lock(sch);
706 	for (i = 0; i < osize; i++) {
707 		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
708 			h = qdisc_class_hash(cl->classid, nmask);
709 			hlist_add_head(&cl->hnode, &nhash[h]);
710 		}
711 	}
712 	clhash->hash     = nhash;
713 	clhash->hashsize = nsize;
714 	clhash->hashmask = nmask;
715 	sch_tree_unlock(sch);
716 
717 	kvfree(ohash);
718 }
719 EXPORT_SYMBOL(qdisc_class_hash_grow);
720 
721 int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
722 {
723 	unsigned int size = 4;
724 
725 	clhash->hash = qdisc_class_hash_alloc(size);
726 	if (!clhash->hash)
727 		return -ENOMEM;
728 	clhash->hashsize  = size;
729 	clhash->hashmask  = size - 1;
730 	clhash->hashelems = 0;
731 	return 0;
732 }
733 EXPORT_SYMBOL(qdisc_class_hash_init);
734 
735 void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
736 {
737 	kvfree(clhash->hash);
738 }
739 EXPORT_SYMBOL(qdisc_class_hash_destroy);
740 
741 void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
742 			     struct Qdisc_class_common *cl)
743 {
744 	unsigned int h;
745 
746 	INIT_HLIST_NODE(&cl->hnode);
747 	h = qdisc_class_hash(cl->classid, clhash->hashmask);
748 	hlist_add_head(&cl->hnode, &clhash->hash[h]);
749 	clhash->hashelems++;
750 }
751 EXPORT_SYMBOL(qdisc_class_hash_insert);
752 
753 void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
754 			     struct Qdisc_class_common *cl)
755 {
756 	hlist_del(&cl->hnode);
757 	clhash->hashelems--;
758 }
759 EXPORT_SYMBOL(qdisc_class_hash_remove);
760 
761 /* Allocate a unique handle from the space managed by the kernel.
762  * The possible range is [8000-FFFF]:0000 (0x8000 values).
763  */
764 static u32 qdisc_alloc_handle(struct net_device *dev)
765 {
766 	int i = 0x8000;
767 	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
768 
769 	do {
770 		autohandle += TC_H_MAKE(0x10000U, 0);
771 		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
772 			autohandle = TC_H_MAKE(0x80000000U, 0);
773 		if (!qdisc_lookup(dev, autohandle))
774 			return autohandle;
775 		cond_resched();
776 	} while	(--i > 0);
777 
778 	return 0;
779 }
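/* Worked example of the allocation above: autohandle starts at 8000:0000,
 * so the first generated handle is 8001:0000 (0x80010000), the next is
 * 8002:0000, and so on; when the counter would reach ffff:0000 (which
 * equals TC_H_MAKE(TC_H_ROOT, 0)) it wraps back to 8000:0000.
 */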
780 
781 void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
782 {
783 	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
784 	const struct Qdisc_class_ops *cops;
785 	unsigned long cl;
786 	u32 parentid;
787 	bool notify;
788 	int drops;
789 
790 	if (n == 0 && len == 0)
791 		return;
792 	drops = max_t(int, n, 0);
793 	rcu_read_lock();
794 	while ((parentid = sch->parent)) {
795 		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
796 			break;
797 
798 		if (sch->flags & TCQ_F_NOPARENT)
799 			break;
800 		/* Notify parent qdisc only if child qdisc becomes empty.
801 		 *
802 		 * If the child was empty even before the update then the backlog
803 		 * counter is screwed up and we skip the notification because the
804 		 * parent class is already passive.
805 		 *
806 		 * If the original child was offloaded then it is allowed
807 		 * to be seen as empty, so the parent is notified anyway.
808 		 */
809 		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
810 						       !qdisc_is_offloaded);
811 		/* TODO: perform the search on a per txq basis */
812 		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
813 		if (sch == NULL) {
814 			WARN_ON_ONCE(parentid != TC_H_ROOT);
815 			break;
816 		}
817 		cops = sch->ops->cl_ops;
818 		if (notify && cops->qlen_notify) {
819 			cl = cops->find(sch, parentid);
820 			cops->qlen_notify(sch, cl);
821 		}
822 		sch->q.qlen -= n;
823 		sch->qstats.backlog -= len;
824 		__qdisc_qstats_drop(sch, drops);
825 	}
826 	rcu_read_unlock();
827 }
828 EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
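/* Typical caller pattern (a sketch, with new_limit/dropped_* hypothetical):
 * when a qdisc drops packets outside of the enqueue path, e.g. while
 * shrinking its limit in ->change(), it reports how many packets and bytes
 * disappeared so that the counters of all ancestor qdiscs stay consistent:
 *
 *	unsigned int dropped_pkts = 0, dropped_bytes = 0;
 *
 *	while (sch->q.qlen > new_limit) {
 *		struct sk_buff *skb = qdisc_dequeue_head(sch);
 *
 *		if (!skb)
 *			break;
 *		dropped_bytes += qdisc_pkt_len(skb);
 *		dropped_pkts++;
 *		rtnl_kfree_skbs(skb, skb);
 *	}
 *	qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
 */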
829 
830 int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
831 			      void *type_data)
832 {
833 	struct net_device *dev = qdisc_dev(sch);
834 	int err;
835 
836 	sch->flags &= ~TCQ_F_OFFLOADED;
837 	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
838 		return 0;
839 
840 	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
841 	if (err == -EOPNOTSUPP)
842 		return 0;
843 
844 	if (!err)
845 		sch->flags |= TCQ_F_OFFLOADED;
846 
847 	return err;
848 }
849 EXPORT_SYMBOL(qdisc_offload_dump_helper);
850 
851 void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
852 				struct Qdisc *new, struct Qdisc *old,
853 				enum tc_setup_type type, void *type_data,
854 				struct netlink_ext_ack *extack)
855 {
856 	bool any_qdisc_is_offloaded;
857 	int err;
858 
859 	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
860 		return;
861 
862 	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
863 
864 	/* Don't report an error if the graft is part of a destroy operation. */
865 	if (!err || !new || new == &noop_qdisc)
866 		return;
867 
868 	/* Don't report error if the parent, the old child and the new
869 	 * one are not offloaded.
870 	 */
871 	any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
872 	any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
873 	any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;
874 
875 	if (any_qdisc_is_offloaded)
876 		NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
877 }
878 EXPORT_SYMBOL(qdisc_offload_graft_helper);
879 
880 void qdisc_offload_query_caps(struct net_device *dev,
881 			      enum tc_setup_type type,
882 			      void *caps, size_t caps_len)
883 {
884 	const struct net_device_ops *ops = dev->netdev_ops;
885 	struct tc_query_caps_base base = {
886 		.type = type,
887 		.caps = caps,
888 	};
889 
890 	memset(caps, 0, caps_len);
891 
892 	if (ops->ndo_setup_tc)
893 		ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
894 }
895 EXPORT_SYMBOL(qdisc_offload_query_caps);
896 
897 static void qdisc_offload_graft_root(struct net_device *dev,
898 				     struct Qdisc *new, struct Qdisc *old,
899 				     struct netlink_ext_ack *extack)
900 {
901 	struct tc_root_qopt_offload graft_offload = {
902 		.command	= TC_ROOT_GRAFT,
903 		.handle		= new ? new->handle : 0,
904 		.ingress	= (new && new->flags & TCQ_F_INGRESS) ||
905 				  (old && old->flags & TCQ_F_INGRESS),
906 	};
907 
908 	qdisc_offload_graft_helper(dev, NULL, new, old,
909 				   TC_SETUP_ROOT_QDISC, &graft_offload, extack);
910 }
911 
912 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
913 			 u32 portid, u32 seq, u16 flags, int event,
914 			 struct netlink_ext_ack *extack)
915 {
916 	struct gnet_stats_basic_sync __percpu *cpu_bstats = NULL;
917 	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
918 	struct tcmsg *tcm;
919 	struct nlmsghdr  *nlh;
920 	unsigned char *b = skb_tail_pointer(skb);
921 	struct gnet_dump d;
922 	struct qdisc_size_table *stab;
923 	u32 block_index;
924 	__u32 qlen;
925 
926 	cond_resched();
927 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
928 	if (!nlh)
929 		goto out_nlmsg_trim;
930 	tcm = nlmsg_data(nlh);
931 	tcm->tcm_family = AF_UNSPEC;
932 	tcm->tcm__pad1 = 0;
933 	tcm->tcm__pad2 = 0;
934 	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
935 	tcm->tcm_parent = clid;
936 	tcm->tcm_handle = q->handle;
937 	tcm->tcm_info = refcount_read(&q->refcnt);
938 	if (nla_put_string(skb, TCA_KIND, q->ops->id))
939 		goto nla_put_failure;
940 	if (q->ops->ingress_block_get) {
941 		block_index = q->ops->ingress_block_get(q);
942 		if (block_index &&
943 		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
944 			goto nla_put_failure;
945 	}
946 	if (q->ops->egress_block_get) {
947 		block_index = q->ops->egress_block_get(q);
948 		if (block_index &&
949 		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
950 			goto nla_put_failure;
951 	}
952 	if (q->ops->dump && q->ops->dump(q, skb) < 0)
953 		goto nla_put_failure;
954 	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
955 		goto nla_put_failure;
956 	qlen = qdisc_qlen_sum(q);
957 
958 	stab = rtnl_dereference(q->stab);
959 	if (stab && qdisc_dump_stab(skb, stab) < 0)
960 		goto nla_put_failure;
961 
962 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
963 					 NULL, &d, TCA_PAD) < 0)
964 		goto nla_put_failure;
965 
966 	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
967 		goto nla_put_failure;
968 
969 	if (qdisc_is_percpu_stats(q)) {
970 		cpu_bstats = q->cpu_bstats;
971 		cpu_qstats = q->cpu_qstats;
972 	}
973 
974 	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats, true) < 0 ||
975 	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
976 	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
977 		goto nla_put_failure;
978 
979 	if (gnet_stats_finish_copy(&d) < 0)
980 		goto nla_put_failure;
981 
982 	if (extack && extack->_msg &&
983 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
984 		goto out_nlmsg_trim;
985 
986 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
987 
988 	return skb->len;
989 
990 out_nlmsg_trim:
991 nla_put_failure:
992 	nlmsg_trim(skb, b);
993 	return -1;
994 }
995 
996 static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
997 {
998 	if (q->flags & TCQ_F_BUILTIN)
999 		return true;
1000 	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
1001 		return true;
1002 
1003 	return false;
1004 }
1005 
1006 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1007 			struct nlmsghdr *n, u32 clid,
1008 			struct Qdisc *old, struct Qdisc *new,
1009 			struct netlink_ext_ack *extack)
1010 {
1011 	struct sk_buff *skb;
1012 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1013 
1014 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1015 	if (!skb)
1016 		return -ENOBUFS;
1017 
1018 	if (old && !tc_qdisc_dump_ignore(old, false)) {
1019 		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
1020 				  0, RTM_DELQDISC, extack) < 0)
1021 			goto err_out;
1022 	}
1023 	if (new && !tc_qdisc_dump_ignore(new, false)) {
1024 		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
1025 				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC, extack) < 0)
1026 			goto err_out;
1027 	}
1028 
1029 	if (skb->len)
1030 		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1031 				      n->nlmsg_flags & NLM_F_ECHO);
1032 
1033 err_out:
1034 	kfree_skb(skb);
1035 	return -EINVAL;
1036 }
1037 
1038 static void notify_and_destroy(struct net *net, struct sk_buff *skb,
1039 			       struct nlmsghdr *n, u32 clid,
1040 			       struct Qdisc *old, struct Qdisc *new,
1041 			       struct netlink_ext_ack *extack)
1042 {
1043 	if (new || old)
1044 		qdisc_notify(net, skb, n, clid, old, new, extack);
1045 
1046 	if (old)
1047 		qdisc_put(old);
1048 }
1049 
1050 static void qdisc_clear_nolock(struct Qdisc *sch)
1051 {
1052 	sch->flags &= ~TCQ_F_NOLOCK;
1053 	if (!(sch->flags & TCQ_F_CPUSTATS))
1054 		return;
1055 
1056 	free_percpu(sch->cpu_bstats);
1057 	free_percpu(sch->cpu_qstats);
1058 	sch->cpu_bstats = NULL;
1059 	sch->cpu_qstats = NULL;
1060 	sch->flags &= ~TCQ_F_CPUSTATS;
1061 }
1062 
1063 /* Graft qdisc "new" to class "classid" of qdisc "parent" or
1064  * to device "dev".
1065  *
1066  * When appropriate, send a netlink notification using 'skb'
1067  * and "n".
1068  *
1069  * On success, destroy the old qdisc.
1070  */
1071 
1072 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
1073 		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
1074 		       struct Qdisc *new, struct Qdisc *old,
1075 		       struct netlink_ext_ack *extack)
1076 {
1077 	struct Qdisc *q = old;
1078 	struct net *net = dev_net(dev);
1079 
1080 	if (parent == NULL) {
1081 		unsigned int i, num_q, ingress;
1082 
1083 		ingress = 0;
1084 		num_q = dev->num_tx_queues;
1085 		if ((q && q->flags & TCQ_F_INGRESS) ||
1086 		    (new && new->flags & TCQ_F_INGRESS)) {
1087 			num_q = 1;
1088 			ingress = 1;
1089 			if (!dev_ingress_queue(dev)) {
1090 				NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
1091 				return -ENOENT;
1092 			}
1093 		}
1094 
1095 		if (dev->flags & IFF_UP)
1096 			dev_deactivate(dev);
1097 
1098 		qdisc_offload_graft_root(dev, new, old, extack);
1099 
1100 		if (new && new->ops->attach && !ingress)
1101 			goto skip;
1102 
1103 		for (i = 0; i < num_q; i++) {
1104 			struct netdev_queue *dev_queue = dev_ingress_queue(dev);
1105 
1106 			if (!ingress)
1107 				dev_queue = netdev_get_tx_queue(dev, i);
1108 
1109 			old = dev_graft_qdisc(dev_queue, new);
1110 			if (new && i > 0)
1111 				qdisc_refcount_inc(new);
1112 
1113 			if (!ingress)
1114 				qdisc_put(old);
1115 		}
1116 
1117 skip:
1118 		if (!ingress) {
1119 			old = rtnl_dereference(dev->qdisc);
1120 			if (new && !new->ops->attach)
1121 				qdisc_refcount_inc(new);
1122 			rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
1123 
1124 			notify_and_destroy(net, skb, n, classid, old, new, extack);
1125 
1126 			if (new && new->ops->attach)
1127 				new->ops->attach(new);
1128 		} else {
1129 			notify_and_destroy(net, skb, n, classid, old, new, extack);
1130 		}
1131 
1132 		if (dev->flags & IFF_UP)
1133 			dev_activate(dev);
1134 	} else {
1135 		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
1136 		unsigned long cl;
1137 		int err;
1138 
1139 		/* Only support running class lockless if parent is lockless */
1140 		if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
1141 			qdisc_clear_nolock(new);
1142 
1143 		if (!cops || !cops->graft)
1144 			return -EOPNOTSUPP;
1145 
1146 		cl = cops->find(parent, classid);
1147 		if (!cl) {
1148 			NL_SET_ERR_MSG(extack, "Specified class not found");
1149 			return -ENOENT;
1150 		}
1151 
1152 		if (new && new->ops == &noqueue_qdisc_ops) {
1153 			NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
1154 			return -EINVAL;
1155 		}
1156 
1157 		err = cops->graft(parent, cl, new, &old, extack);
1158 		if (err)
1159 			return err;
1160 		notify_and_destroy(net, skb, n, classid, old, new, extack);
1161 	}
1162 	return 0;
1163 }
1164 
1165 static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
1166 				   struct netlink_ext_ack *extack)
1167 {
1168 	u32 block_index;
1169 
1170 	if (tca[TCA_INGRESS_BLOCK]) {
1171 		block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);
1172 
1173 		if (!block_index) {
1174 			NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
1175 			return -EINVAL;
1176 		}
1177 		if (!sch->ops->ingress_block_set) {
1178 			NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
1179 			return -EOPNOTSUPP;
1180 		}
1181 		sch->ops->ingress_block_set(sch, block_index);
1182 	}
1183 	if (tca[TCA_EGRESS_BLOCK]) {
1184 		block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);
1185 
1186 		if (!block_index) {
1187 			NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
1188 			return -EINVAL;
1189 		}
1190 		if (!sch->ops->egress_block_set) {
1191 			NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
1192 			return -EOPNOTSUPP;
1193 		}
1194 		sch->ops->egress_block_set(sch, block_index);
1195 	}
1196 	return 0;
1197 }
1198 
1199 /*
1200    Allocate and initialize a new qdisc.
1201 
1202    Parameters are passed via opt.
1203  */
1204 
1205 static struct Qdisc *qdisc_create(struct net_device *dev,
1206 				  struct netdev_queue *dev_queue,
1207 				  u32 parent, u32 handle,
1208 				  struct nlattr **tca, int *errp,
1209 				  struct netlink_ext_ack *extack)
1210 {
1211 	int err;
1212 	struct nlattr *kind = tca[TCA_KIND];
1213 	struct Qdisc *sch;
1214 	struct Qdisc_ops *ops;
1215 	struct qdisc_size_table *stab;
1216 
1217 	ops = qdisc_lookup_ops(kind);
1218 #ifdef CONFIG_MODULES
1219 	if (ops == NULL && kind != NULL) {
1220 		char name[IFNAMSIZ];
1221 		if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
1222 			/* We dropped the RTNL semaphore in order to
1223 			 * perform the module load.  So, even if we
1224 			 * succeeded in loading the module we have to
1225 			 * tell the caller to replay the request.  We
1226 			 * indicate this using -EAGAIN.
1227 			 * We replay the request because the device may
1228 			 * go away in the meantime.
1229 			 */
1230 			rtnl_unlock();
1231 			request_module("sch_%s", name);
1232 			rtnl_lock();
1233 			ops = qdisc_lookup_ops(kind);
1234 			if (ops != NULL) {
1235 			/* We will try qdisc_lookup_ops() again on replay,
1236 				 * so don't keep a reference.
1237 				 */
1238 				module_put(ops->owner);
1239 				err = -EAGAIN;
1240 				goto err_out;
1241 			}
1242 		}
1243 	}
1244 #endif
1245 
1246 	err = -ENOENT;
1247 	if (!ops) {
1248 		NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
1249 		goto err_out;
1250 	}
1251 
1252 	sch = qdisc_alloc(dev_queue, ops, extack);
1253 	if (IS_ERR(sch)) {
1254 		err = PTR_ERR(sch);
1255 		goto err_out2;
1256 	}
1257 
1258 	sch->parent = parent;
1259 
1260 	if (handle == TC_H_INGRESS) {
1261 		if (!(sch->flags & TCQ_F_INGRESS)) {
1262 			NL_SET_ERR_MSG(extack,
1263 				       "Specified parent ID is reserved for ingress and clsact Qdiscs");
1264 			err = -EINVAL;
1265 			goto err_out3;
1266 		}
1267 		handle = TC_H_MAKE(TC_H_INGRESS, 0);
1268 	} else {
1269 		if (handle == 0) {
1270 			handle = qdisc_alloc_handle(dev);
1271 			if (handle == 0) {
1272 				NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
1273 				err = -ENOSPC;
1274 				goto err_out3;
1275 			}
1276 		}
1277 		if (!netif_is_multiqueue(dev))
1278 			sch->flags |= TCQ_F_ONETXQUEUE;
1279 	}
1280 
1281 	sch->handle = handle;
1282 
1283 	/* This exists to stay backward compatible with a userspace
1284 	 * loophole that allowed userspace to get the IFF_NO_QUEUE
1285 	 * facility on older kernels by setting tx_queue_len=0 (prior
1286 	 * to qdisc init) and then forgetting to reinit tx_queue_len
1287 	 * before attaching a qdisc again.
1288 	 */
1289 	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
1290 		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
1291 		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
1292 	}
1293 
1294 	err = qdisc_block_indexes_set(sch, tca, extack);
1295 	if (err)
1296 		goto err_out3;
1297 
1298 	if (tca[TCA_STAB]) {
1299 		stab = qdisc_get_stab(tca[TCA_STAB], extack);
1300 		if (IS_ERR(stab)) {
1301 			err = PTR_ERR(stab);
1302 			goto err_out3;
1303 		}
1304 		rcu_assign_pointer(sch->stab, stab);
1305 	}
1306 
1307 	if (ops->init) {
1308 		err = ops->init(sch, tca[TCA_OPTIONS], extack);
1309 		if (err != 0)
1310 			goto err_out4;
1311 	}
1312 
1313 	if (tca[TCA_RATE]) {
1314 		err = -EOPNOTSUPP;
1315 		if (sch->flags & TCQ_F_MQROOT) {
1316 			NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
1317 			goto err_out4;
1318 		}
1319 
1320 		err = gen_new_estimator(&sch->bstats,
1321 					sch->cpu_bstats,
1322 					&sch->rate_est,
1323 					NULL,
1324 					true,
1325 					tca[TCA_RATE]);
1326 		if (err) {
1327 			NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
1328 			goto err_out4;
1329 		}
1330 	}
1331 
1332 	qdisc_hash_add(sch, false);
1333 	trace_qdisc_create(ops, dev, parent);
1334 
1335 	return sch;
1336 
1337 err_out4:
1338 	/* Even if ops->init() failed, we call ops->destroy()
1339 	 * like qdisc_create_dflt().
1340 	 */
1341 	if (ops->destroy)
1342 		ops->destroy(sch);
1343 	qdisc_put_stab(rtnl_dereference(sch->stab));
1344 err_out3:
1345 	netdev_put(dev, &sch->dev_tracker);
1346 	qdisc_free(sch);
1347 err_out2:
1348 	module_put(ops->owner);
1349 err_out:
1350 	*errp = err;
1351 	return NULL;
1352 }
1353 
1354 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
1355 			struct netlink_ext_ack *extack)
1356 {
1357 	struct qdisc_size_table *ostab, *stab = NULL;
1358 	int err = 0;
1359 
1360 	if (tca[TCA_OPTIONS]) {
1361 		if (!sch->ops->change) {
1362 			NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
1363 			return -EINVAL;
1364 		}
1365 		if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
1366 			NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
1367 			return -EOPNOTSUPP;
1368 		}
1369 		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
1370 		if (err)
1371 			return err;
1372 	}
1373 
1374 	if (tca[TCA_STAB]) {
1375 		stab = qdisc_get_stab(tca[TCA_STAB], extack);
1376 		if (IS_ERR(stab))
1377 			return PTR_ERR(stab);
1378 	}
1379 
1380 	ostab = rtnl_dereference(sch->stab);
1381 	rcu_assign_pointer(sch->stab, stab);
1382 	qdisc_put_stab(ostab);
1383 
1384 	if (tca[TCA_RATE]) {
1385 		/* NB: ignores errors from replace_estimator
1386 		   because the change can't be undone. */
1387 		if (sch->flags & TCQ_F_MQROOT)
1388 			goto out;
1389 		gen_replace_estimator(&sch->bstats,
1390 				      sch->cpu_bstats,
1391 				      &sch->rate_est,
1392 				      NULL,
1393 				      true,
1394 				      tca[TCA_RATE]);
1395 	}
1396 out:
1397 	return 0;
1398 }
1399 
1400 struct check_loop_arg {
1401 	struct qdisc_walker	w;
1402 	struct Qdisc		*p;
1403 	int			depth;
1404 };
1405 
1406 static int check_loop_fn(struct Qdisc *q, unsigned long cl,
1407 			 struct qdisc_walker *w);
1408 
1409 static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
1410 {
1411 	struct check_loop_arg	arg;
1412 
1413 	if (q->ops->cl_ops == NULL)
1414 		return 0;
1415 
1416 	arg.w.stop = arg.w.skip = arg.w.count = 0;
1417 	arg.w.fn = check_loop_fn;
1418 	arg.depth = depth;
1419 	arg.p = p;
1420 	q->ops->cl_ops->walk(q, &arg.w);
1421 	return arg.w.stop ? -ELOOP : 0;
1422 }
1423 
1424 static int
1425 check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
1426 {
1427 	struct Qdisc *leaf;
1428 	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1429 	struct check_loop_arg *arg = (struct check_loop_arg *)w;
1430 
1431 	leaf = cops->leaf(q, cl);
1432 	if (leaf) {
1433 		if (leaf == arg->p || arg->depth > 7)
1434 			return -ELOOP;
1435 		return check_loop(leaf, arg->p, arg->depth + 1);
1436 	}
1437 	return 0;
1438 }
1439 
1440 const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
1441 	[TCA_KIND]		= { .type = NLA_STRING },
1442 	[TCA_RATE]		= { .type = NLA_BINARY,
1443 				    .len = sizeof(struct tc_estimator) },
1444 	[TCA_STAB]		= { .type = NLA_NESTED },
1445 	[TCA_DUMP_INVISIBLE]	= { .type = NLA_FLAG },
1446 	[TCA_CHAIN]		= { .type = NLA_U32 },
1447 	[TCA_INGRESS_BLOCK]	= { .type = NLA_U32 },
1448 	[TCA_EGRESS_BLOCK]	= { .type = NLA_U32 },
1449 };
1450 
1451 /*
1452  * Delete/get qdisc.
1453  */
1454 
1455 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1456 			struct netlink_ext_ack *extack)
1457 {
1458 	struct net *net = sock_net(skb->sk);
1459 	struct tcmsg *tcm = nlmsg_data(n);
1460 	struct nlattr *tca[TCA_MAX + 1];
1461 	struct net_device *dev;
1462 	u32 clid;
1463 	struct Qdisc *q = NULL;
1464 	struct Qdisc *p = NULL;
1465 	int err;
1466 
1467 	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1468 				     rtm_tca_policy, extack);
1469 	if (err < 0)
1470 		return err;
1471 
1472 	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1473 	if (!dev)
1474 		return -ENODEV;
1475 
1476 	clid = tcm->tcm_parent;
1477 	if (clid) {
1478 		if (clid != TC_H_ROOT) {
1479 			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1480 				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1481 				if (!p) {
1482 					NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
1483 					return -ENOENT;
1484 				}
1485 				q = qdisc_leaf(p, clid);
1486 			} else if (dev_ingress_queue(dev)) {
1487 				q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
1488 			}
1489 		} else {
1490 			q = rtnl_dereference(dev->qdisc);
1491 		}
1492 		if (!q) {
1493 			NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
1494 			return -ENOENT;
1495 		}
1496 
1497 		if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
1498 			NL_SET_ERR_MSG(extack, "Invalid handle");
1499 			return -EINVAL;
1500 		}
1501 	} else {
1502 		q = qdisc_lookup(dev, tcm->tcm_handle);
1503 		if (!q) {
1504 			NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
1505 			return -ENOENT;
1506 		}
1507 	}
1508 
1509 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1510 		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1511 		return -EINVAL;
1512 	}
1513 
1514 	if (n->nlmsg_type == RTM_DELQDISC) {
1515 		if (!clid) {
1516 			NL_SET_ERR_MSG(extack, "Classid cannot be zero");
1517 			return -EINVAL;
1518 		}
1519 		if (q->handle == 0) {
1520 			NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
1521 			return -ENOENT;
1522 		}
1523 		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
1524 		if (err != 0)
1525 			return err;
1526 	} else {
1527 		qdisc_notify(net, skb, n, clid, NULL, q, NULL);
1528 	}
1529 	return 0;
1530 }
1531 
1532 /*
1533  * Create/change qdisc.
1534  */
1535 
1536 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
1537 			   struct netlink_ext_ack *extack)
1538 {
1539 	struct net *net = sock_net(skb->sk);
1540 	struct tcmsg *tcm;
1541 	struct nlattr *tca[TCA_MAX + 1];
1542 	struct net_device *dev;
1543 	u32 clid;
1544 	struct Qdisc *q, *p;
1545 	int err;
1546 
1547 replay:
1548 	/* Reinit, just in case something touches this. */
1549 	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
1550 				     rtm_tca_policy, extack);
1551 	if (err < 0)
1552 		return err;
1553 
1554 	tcm = nlmsg_data(n);
1555 	clid = tcm->tcm_parent;
1556 	q = p = NULL;
1557 
1558 	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1559 	if (!dev)
1560 		return -ENODEV;
1561 
1562 
1563 	if (clid) {
1564 		if (clid != TC_H_ROOT) {
1565 			if (clid != TC_H_INGRESS) {
1566 				p = qdisc_lookup(dev, TC_H_MAJ(clid));
1567 				if (!p) {
1568 					NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
1569 					return -ENOENT;
1570 				}
1571 				q = qdisc_leaf(p, clid);
1572 			} else if (dev_ingress_queue_create(dev)) {
1573 				q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
1574 			}
1575 		} else {
1576 			q = rtnl_dereference(dev->qdisc);
1577 		}
1578 
1579 		/* It may be the default qdisc; ignore it */
1580 		if (q && q->handle == 0)
1581 			q = NULL;
1582 
1583 		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
1584 			if (tcm->tcm_handle) {
1585 				if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
1586 					NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
1587 					return -EEXIST;
1588 				}
1589 				if (TC_H_MIN(tcm->tcm_handle)) {
1590 					NL_SET_ERR_MSG(extack, "Invalid minor handle");
1591 					return -EINVAL;
1592 				}
1593 				q = qdisc_lookup(dev, tcm->tcm_handle);
1594 				if (!q)
1595 					goto create_n_graft;
1596 				if (n->nlmsg_flags & NLM_F_EXCL) {
1597 					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
1598 					return -EEXIST;
1599 				}
1600 				if (tca[TCA_KIND] &&
1601 				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1602 					NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1603 					return -EINVAL;
1604 				}
1605 				if (q->flags & TCQ_F_INGRESS) {
1606 					NL_SET_ERR_MSG(extack,
1607 						       "Cannot regraft ingress or clsact Qdiscs");
1608 					return -EINVAL;
1609 				}
1610 				if (q == p ||
1611 				    (p && check_loop(q, p, 0))) {
1612 					NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
1613 					return -ELOOP;
1614 				}
1615 				if (clid == TC_H_INGRESS) {
1616 					NL_SET_ERR_MSG(extack, "Ingress cannot graft directly");
1617 					return -EINVAL;
1618 				}
1619 				qdisc_refcount_inc(q);
1620 				goto graft;
1621 			} else {
1622 				if (!q)
1623 					goto create_n_graft;
1624 
1625 				/* This magic test requires explanation.
1626 				 *
1627 				 *   We know that some child q is already
1628 				 *   attached to this parent and we have a choice:
1629 				 *   either to change it or to create/graft a new one.
1630 				 *
1631 				 *   1. We are allowed to create/graft only
1632 				 *   if both the CREATE and REPLACE flags are set.
1633 				 *
1634 				 *   2. If EXCL is set, the requester wanted to say
1635 				 *   that the qdisc tcm_handle is not expected
1636 				 *   to exist, so we choose create/graft too.
1637 				 *
1638 				 *   3. The last case is when no flags are set.
1639 				 *   Alas, it is sort of a hole in the API; we
1640 				 *   cannot decide what to do unambiguously.
1641 				 *   For now we select create/graft if the
1642 				 *   user gave a KIND which does not match the existing one.
1643 				 */
1644 				if ((n->nlmsg_flags & NLM_F_CREATE) &&
1645 				    (n->nlmsg_flags & NLM_F_REPLACE) &&
1646 				    ((n->nlmsg_flags & NLM_F_EXCL) ||
1647 				     (tca[TCA_KIND] &&
1648 				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
1649 					goto create_n_graft;
1650 			}
1651 		}
1652 	} else {
1653 		if (!tcm->tcm_handle) {
1654 			NL_SET_ERR_MSG(extack, "Handle cannot be zero");
1655 			return -EINVAL;
1656 		}
1657 		q = qdisc_lookup(dev, tcm->tcm_handle);
1658 	}
1659 
1660 	/* Change qdisc parameters */
1661 	if (!q) {
1662 		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
1663 		return -ENOENT;
1664 	}
1665 	if (n->nlmsg_flags & NLM_F_EXCL) {
1666 		NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
1667 		return -EEXIST;
1668 	}
1669 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
1670 		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
1671 		return -EINVAL;
1672 	}
1673 	err = qdisc_change(q, tca, extack);
1674 	if (err == 0)
1675 		qdisc_notify(net, skb, n, clid, NULL, q, extack);
1676 	return err;
1677 
1678 create_n_graft:
1679 	if (!(n->nlmsg_flags & NLM_F_CREATE)) {
1680 		NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
1681 		return -ENOENT;
1682 	}
1683 	if (clid == TC_H_INGRESS) {
1684 		if (dev_ingress_queue(dev)) {
1685 			q = qdisc_create(dev, dev_ingress_queue(dev),
1686 					 tcm->tcm_parent, tcm->tcm_parent,
1687 					 tca, &err, extack);
1688 		} else {
1689 			NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
1690 			err = -ENOENT;
1691 		}
1692 	} else {
1693 		struct netdev_queue *dev_queue;
1694 
1695 		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
1696 			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
1697 		else if (p)
1698 			dev_queue = p->dev_queue;
1699 		else
1700 			dev_queue = netdev_get_tx_queue(dev, 0);
1701 
1702 		q = qdisc_create(dev, dev_queue,
1703 				 tcm->tcm_parent, tcm->tcm_handle,
1704 				 tca, &err, extack);
1705 	}
1706 	if (q == NULL) {
1707 		if (err == -EAGAIN)
1708 			goto replay;
1709 		return err;
1710 	}
1711 
1712 graft:
1713 	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
1714 	if (err) {
1715 		if (q)
1716 			qdisc_put(q);
1717 		return err;
1718 	}
1719 
1720 	return 0;
1721 }
1722 
1723 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
1724 			      struct netlink_callback *cb,
1725 			      int *q_idx_p, int s_q_idx, bool recur,
1726 			      bool dump_invisible)
1727 {
1728 	int ret = 0, q_idx = *q_idx_p;
1729 	struct Qdisc *q;
1730 	int b;
1731 
1732 	if (!root)
1733 		return 0;
1734 
1735 	q = root;
1736 	if (q_idx < s_q_idx) {
1737 		q_idx++;
1738 	} else {
1739 		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1740 		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1741 				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
1742 				  RTM_NEWQDISC, NULL) <= 0)
1743 			goto done;
1744 		q_idx++;
1745 	}
1746 
1747 	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
1748 	 * itself has already been dumped.
1749 	 *
1750 	 * If we've already dumped the top-level (ingress) qdisc above and the global
1751 	 * qdisc hashtable, we don't want to hit it again.
1752 	 */
1753 	if (!qdisc_dev(root) || !recur)
1754 		goto out;
1755 
1756 	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1757 		if (q_idx < s_q_idx) {
1758 			q_idx++;
1759 			continue;
1760 		}
1761 		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
1762 		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
1763 				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
1764 				  RTM_NEWQDISC, NULL) <= 0)
1765 			goto done;
1766 		q_idx++;
1767 	}
1768 
1769 out:
1770 	*q_idx_p = q_idx;
1771 	return ret;
1772 done:
1773 	ret = -1;
1774 	goto out;
1775 }
1776 
1777 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1778 {
1779 	struct net *net = sock_net(skb->sk);
1780 	int idx, q_idx;
1781 	int s_idx, s_q_idx;
1782 	struct net_device *dev;
1783 	const struct nlmsghdr *nlh = cb->nlh;
1784 	struct nlattr *tca[TCA_MAX + 1];
1785 	int err;
1786 
1787 	s_idx = cb->args[0];
1788 	s_q_idx = q_idx = cb->args[1];
1789 
1790 	idx = 0;
1791 	ASSERT_RTNL();
1792 
1793 	err = nlmsg_parse_deprecated(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
1794 				     rtm_tca_policy, cb->extack);
1795 	if (err < 0)
1796 		return err;
1797 
1798 	for_each_netdev(net, dev) {
1799 		struct netdev_queue *dev_queue;
1800 
1801 		if (idx < s_idx)
1802 			goto cont;
1803 		if (idx > s_idx)
1804 			s_q_idx = 0;
1805 		q_idx = 0;
1806 
1807 		if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
1808 				       skb, cb, &q_idx, s_q_idx,
1809 				       true, tca[TCA_DUMP_INVISIBLE]) < 0)
1810 			goto done;
1811 
1812 		dev_queue = dev_ingress_queue(dev);
1813 		if (dev_queue &&
1814 		    tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping),
1815 				       skb, cb, &q_idx, s_q_idx, false,
1816 				       tca[TCA_DUMP_INVISIBLE]) < 0)
1817 			goto done;
1818 
1819 cont:
1820 		idx++;
1821 	}
1822 
1823 done:
1824 	cb->args[0] = idx;
1825 	cb->args[1] = q_idx;
1826 
1827 	return skb->len;
1828 }
1829 
1830 
1831 
1832 /************************************************
1833  *	Traffic classes manipulation.		*
1834  ************************************************/
1835 
1836 static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1837 			  unsigned long cl, u32 portid, u32 seq, u16 flags,
1838 			  int event, struct netlink_ext_ack *extack)
1839 {
1840 	struct tcmsg *tcm;
1841 	struct nlmsghdr  *nlh;
1842 	unsigned char *b = skb_tail_pointer(skb);
1843 	struct gnet_dump d;
1844 	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1845 
1846 	cond_resched();
1847 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1848 	if (!nlh)
1849 		goto out_nlmsg_trim;
1850 	tcm = nlmsg_data(nlh);
1851 	tcm->tcm_family = AF_UNSPEC;
1852 	tcm->tcm__pad1 = 0;
1853 	tcm->tcm__pad2 = 0;
1854 	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1855 	tcm->tcm_parent = q->handle;
1856 	tcm->tcm_handle = q->handle;
1857 	tcm->tcm_info = 0;
1858 	if (nla_put_string(skb, TCA_KIND, q->ops->id))
1859 		goto nla_put_failure;
1860 	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1861 		goto nla_put_failure;
1862 
1863 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
1864 					 NULL, &d, TCA_PAD) < 0)
1865 		goto nla_put_failure;
1866 
1867 	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1868 		goto nla_put_failure;
1869 
1870 	if (gnet_stats_finish_copy(&d) < 0)
1871 		goto nla_put_failure;
1872 
1873 	if (extack && extack->_msg &&
1874 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
1875 		goto out_nlmsg_trim;
1876 
1877 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1878 
1879 	return skb->len;
1880 
1881 out_nlmsg_trim:
1882 nla_put_failure:
1883 	nlmsg_trim(skb, b);
1884 	return -1;
1885 }
1886 
1887 static int tclass_notify(struct net *net, struct sk_buff *oskb,
1888 			 struct nlmsghdr *n, struct Qdisc *q,
1889 			 unsigned long cl, int event, struct netlink_ext_ack *extack)
1890 {
1891 	struct sk_buff *skb;
1892 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1893 
1894 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1895 	if (!skb)
1896 		return -ENOBUFS;
1897 
1898 	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event, extack) < 0) {
1899 		kfree_skb(skb);
1900 		return -EINVAL;
1901 	}
1902 
1903 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1904 			      n->nlmsg_flags & NLM_F_ECHO);
1905 }
1906 
1907 static int tclass_del_notify(struct net *net,
1908 			     const struct Qdisc_class_ops *cops,
1909 			     struct sk_buff *oskb, struct nlmsghdr *n,
1910 			     struct Qdisc *q, unsigned long cl,
1911 			     struct netlink_ext_ack *extack)
1912 {
1913 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1914 	struct sk_buff *skb;
1915 	int err = 0;
1916 
1917 	if (!cops->delete)
1918 		return -EOPNOTSUPP;
1919 
1920 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1921 	if (!skb)
1922 		return -ENOBUFS;
1923 
1924 	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
1925 			   RTM_DELTCLASS, extack) < 0) {
1926 		kfree_skb(skb);
1927 		return -EINVAL;
1928 	}
1929 
1930 	err = cops->delete(q, cl, extack);
1931 	if (err) {
1932 		kfree_skb(skb);
1933 		return err;
1934 	}
1935 
1936 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1937 			     n->nlmsg_flags & NLM_F_ECHO);
1938 	return err;
1939 }
1940 
1941 #ifdef CONFIG_NET_CLS
1942 
1943 struct tcf_bind_args {
1944 	struct tcf_walker w;
1945 	unsigned long base;
1946 	unsigned long cl;
1947 	u32 classid;
1948 };
1949 
1950 static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
1951 {
1952 	struct tcf_bind_args *a = (void *)arg;
1953 
1954 	if (n && tp->ops->bind_class) {
1955 		struct Qdisc *q = tcf_block_q(tp->chain->block);
1956 
1957 		sch_tree_lock(q);
1958 		tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
1959 		sch_tree_unlock(q);
1960 	}
1961 	return 0;
1962 }
1963 
1964 struct tc_bind_class_args {
1965 	struct qdisc_walker w;
1966 	unsigned long new_cl;
1967 	u32 portid;
1968 	u32 clid;
1969 };
1970 
1971 static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
1972 				struct qdisc_walker *w)
1973 {
1974 	struct tc_bind_class_args *a = (struct tc_bind_class_args *)w;
1975 	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1976 	struct tcf_block *block;
1977 	struct tcf_chain *chain;
1978 
1979 	block = cops->tcf_block(q, cl, NULL);
1980 	if (!block)
1981 		return 0;
1982 	for (chain = tcf_get_next_chain(block, NULL);
1983 	     chain;
1984 	     chain = tcf_get_next_chain(block, chain)) {
1985 		struct tcf_proto *tp;
1986 
1987 		for (tp = tcf_get_next_proto(chain, NULL);
1988 		     tp; tp = tcf_get_next_proto(chain, tp)) {
1989 			struct tcf_bind_args arg = {};
1990 
1991 			arg.w.fn = tcf_node_bind;
1992 			arg.classid = a->clid;
1993 			arg.base = cl;
1994 			arg.cl = a->new_cl;
1995 			tp->ops->walk(tp, &arg.w, true);
1996 		}
1997 	}
1998 
1999 	return 0;
2000 }
2001 
2002 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
2003 			   unsigned long new_cl)
2004 {
2005 	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
2006 	struct tc_bind_class_args args = {};
2007 
2008 	if (!cops->tcf_block)
2009 		return;
2010 	args.portid = portid;
2011 	args.clid = clid;
2012 	args.new_cl = new_cl;
2013 	args.w.fn = tc_bind_class_walker;
2014 	q->ops->cl_ops->walk(q, &args.w);
2015 }
2016 
2017 #else
2018 
2019 static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
2020 			   unsigned long new_cl)
2021 {
2022 }
2023 
2024 #endif
2025 
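/* Handler for RTM_NEWTCLASS, RTM_DELTCLASS and RTM_GETTCLASS requests.
 * The qdisc is resolved from tcm_parent/tcm_handle as described below,
 * then the request is dispatched to the qdisc's class ops: ->find() to
 * look the class up, ->change() to create or modify it, ->delete() to
 * remove it.
 *
 * For example (assuming a classful qdisc such as HTB is already
 * installed as 1:), a command like
 *
 *	tc class add dev eth0 parent 1: classid 1:10 htb rate 1mbit
 *
 * is expected to arrive here as RTM_NEWTCLASS with tcm_parent = 1:0,
 * tcm_handle = 1:10 and the htb parameters carried in TCA_OPTIONS.
 */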
2026 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
2027 			 struct netlink_ext_ack *extack)
2028 {
2029 	struct net *net = sock_net(skb->sk);
2030 	struct tcmsg *tcm = nlmsg_data(n);
2031 	struct nlattr *tca[TCA_MAX + 1];
2032 	struct net_device *dev;
2033 	struct Qdisc *q = NULL;
2034 	const struct Qdisc_class_ops *cops;
2035 	unsigned long cl = 0;
2036 	unsigned long new_cl;
2037 	u32 portid;
2038 	u32 clid;
2039 	u32 qid;
2040 	int err;
2041 
2042 	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
2043 				     rtm_tca_policy, extack);
2044 	if (err < 0)
2045 		return err;
2046 
2047 	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2048 	if (!dev)
2049 		return -ENODEV;
2050 
2051 	/*
2052 	   parent == TC_H_UNSPEC - unspecified parent.
2053 	   parent == TC_H_ROOT   - class is root, which has no parent.
2054 	   parent == X:0	 - parent is root class.
2055 	   parent == X:Y	 - parent is a node in hierarchy.
2056 	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.
2057 
2058 	   handle == 0:0	 - generate handle from kernel pool.
2059 	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
2060 	   handle == X:Y	 - class is X:Y.
2061 	   handle == X:0	 - root class.
2062 	 */
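
	/* Handles pack the major number into the upper 16 bits and the
	 * minor number into the lower 16 bits: "1:20" in tc's
	 * hexadecimal notation is 0x00010020, TC_H_MAJ(0x00010020) is
	 * 0x00010000, and TC_H_MAKE(0x00010000, 0x20) rebuilds
	 * 0x00010020.
	 */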
2063 
2064 	/* Step 1. Determine qdisc handle X:0 */
2065 
2066 	portid = tcm->tcm_parent;
2067 	clid = tcm->tcm_handle;
2068 	qid = TC_H_MAJ(clid);
2069 
2070 	if (portid != TC_H_ROOT) {
2071 		u32 qid1 = TC_H_MAJ(portid);
2072 
2073 		if (qid && qid1) {
2074 			/* If both majors are known, they must be identical. */
2075 			if (qid != qid1)
2076 				return -EINVAL;
2077 		} else if (qid1) {
2078 			qid = qid1;
2079 		} else if (qid == 0)
2080 			qid = rtnl_dereference(dev->qdisc)->handle;
2081 
2082 		/* Now qid is a genuine qdisc handle, consistent
2083 		 * with both parent and child.
2084 		 *
2085 		 * TC_H_MAJ(portid) may still be unspecified; complete it now.
2086 		 */
2087 		if (portid)
2088 			portid = TC_H_MAKE(qid, portid);
2089 	} else {
2090 		if (qid == 0)
2091 			qid = rtnl_dereference(dev->qdisc)->handle;
2092 	}
2093 
2094 	/* OK. Locate qdisc */
2095 	q = qdisc_lookup(dev, qid);
2096 	if (!q)
2097 		return -ENOENT;
2098 
2099 	/* And check that it supports classes */
2100 	cops = q->ops->cl_ops;
2101 	if (cops == NULL)
2102 		return -EINVAL;
2103 
2104 	/* Now try to get class */
2105 	if (clid == 0) {
2106 		if (portid == TC_H_ROOT)
2107 			clid = qid;
2108 	} else
2109 		clid = TC_H_MAKE(qid, clid);
2110 
2111 	if (clid)
2112 		cl = cops->find(q, clid);
2113 
2114 	if (cl == 0) {
2115 		err = -ENOENT;
2116 		if (n->nlmsg_type != RTM_NEWTCLASS ||
2117 		    !(n->nlmsg_flags & NLM_F_CREATE))
2118 			goto out;
2119 	} else {
2120 		switch (n->nlmsg_type) {
2121 		case RTM_NEWTCLASS:
2122 			err = -EEXIST;
2123 			if (n->nlmsg_flags & NLM_F_EXCL)
2124 				goto out;
2125 			break;
2126 		case RTM_DELTCLASS:
2127 			err = tclass_del_notify(net, cops, skb, n, q, cl, extack);
2128 			/* Unbind filters from the deleted class by re-binding them to 0 */
2129 			tc_bind_tclass(q, portid, clid, 0);
2130 			goto out;
2131 		case RTM_GETTCLASS:
2132 			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS, extack);
2133 			goto out;
2134 		default:
2135 			err = -EINVAL;
2136 			goto out;
2137 		}
2138 	}
2139 
2140 	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
2141 		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
2142 		return -EOPNOTSUPP;
2143 	}
2144 
2145 	new_cl = cl;
2146 	err = -EOPNOTSUPP;
2147 	if (cops->change)
2148 		err = cops->change(q, clid, portid, tca, &new_cl, extack);
2149 	if (err == 0) {
2150 		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS, extack);
2151 		/* We just created a new class; re-bind filters that use its classid. */
2152 		if (cl != new_cl)
2153 			tc_bind_tclass(q, portid, clid, new_cl);
2154 	}
2155 out:
2156 	return err;
2157 }
2158 
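/* Glue for dumping all classes of one qdisc: the qdisc's ->walk() op
 * calls qdisc_class_dump() for every class, which in turn emits one
 * RTM_NEWTCLASS message per class into the dump skb.
 */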
2159 struct qdisc_dump_args {
2160 	struct qdisc_walker	w;
2161 	struct sk_buff		*skb;
2162 	struct netlink_callback	*cb;
2163 };
2164 
2165 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
2166 			    struct qdisc_walker *arg)
2167 {
2168 	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
2169 
2170 	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
2171 			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2172 			      RTM_NEWTCLASS, NULL);
2173 }
2174 
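/* Dump the classes of one qdisc into the dump skb.  Qdiscs already
 * fully dumped by a previous callback invocation (the first s_t,
 * counted via *t_p) are skipped, as are qdiscs without class ops or
 * not matching the requested parent; cb->args[1] remembers how many
 * classes of the qdisc being resumed were already emitted.  Returns -1
 * when the skb filled up and the dump must be continued later.
 */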
2175 static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
2176 				struct tcmsg *tcm, struct netlink_callback *cb,
2177 				int *t_p, int s_t)
2178 {
2179 	struct qdisc_dump_args arg;
2180 
2181 	if (tc_qdisc_dump_ignore(q, false) ||
2182 	    *t_p < s_t || !q->ops->cl_ops ||
2183 	    (tcm->tcm_parent &&
2184 	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
2185 		(*t_p)++;
2186 		return 0;
2187 	}
2188 	if (*t_p > s_t)
2189 		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
2190 	arg.w.fn = qdisc_class_dump;
2191 	arg.skb = skb;
2192 	arg.cb = cb;
2193 	arg.w.stop  = 0;
2194 	arg.w.skip = cb->args[1];
2195 	arg.w.count = 0;
2196 	q->ops->cl_ops->walk(q, &arg.w);
2197 	cb->args[1] = arg.w.count;
2198 	if (arg.w.stop)
2199 		return -1;
2200 	(*t_p)++;
2201 	return 0;
2202 }
2203 
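/* Dump classes of @root and, when @recur is set, of every qdisc hashed
 * on the device (the ingress path passes recur == false).  If the
 * request pinpoints a parent, only that one child qdisc is visited.
 */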
2204 static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
2205 			       struct tcmsg *tcm, struct netlink_callback *cb,
2206 			       int *t_p, int s_t, bool recur)
2207 {
2208 	struct Qdisc *q;
2209 	int b;
2210 
2211 	if (!root)
2212 		return 0;
2213 
2214 	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
2215 		return -1;
2216 
2217 	if (!qdisc_dev(root) || !recur)
2218 		return 0;
2219 
2220 	if (tcm->tcm_parent) {
2221 		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
2222 		if (q && q != root &&
2223 		    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2224 			return -1;
2225 		return 0;
2226 	}
2227 	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
2228 		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
2229 			return -1;
2230 	}
2231 
2232 	return 0;
2233 }
2234 
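/* Netlink dump callback for RTM_GETTCLASS: walks the classes of the
 * device's root qdisc hierarchy and of its ingress qdisc, resuming
 * from the position stored in cb->args[] on each invocation.
 */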
2235 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
2236 {
2237 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2238 	struct net *net = sock_net(skb->sk);
2239 	struct netdev_queue *dev_queue;
2240 	struct net_device *dev;
2241 	int t, s_t;
2242 
2243 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2244 		return 0;
2245 	dev = dev_get_by_index(net, tcm->tcm_ifindex);
2246 	if (!dev)
2247 		return 0;
2248 
2249 	s_t = cb->args[0];
2250 	t = 0;
2251 
2252 	if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
2253 				skb, tcm, cb, &t, s_t, true) < 0)
2254 		goto done;
2255 
2256 	dev_queue = dev_ingress_queue(dev);
2257 	if (dev_queue &&
2258 	    tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping),
2259 				skb, tcm, cb, &t, s_t, false) < 0)
2260 		goto done;
2261 
2262 done:
2263 	cb->args[0] = t;
2264 
2265 	dev_put(dev);
2266 	return skb->len;
2267 }
2268 
2269 #ifdef CONFIG_PROC_FS
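/* /proc/net/psched exports the scheduler's clock parameters; user
 * space (e.g. iproute2's tc) reads them to convert between time
 * units.  The four 32-bit hex fields are: nanoseconds per
 * microsecond, nanoseconds per psched tick, the advertised clock
 * resolution (1000000) and NSEC_PER_SEC divided by the hrtimer
 * resolution.
 */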
2270 static int psched_show(struct seq_file *seq, void *v)
2271 {
2272 	seq_printf(seq, "%08x %08x %08x %08x\n",
2273 		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
2274 		   1000000,
2275 		   (u32)NSEC_PER_SEC / hrtimer_resolution);
2276 
2277 	return 0;
2278 }
2279 
2280 static int __net_init psched_net_init(struct net *net)
2281 {
2282 	struct proc_dir_entry *e;
2283 
2284 	e = proc_create_single("psched", 0, net->proc_net, psched_show);
2285 	if (e == NULL)
2286 		return -ENOMEM;
2287 
2288 	return 0;
2289 }
2290 
2291 static void __net_exit psched_net_exit(struct net *net)
2292 {
2293 	remove_proc_entry("psched", net->proc_net);
2294 }
2295 #else
2296 static int __net_init psched_net_init(struct net *net)
2297 {
2298 	return 0;
2299 }
2300 
2301 static void __net_exit psched_net_exit(struct net *net)
2302 {
2303 }
2304 #endif
2305 
2306 static struct pernet_operations psched_net_ops = {
2307 	.init = psched_net_init,
2308 	.exit = psched_net_exit,
2309 };
2310 
2311 #if IS_ENABLED(CONFIG_RETPOLINE)
2312 DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
2313 #endif
2314 
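/* Subsystem init: register the per-netns /proc/net/psched file, the
 * built-in fifo/mq/noqueue qdiscs and the rtnetlink handlers for the
 * qdisc and class operations implemented in this file, then call
 * tc_wrapper_init().
 */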
2315 static int __init pktsched_init(void)
2316 {
2317 	int err;
2318 
2319 	err = register_pernet_subsys(&psched_net_ops);
2320 	if (err) {
2321 		pr_err("pktsched_init: "
2322 		       "cannot initialize per netns operations\n");
2323 		return err;
2324 	}
2325 
2326 	register_qdisc(&pfifo_fast_ops);
2327 	register_qdisc(&pfifo_qdisc_ops);
2328 	register_qdisc(&bfifo_qdisc_ops);
2329 	register_qdisc(&pfifo_head_drop_qdisc_ops);
2330 	register_qdisc(&mq_qdisc_ops);
2331 	register_qdisc(&noqueue_qdisc_ops);
2332 
2333 	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
2334 	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
2335 	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
2336 		      0);
2337 	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
2338 	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
2339 	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
2340 		      0);
2341 
2342 	tc_wrapper_init();
2343 
2344 	return 0;
2345 }
2346 
2347 subsys_initcall(pktsched_init);
2348