xref: /openbmc/linux/net/sched/sch_generic.c (revision 5f04d506)
/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *              - Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

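/*
 * Stash an skb that could not be sent back on the qdisc (in gso_skb) and
 * reschedule the queue so transmission is retried later.
 */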
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	skb_dst_force(skb);
	q->gso_skb = skb;
	q->qstats.requeues++;
	q->q.qlen++;	/* it's still part of the queue */
	__netif_schedule(q);

	return 0;
}

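/*
 * Fetch the next skb to transmit.  A previously requeued gso_skb takes
 * precedence, but is only handed out once its tx queue is neither frozen
 * nor stopped; otherwise NULL is returned so the requeued skb keeps its
 * place.  With no requeued skb pending, the qdisc's own ->dequeue() is used.
 */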
static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb = q->gso_skb;

	if (unlikely(skb)) {
		struct net_device *dev = qdisc_dev(q);
		struct netdev_queue *txq;

		/* check the reason for requeuing without taking the tx lock first */
		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
		if (!netif_tx_queue_frozen_or_stopped(txq)) {
			q->gso_skb = NULL;
			q->q.qlen--;
		} else
			skb = NULL;
	} else {
		skb = q->dequeue(q);
	}

	return skb;
}

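/*
 * The driver's xmit routine returned NETDEV_TX_LOCKED.  Decide whether the
 * tx lock is held by this very CPU (a recursion bug, so the skb is dropped)
 * or by another CPU (a genuine collision, so the skb is requeued).
 */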
static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct netdev_queue *dev_queue,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
		/*
		 * The same CPU is holding the lock. It may be a transient
		 * configuration error, when hard_start_xmit() recurses. We
		 * detect it by checking the xmit owner and drop the packet
		 * when a dead loop is detected. Return OK to try the next skb.
		 */
		kfree_skb(skb);
		if (net_ratelimit())
			printk(KERN_WARNING "Dead loop on netdevice %s, "
			       "fix it urgently!\n", dev_queue->dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another CPU is holding the lock; requeue the skb and delay
		 * transmissions for a while.
		 */
		__this_cpu_inc(softnet_data.cpu_collision);
		ret = dev_requeue_skb(skb, q);
	}

	return ret;
}


/*
 * Transmit one skb, and handle the return status as required. Holding the
 * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
 * function.
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock)
{
	int ret = NETDEV_TX_BUSY;

	/* And release qdisc */
	spin_unlock(root_lock);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_tx_queue_frozen_or_stopped(txq))
		ret = dev_hard_start_xmit(skb, dev, txq);

	HARD_TX_UNLOCK(dev, txq);

	spin_lock(root_lock);

	if (dev_xmit_complete(ret)) {
		/* Driver sent out skb successfully or skb was consumed */
		ret = qdisc_qlen(q);
	} else if (ret == NETDEV_TX_LOCKED) {
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, txq, q);
	} else {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
			       dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, q);
	}

	if (ret && netif_tx_queue_frozen_or_stopped(txq))
		ret = 0;

	return ret;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct Qdisc *q)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;

	/* Dequeue packet */
	skb = dequeue_skb(q);
	if (unlikely(!skb))
		return 0;
	WARN_ON_ONCE(skb_dst_is_noref(skb));
	root_lock = qdisc_lock(q);
	dev = qdisc_dev(q);
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	return sch_direct_xmit(skb, q, dev, txq, root_lock);
}

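/*
 * Keep transmitting via qdisc_restart() until the queue drains, handing
 * control back through __netif_schedule() if another task needs the CPU
 * or a full jiffy has elapsed since we started.
 */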
void __qdisc_run(struct Qdisc *q)
{
	unsigned long start_time = jiffies;

	while (qdisc_restart(q)) {
		/*
		 * Postpone processing if
		 * 1. another process needs the CPU;
		 * 2. we've been doing it for too long.
		 */
		if (need_resched() || jiffies != start_time) {
			__netif_schedule(q);
			break;
		}
	}

	qdisc_run_end(q);
}

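/*
 * Return the most recent transmit start time across all tx queues of the
 * device, caching the result in dev->trans_start as a side effect.
 */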
unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res = dev->trans_start;
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}
	dev->trans_start = res;
	return res;
}
EXPORT_SYMBOL(dev_trans_start);

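/*
 * Per-device watchdog timer: if any stopped tx queue has made no progress
 * within dev->watchdog_timeo, report a transmit timeout via
 * ndo_tx_timeout(), then re-arm the timer while the device remains up.
 */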
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				/*
				 * old device drivers set dev->trans_start
				 */
				trans_start = txq->trans_start ? : dev->trans_start;
				if (netif_tx_queue_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					break;
				}
			}

			if (some_queue_timedout) {
				char drivername[64];
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
				       dev->name, netdev_drivername(dev, drivername, 64), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 * Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);

/**
 *	netif_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netif_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netif_notify_peers);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	.qdisc		=	&noop_qdisc,
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct Qdisc noqueue_qdisc;
static struct netdev_queue noqueue_netdev_queue = {
	.qdisc		=	&noqueue_qdisc,
	.qdisc_sleeping	=	&noqueue_qdisc,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	=	NULL,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noqueue_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
	.dev_queue	=	&noqueue_netdev_queue,
	.busylock	=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.busylock),
};

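/*
 * Map TC_PRIO_* values (skb->priority & TC_PRIO_MAX) onto the three
 * pfifo_fast bands; band 0 is served first, band 2 last.
 */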
static const u8 prio2band[TC_PRIO_MAX+1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 * 	- queues for the three bands
 * 	- bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
	u32 bitmap;
	struct sk_buff_head q[PFIFO_FAST_BANDS];
};

/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 * 	bitmap=0 means there are no skbs on any band.
 * 	bitmap=1 means there is an skb on band 0.
 *	bitmap=7 means there are skbs on all 3 bands, etc.
 */
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
					     int band)
{
	return priv->q + band;
}

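/*
 * Enqueue at the tail of the band selected by skb->priority and mark that
 * band as non-empty in the bitmap; once the device's tx_queue_len limit is
 * reached, the skb is dropped instead.
 */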
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
		int band = prio2band[skb->priority & TC_PRIO_MAX];
		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
		struct sk_buff_head *list = band2list(priv, band);

		priv->bitmap |= (1 << band);
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (likely(band >= 0)) {
		struct sk_buff_head *list = band2list(priv, band);
		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

		qdisc->q.qlen--;
		if (skb_queue_empty(list))
			priv->bitmap &= ~(1 << band);

		return skb;
	}

	return NULL;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (band >= 0) {
		struct sk_buff_head *list = band2list(priv, band);

		return skb_peek(list);
	}

	return NULL;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, band2list(priv, prio));

	priv->bitmap = 0;
	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(band2list(priv, prio));

	return 0;
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};

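/*
 * Allocate a Qdisc together with its private area in one block on the NUMA
 * node of the tx queue, align the Qdisc to QDISC_ALIGNTO and remember the
 * padding so the original allocation can be freed later.
 */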
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are QDISC_ALIGNTO-aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);

	p = kzalloc_node(size, GFP_KERNEL,
			 netdev_queue_numa_node_read(dev_queue));

	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	spin_lock_init(&sch->busylock);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(qdisc_dev(sch));
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				struct Qdisc_ops *ops, unsigned int parentid)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);

	if (qdisc->gso_skb) {
		kfree_skb(qdisc->gso_skb);
		qdisc->gso_skb = NULL;
		qdisc->q.qlen = 0;
	}
}
EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

	kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

#ifdef CONFIG_NET_SCHED
	qdisc_list_del(qdisc);

	qdisc_put_stab(qdisc->stab);
#endif
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	kfree_skb(qdisc->gso_skb);
	/*
	 * gen_estimator's est_timer() might access qdisc->q.lock,
	 * so wait an RCU grace period before freeing the qdisc.
	 */
	call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* Prune old scheduler */
	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;

	if (dev->tx_queue_len) {
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_fast_ops, TC_H_ROOT);
		if (!qdisc) {
			printk(KERN_INFO "%s: activation failed\n", dev->name);
			return;
		}

		/* Can by-pass the queue discipline for default qdisc */
		qdisc->flags |= TCQ_F_CAN_BYPASS;
	} else {
		qdisc = &noqueue_qdisc;
	}
	dev_queue->qdisc_sleeping = qdisc;
}

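/*
 * Choose the default root qdisc for a device: the per-queue default
 * (pfifo_fast, or the built-in noqueue qdisc for devices with no
 * tx_queue_len) on single-queue devices, and an mq qdisc on real
 * multiqueue devices.
 */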
static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		atomic_inc(&dev->qdisc->refcnt);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
		if (qdisc) {
			qdisc->ops->attach(qdisc);
			dev->qdisc = qdisc;
		}
	}
}

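/*
 * Promote the sleeping qdisc of a tx queue to the active one and report
 * back whether the transmit watchdog needs to run for this queue.
 */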
static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* If no queueing discipline is attached to the device yet, create a
	 * default one: pfifo_fast for devices that need queueing, and
	 * noqueue_qdisc for virtual interfaces.
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
}


static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = dev_queue->qdisc;
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}
}

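/*
 * Check whether any tx queue of the device still has a qdisc that is
 * either running or scheduled for transmission.
 */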
static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;
		root_lock = qdisc_lock(q);

		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;

	list_for_each_entry(dev, head, unreg_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
	}

	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, unreg_list)
		while (some_qdisc_is_busy(dev))
			yield();
}

void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	dev_queue->qdisc = qdisc;
	dev_queue->qdisc_sleeping = qdisc;
}

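/*
 * Point every queue of a freshly registered device at noop_qdisc and set
 * up the watchdog timer; real qdiscs are attached later by dev_activate().
 */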
void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
	qdisc_destroy(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}
890