--- net/sched/sch_generic.c (b47300168e770b60ab96c8924854c3b0eb4260eb)
+++ net/sched/sch_generic.c (d314774cf2cd5dfeb39a00d37deee65d4c627927)
/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *

--- 210 unchanged lines hidden ---

			}

			if (some_queue_stopped &&
			    time_after(jiffies, (dev->trans_start +
						 dev->watchdog_timeo))) {
				char drivername[64];
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
					  dev->name, netdev_drivername(dev, drivername, 64));
-				dev->tx_timeout(dev);
+				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}
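
The hunk above is part of the net_device_ops conversion: the watchdog no longer calls a function pointer stored directly on struct net_device, but goes through dev->netdev_ops. A minimal sketch of what this means on the driver side follows; the foo_* names are hypothetical, only the net_device_ops wiring reflects the real API.

/* Hypothetical driver: wiring up a transmit-timeout handler after the
 * net_device_ops conversion. */
static void foo_tx_timeout(struct net_device *dev)
{
	/* reset the hardware queue, then let the stack resume transmits */
	netif_wake_queue(dev);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_tx_timeout	= foo_tx_timeout,
	/* .ndo_open, .ndo_stop, .ndo_start_xmit, ... */
};

/* in the probe routine:  dev->netdev_ops = &foo_netdev_ops;
 * before this change:    dev->tx_timeout = foo_tx_timeout;
 */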

void __netdev_watchdog_up(struct net_device *dev)
{
-	if (dev->tx_timeout) {
+	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

--- 14 unchanged lines hidden ---

 * netif_carrier_on - set carrier
 * @dev: network device
 *
 * Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
-		if (dev->reg_state == NETREG_UNINITIALIZED)
-			return;
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 * netif_carrier_off - clear carrier
 * @dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
-	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
-		if (dev->reg_state == NETREG_UNINITIALIZED)
-			return;
+	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
-	}
}
EXPORT_SYMBOL(netif_carrier_off);

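These two carrier helpers are the driver-facing link-state API: a driver calls them from its link-status interrupt or poll routine, and the stack turns the edge into a linkwatch event. A hypothetical caller, with foo_link_up() standing in for a hardware status read:

static void foo_check_link(struct net_device *dev)
{
	if (foo_link_up(dev)) {			/* assumed PHY/MAC status read */
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);	/* schedules a linkwatch event */
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
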
298/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
299 under all circumstances. It is difficult to invent anything faster or
300 cheaper.
301 */
302
303static int noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
304{
305 kfree_skb(skb);
306 return NET_XMIT_CN;
307}
308
309static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
310{
311 return NULL;
312}
313
-static int noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
-{
-	if (net_ratelimit())
-		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
-		       skb->dev->name);
-	kfree_skb(skb);
-	return NET_XMIT_CN;
-}
-
struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
-	.requeue	=	noop_requeue,
+	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	.qdisc		=	&noop_qdisc,
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
-	.requeue.lock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
};
EXPORT_SYMBOL(noop_qdisc);

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
-	.requeue	=	noop_requeue,
+	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct Qdisc noqueue_qdisc;
static struct netdev_queue noqueue_netdev_queue = {
	.qdisc		=	&noqueue_qdisc,
	.qdisc_sleeping	=	&noqueue_qdisc,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	=	NULL,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noqueue_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
-	.requeue.lock	=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
	.dev_queue	=	&noqueue_netdev_queue,
};


static const u8 prio2band[TC_PRIO_MAX+1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

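prio2band maps skb->priority (masked with TC_PRIO_MAX) to one of pfifo_fast's three bands; band 0 is always emptied before band 1, and band 1 before band 2. So TC_PRIO_CONTROL (7) and TC_PRIO_INTERACTIVE (6) land in band 0, best-effort traffic (0) in band 1, and TC_PRIO_BULK (2) in band 2. A sketch of the lookup follows; the file's real helper is prio2list() in the hidden lines below, band_for() is just an illustrative name:

static inline struct sk_buff_head *band_for(struct sk_buff *skb,
					    struct Qdisc *qdisc)
{
	struct sk_buff_head *list = qdisc_priv(qdisc);

	/* three sk_buff_head lists sit in the qdisc's private area */
	return list + prio2band[skb->priority & TC_PRIO_MAX];
}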

--- 32 unchanged lines hidden ---

			qdisc->q.qlen--;
			return __qdisc_dequeue_head(qdisc, list + prio);
		}
	}

	return NULL;
}

-static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
{
-	qdisc->q.qlen++;
-	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
+	int prio;
+	struct sk_buff_head *list = qdisc_priv(qdisc);
+
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+		if (!skb_queue_empty(list + prio))
+			return skb_peek(list + prio);
+	}
+
+	return NULL;
}
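
This hunk is the core of the ->requeue() removal: pfifo_fast no longer re-inserts a packet the driver refused; instead it grows a non-destructive ->peek(). A packet that could not be sent is parked in qdisc->gso_skb by the transmit path and replayed on the next dequeue. A simplified sketch of that dequeue-side convention (the real dequeue_skb() also checks whether the target tx queue is still stopped):

static struct sk_buff *sketch_dequeue(struct Qdisc *q)
{
	struct sk_buff *skb = q->gso_skb;

	if (skb)
		q->gso_skb = NULL;	/* replay the parked packet first */
	else
		skb = q->dequeue(q);	/* otherwise pull a fresh one */

	return skb;
}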

static void pfifo_fast_reset(struct Qdisc* qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)

--- 26 unchanged lines hidden ---

	return 0;
}

static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
-	.requeue	=	pfifo_fast_requeue,
+	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};
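
With this struct, the full set of ops a simple qdisc provides after the change is visible: ->peek() slots in where ->requeue() used to be. A hypothetical out-of-tree scheduler would fill in the same table and register it; all myfifo_* handlers here are assumptions, only the op names and register_qdisc() are real:

static struct Qdisc_ops myfifo_qdisc_ops __read_mostly = {
	.id		=	"myfifo",
	.priv_size	=	sizeof(struct sk_buff_head),
	.enqueue	=	myfifo_enqueue,	/* assumed */
	.dequeue	=	myfifo_dequeue,	/* assumed */
	.peek		=	myfifo_peek,	/* assumed; must not dequeue */
	.owner		=	THIS_MODULE,
};

/* module init:  return register_qdisc(&myfifo_qdisc_ops); */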

struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  struct Qdisc_ops *ops)

--- 9 unchanged lines hidden ---


	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
-	skb_queue_head_init(&sch->requeue);
	skb_queue_head_init(&sch->q);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(qdisc_dev(sch));
	atomic_set(&sch->refcnt, 1);

--- 26 unchanged lines hidden ---

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);
+
+	kfree_skb(qdisc->gso_skb);
+	qdisc->gso_skb = NULL;
}
EXPORT_SYMBOL(qdisc_reset);

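qdisc_reset() now drops a cached gso_skb because of the peek convention introduced above: a qdisc without a cheap native peek can pull one packet from ->dequeue() early and cache it in gso_skb until the caller really dequeues it. The sketch below shows roughly what the qdisc_peek_dequeued() helper does (an illustration, not the verbatim kernel code); if reset or destroy did not free that cache, the packet would leak:

static inline struct sk_buff *sketch_peek_dequeued(struct Qdisc *sch)
{
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			sch->q.qlen++;	/* still accounted as queued */
	}

	return sch->gso_skb;
}
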
void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||

--- 10 unchanged lines hidden ---

		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	kfree_skb(qdisc->gso_skb);
-	__skb_queue_purge(&qdisc->requeue);
-
	kfree((char *) qdisc - qdisc->padded);
}
EXPORT_SYMBOL(qdisc_destroy);

static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
{
	unsigned int i;


--- 172 unchanged lines hidden ---