/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

/* Main transmission queue. */

/* Main qdisc structure lock.

   However, modifications to data participating in scheduling
   must additionally be protected with the dev->queue_lock spinlock.

   The idea is the following:
   - enqueue and dequeue are serialized via the top level device
     spinlock dev->queue_lock.
   - tree walking is protected by read_lock_bh(qdisc_tree_lock)
     and this lock is used only in process context.
   - updates to the tree are made under the rtnl semaphore or
     from softirq context (__qdisc_destroy rcu-callback),
     hence this lock needs local bh disabling.

   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
 */
DEFINE_RWLOCK(qdisc_tree_lock);

void qdisc_lock_tree(struct net_device *dev)
{
	write_lock_bh(&qdisc_tree_lock);
	spin_lock_bh(&dev->queue_lock);
}

void qdisc_unlock_tree(struct net_device *dev)
{
	spin_unlock_bh(&dev->queue_lock);
	write_unlock_bh(&qdisc_tree_lock);
}

/*
   dev->queue_lock serializes queue accesses for this device
   AND the dev->qdisc pointer itself.

   dev->xmit_lock serializes accesses to the device driver.

   dev->queue_lock and dev->xmit_lock are mutually exclusive:
   if one is grabbed, the other must be free.
 */


/* Kick device.
   Note that this procedure can be called by a watchdog timer,
   so it must not assume the device is idle.

   Returns:  0  - queue is empty.
            >0  - queue is not empty, but nothing was sent
                  (driver busy/locked or qdisc throttled).
            <0  - a packet was given to the driver (or dropped);
                  the caller may call again to drain the queue.

   NOTE: Called under dev->queue_lock with locally disabled BH.
*/

int qdisc_restart(struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	struct sk_buff *skb;

	/* Dequeue packet */
	if ((skb = q->dequeue(q)) != NULL) {
		unsigned nolock = (dev->features & NETIF_F_LLTX);
		/*
		 * When the driver has LLTX set, it does its own locking
		 * in start_xmit. No need to add additional overhead by
		 * locking again. These checks are worth it because
		 * even uncongested locks can be quite expensive.
		 * The driver can do a trylock, like here, too; in case
		 * of lock contention it should return NETDEV_TX_LOCKED
		 * and the packet will be requeued.
		 */
		if (!nolock) {
			if (!spin_trylock(&dev->xmit_lock)) {
			collision:
				/* So, someone grabbed the driver. */

				/* It may be a transient configuration error,
				   when hard_start_xmit() recurses. We detect
				   it by checking the xmit owner and drop the
				   packet when a dead loop is detected.
				 */
				if (dev->xmit_lock_owner == smp_processor_id()) {
					kfree_skb(skb);
					if (net_ratelimit())
						printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
					return -1;
				}
				__get_cpu_var(netdev_rx_stat).cpu_collision++;
				goto requeue;
			}
			/* Remember that the driver is grabbed by us. */
			dev->xmit_lock_owner = smp_processor_id();
		}

		{
			/* And release queue */
			spin_unlock(&dev->queue_lock);

			if (!netif_queue_stopped(dev)) {
				int ret;
				if (netdev_nit)
					dev_queue_xmit_nit(skb, dev);

				ret = dev->hard_start_xmit(skb, dev);
				if (ret == NETDEV_TX_OK) {
					if (!nolock) {
						dev->xmit_lock_owner = -1;
						spin_unlock(&dev->xmit_lock);
					}
					spin_lock(&dev->queue_lock);
					return -1;
				}
				if (ret == NETDEV_TX_LOCKED && nolock) {
					spin_lock(&dev->queue_lock);
					goto collision;
				}
			}

			/* NETDEV_TX_BUSY - we need to requeue */
			/* Release the driver */
			if (!nolock) {
				dev->xmit_lock_owner = -1;
				spin_unlock(&dev->xmit_lock);
			}
			spin_lock(&dev->queue_lock);
			q = dev->qdisc;
		}

		/* Device kicked us out :(
		   This is possible in the following cases:

		   0. driver is locked
		   1. fastroute is enabled
		   2. device cannot determine busy state
		      before start of transmission (f.e. dialout)
		   3. device is buggy (ppp)
		 */

requeue:
		q->ops->requeue(skb, q);
		netif_schedule(dev);
		return 1;
	}
	BUG_ON((int) q->q.qlen < 0);
	return q->q.qlen;
}
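
/* Illustrative sketch (not part of this file): qdisc_restart() is assumed
 * to be driven by a qdisc_run()-style helper on the output path, roughly:
 *
 *	while (!netif_queue_stopped(dev) && qdisc_restart(dev) < 0)
 *		;
 *
 * i.e. keep calling while the driver keeps accepting packets (return
 * value < 0) and stop as soon as the queue is empty, throttled, or the
 * device has stopped its queue.
 */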

static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	spin_lock(&dev->xmit_lock);
	if (dev->qdisc != &noop_qdisc) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			if (netif_queue_stopped(dev) &&
			    (jiffies - dev->trans_start) > dev->watchdog_timeo) {
				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
				dev->tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
				dev_hold(dev);
		}
	}
	spin_unlock(&dev->xmit_lock);

	dev_put(dev);
}

static void dev_watchdog_init(struct net_device *dev)
{
	init_timer(&dev->watchdog_timer);
	dev->watchdog_timer.data = (unsigned long)dev;
	dev->watchdog_timer.function = dev_watchdog;
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	spin_lock_bh(&dev->xmit_lock);
	__netdev_watchdog_up(dev);
	spin_unlock_bh(&dev->xmit_lock);
}

static void dev_watchdog_down(struct net_device *dev)
{
	spin_lock_bh(&dev->xmit_lock);
	if (del_timer(&dev->watchdog_timer))
		__dev_put(dev);
	spin_unlock_bh(&dev->xmit_lock);
}
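
/* Illustrative sketch (not part of this file): a driver is assumed to opt
 * in to this watchdog by setting up its timeout handler before
 * register_netdev(), e.g.
 *
 *	dev->tx_timeout = mydrv_tx_timeout;	(hypothetical handler)
 *	dev->watchdog_timeo = 2 * HZ;		(<= 0 falls back to 5*HZ above)
 *
 * The timer is armed by __netdev_watchdog_up() and rearmed from
 * dev_watchdog() for as long as the device stays up.
 */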

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
{
	return NULL;
}

static int noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	if (net_ratelimit())
		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
		       skb->dev->name);
	kfree_skb(skb);
	return NET_XMIT_CN;
}

struct Qdisc_ops noop_qdisc_ops = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
};

static struct Qdisc_ops noqueue_qdisc_ops = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	=	NULL,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noqueue_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
};


static const u8 prio2band[TC_PRIO_MAX+1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
					     struct Qdisc *qdisc)
{
	struct sk_buff_head *list = qdisc_priv(qdisc);
	return list + prio2band[skb->priority & TC_PRIO_MAX];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	struct sk_buff_head *list = prio2list(skb, qdisc);

	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		if (!skb_queue_empty(list + prio)) {
			qdisc->q.qlen--;
			return __qdisc_dequeue_head(qdisc, list + prio);
		}
	}

	return NULL;
}

static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
{
	qdisc->q.qlen++;
	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}

static void pfifo_fast_reset(struct Qdisc* qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, list + prio);

	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(list + prio);

	return 0;
}

static struct Qdisc_ops pfifo_fast_ops = {
	.id		=	"pfifo_fast",
	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.requeue	=	pfifo_fast_requeue,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};
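
/* Illustrative example (not part of this file): prio2band above maps
 * skb->priority (masked with TC_PRIO_MAX) to one of the three bands, and
 * pfifo_fast_dequeue() always drains the lowest-numbered non-empty band
 * first.  Assuming the usual TC_PRIO_* values from <linux/pkt_sched.h>:
 *
 *	prio2band[TC_PRIO_INTERACTIVE]	== 0	(served first)
 *	prio2band[TC_PRIO_BESTEFFORT]	== 1	(default traffic)
 *	prio2band[TC_PRIO_BULK]		== 2	(served last)
 */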

struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are 32-byte aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	memset(p, 0, size);
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev = dev;
	dev_hold(dev);
	sch->stats_lock = &dev->queue_lock;
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	/* err is already negative (-ENOBUFS); do not negate it again,
	   or IS_ERR() in the callers will not see the failure. */
	return ERR_PTR(err);
}
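
/* Illustrative sketch (not part of this file), assuming QDISC_ALIGNTO is 32
 * as in <net/pkt_sched.h>: if kmalloc() returns p ending in ...c8 (not
 * 32-byte aligned), qdisc_alloc() above rounds up to sch ending in ...e0,
 * records the 0x18 bytes of slack in sch->padded, and the qdisc private
 * area used via qdisc_priv() then sits at an aligned offset behind the
 * struct.  The kfree() in __qdisc_destroy() undoes this with
 * (char *)qdisc - qdisc->padded.
 */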

struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev, ops);
	if (IS_ERR(sch))
		goto errout;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}

/* Under dev->queue_lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);
}

/* this is the rcu callback function to clean up a qdisc when there
 * are no further references to it */

static void __qdisc_destroy(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
	struct Qdisc_ops *ops = qdisc->ops;

#ifdef CONFIG_NET_ESTIMATOR
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
#endif
	write_lock(&qdisc_tree_lock);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);
	write_unlock(&qdisc_tree_lock);
	module_put(ops->owner);

	dev_put(qdisc->dev);
	kfree((char *) qdisc - qdisc->padded);
}

/* Under dev->queue_lock and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
	struct list_head cql = LIST_HEAD_INIT(cql);
	struct Qdisc *cq, *q, *n;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

	if (!list_empty(&qdisc->list)) {
		if (qdisc->ops->cl_ops == NULL)
			list_del(&qdisc->list);
		else
			list_move(&qdisc->list, &cql);
	}

	/* unlink inner qdiscs from dev->qdisc_list immediately */
	list_for_each_entry(cq, &cql, list)
		list_for_each_entry_safe(q, n, &qdisc->dev->qdisc_list, list)
			if (TC_H_MAJ(q->parent) == TC_H_MAJ(cq->handle)) {
				if (q->ops->cl_ops == NULL)
					list_del_init(&q->list);
				else
					list_move_tail(&q->list, &cql);
			}
	list_for_each_entry_safe(cq, n, &cql, list)
		list_del_init(&cq->list);

	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}

void dev_activate(struct net_device *dev)
{
	/* No queueing discipline is attached to the device;
	   create a default one, i.e. pfifo_fast for devices
	   which need queueing and noqueue_qdisc for virtual
	   interfaces.
	 */

	if (dev->qdisc_sleeping == &noop_qdisc) {
		struct Qdisc *qdisc;
		if (dev->tx_queue_len) {
			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
			if (qdisc == NULL) {
				printk(KERN_INFO "%s: activation failed\n", dev->name);
				return;
			}
			write_lock_bh(&qdisc_tree_lock);
			list_add_tail(&qdisc->list, &dev->qdisc_list);
			write_unlock_bh(&qdisc_tree_lock);
		} else {
			qdisc = &noqueue_qdisc;
		}
		write_lock_bh(&qdisc_tree_lock);
		dev->qdisc_sleeping = qdisc;
		write_unlock_bh(&qdisc_tree_lock);
	}

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	spin_lock_bh(&dev->queue_lock);
	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
	if (dev->qdisc != &noqueue_qdisc) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
	spin_unlock_bh(&dev->queue_lock);
}

void dev_deactivate(struct net_device *dev)
{
	struct Qdisc *qdisc;

	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc;
	dev->qdisc = &noop_qdisc;

	qdisc_reset(qdisc);

	spin_unlock_bh(&dev->queue_lock);

	dev_watchdog_down(dev);

	/* Wait until the device is no longer scheduled for a TX softirq run. */
	while (test_bit(__LINK_STATE_SCHED, &dev->state))
		yield();

	spin_unlock_wait(&dev->xmit_lock);
}

void dev_init_scheduler(struct net_device *dev)
{
	qdisc_lock_tree(dev);
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	INIT_LIST_HEAD(&dev->qdisc_list);
	qdisc_unlock_tree(dev);

	dev_watchdog_init(dev);
}

void dev_shutdown(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc_lock_tree(dev);
	qdisc = dev->qdisc_sleeping;
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
	if ((qdisc = dev->qdisc_ingress) != NULL) {
		dev->qdisc_ingress = NULL;
		qdisc_destroy(qdisc);
	}
#endif
	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
	qdisc_unlock_tree(dev);
}

EXPORT_SYMBOL(__netdev_watchdog_up);
EXPORT_SYMBOL(noop_qdisc);
EXPORT_SYMBOL(noop_qdisc_ops);
EXPORT_SYMBOL(qdisc_create_dflt);
EXPORT_SYMBOL(qdisc_alloc);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_restart);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);
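
/* Orientation note (assumed behaviour of callers, not part of this file):
 * dev_init_scheduler() is expected to run at device registration time and
 * dev_activate()/dev_deactivate() around dev_open()/dev_close().  A device
 * with a non-zero tx_queue_len (a typical hardware NIC) gets pfifo_fast as
 * its default qdisc, while a virtual device with tx_queue_len == 0 (e.g.
 * loopback) gets noqueue_qdisc, whose NULL ->enqueue lets the output path
 * bypass queueing entirely.
 */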